xref: /petsc/src/benchmarks/streams/makefile (revision cdb0f33d09c128f365fdb48a6f07c56e211b6a43)
# Pick up the user's PETSC_DIR/PETSC_ARCH settings when present.
# Leading '-' makes the include optional: no error if petscdir.mk is absent.
-include ../../../petscdir.mk
# Flag variables are intentionally empty here; the real compiler/linker flags
# come from the PETSc configuration included below.
CFLAGS	      =
FFLAGS	      =
CPPFLAGS      =
FPPFLAGS      =
# Location of this directory relative to PETSC_DIR (used by the PETSc build system).
LOCDIR        = src/benchmarks/streams/
# C/CUDA benchmark sources; no Fortran examples in this directory.
EXAMPLESC     = BasicVersion.c MPIVersion.c OpenMPVersion.c SSEVersion.c PthreadVersion.c CUDAVersion.cu
EXAMPLESF     =
TESTS         = BasicVersion OpenMPVersion
# Manual-page section for the PETSc documentation tooling.
MANSEC        = Sys
11d8c74875SBarry Smith
# PETSc build machinery: compiler/linker variables, implicit compile rules,
# and the test-harness targets, in that order.
include ${PETSC_DIR}/lib/petsc/conf/variables
include ${PETSC_DIR}/lib/petsc/conf/rules
include ${PETSC_DIR}/lib/petsc/conf/test
15d8c74875SBarry Smith
# Single-process baseline STREAMS benchmark.
# '-' keeps make going if the link fails; '@' suppresses echoing.
# Uses automatic variables ($@ = target, $< = first prerequisite) like the
# SSEVersion rule below, instead of repeating the file names.
BasicVersion: BasicVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
19d8c74875SBarry Smith
# MPI-parallel STREAMS benchmark (one measurement per rank count; see mpistream).
# Uses automatic variables ($@/$<) for consistency with the SSEVersion rule.
MPIVersion: MPIVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
23d3ae85c4SBarry Smith
# CUDA STREAMS benchmark (built from CUDAVersion.cu; see cudastreamjsrun).
# Uses automatic variables ($@/$<) for consistency with the SSEVersion rule.
CUDAVersion: CUDAVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
2719816777SMark
# OpenMP STREAMS benchmark; thread count is set at run time via OMP_NUM_THREADS
# (see the openmpstream target).
# NOTE(review): unlike the sibling rules this does not link ${PETSC_LIB} —
# presumably intentional since the pure-OpenMP source needs no PETSc/MPI
# symbols; confirm before adding it.
OpenMPVersion: OpenMPVersion.o
	-@${CLINKER} -o $@ $<
	@${RM} -f $<
31d8c74875SBarry Smith
# SSE-intrinsics STREAMS benchmark.
# Commands are not '@'-silenced here, so the link and rm lines are echoed;
# '-' still lets make continue if the link fails.
SSEVersion: SSEVersion.o
	-${CLINKER} -o $@ $< ${PETSC_LIB}
	${RM} -f $<
35d3ae85c4SBarry Smith
# POSIX-threads STREAMS benchmark.
# Uses automatic variables ($@/$<) for consistency with the SSEVersion rule.
PthreadVersion: PthreadVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
39b8a1809bSJed Brown
# Scaling run of the MPI STREAMS benchmark.
# Usage: make mpistream NPMAX=<integer number of MPI processes> [MPI_BINDING='binding options']
# Fails fast with a usage message when NPMAX is not set, truncates scaling.log,
# runs MPIVersion on 1..NPMAX ranks appending each run to the log, then
# post-processes the log with process.py ('fileoutput' makes it write results
# rather than only printing).  Note $$ passes a literal '$' to the shell.
mpistream:  MPIVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or       [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	  ${MPIEXEC} ${MPI_BINDING} -n $${i} ./MPIVersion | tee -a scaling.log; \
        done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py MPI fileoutput
504198fb66SBarry Smith
# Scaling run of the CUDA STREAMS benchmark using jsrun-style resource-set
# flags (-c cores, -a tasks, -g GPUs).  Works on OLCF Summit.
# Usage: make cudastreamjsrun NPMAX=<integer number of MPI processes> [MPI_BINDING='binding options']
# First loop: a single resource set with 1..min(NPMAX,7) cores/tasks and one GPU.
# Second loop: grows the number of resource sets in steps of 7 processes,
# trying 6 and 7 cores per set.  All output accumulates in scaling.log.
# Fix: the usage message previously told the user to run 'make streams'
# (copied from mpistream); it now names this target.
cudastreamjsrun:  CUDAVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make cudastreamjsrun NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or       [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make cudastreamjsrun NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ] && [ $${i} -lt 7 ]; do i=`expr $${i} + 1`; \
	  ${MPIEXEC} ${MPI_BINDING} -n 1 -c$${i} -a$${i} -g1 ./CUDAVersion | tee -a scaling.log; \
        done
	-@n=1; i=7; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 7`; n=`expr $${n} + 1`; \
	       c=5; while [ $${c} -lt 7 ]; do c=`expr $${c} + 1`; \
	  ${MPIEXEC} ${MPI_BINDING} -n $${n} -c$${c} -a$${c} -g1 ./CUDAVersion | tee -a scaling.log; \
        done; done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py CUDA fileoutput
6519816777SMark
# Scaling run of the OpenMP STREAMS benchmark: runs OpenMPVersion with
# OMP_NUM_THREADS = 1..NPMAX, appending each run to scaling.log, then
# post-processes the log with process.py.
# Usage: make openmpstream NPMAX=<integer number of threads>
# Fix: the 'Running openmpstreams' line carried the garbled recipe prefix
# '@-@'; it is now the plain '-@' used by every sibling recipe line.
openmpstream:  OpenMPVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with make openmpstream NPMAX=<integer number of threads>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running openmpstreams using 'NPMAX=${NPMAX}'\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	  OMP_NUM_THREADS=$${i} ./OpenMPVersion  | tee -a scaling.log;\
        done
	-@${PYTHON} process.py OpenMP fileoutput
74a6cca095SBarry Smith
# Print the node's hardware topology with lstopo (if configure found it),
# omitting instruction caches, I/O devices, and processing units.
# A no-op when LSTOPO is unset/empty, so aggregate targets can always depend on it.
hwloc:
	-@if [ -n "${LSTOPO}" ]; then ${MPIEXEC} ${MPI_BINDING} -n 1 ${LSTOPO} --no-icaches --no-io --ignore PU ; fi
77c40481a3SBarry Smith
# Full MPI benchmark report: run the scaling sweep, show the topology,
# then print the processed results (process.py without 'fileoutput').
mpistreams: mpistream hwloc
	-@${PYTHON} process.py MPI
804198fb66SBarry Smith
814198fb66SBarry Smith
# Full OpenMP benchmark report: run the scaling sweep, show the topology,
# then print the processed results (process.py without 'fileoutput').
openmpstreams: openmpstream hwloc
	-@${PYTHON} process.py OpenMP
84d3ae85c4SBarry Smith
85d3ae85c4SBarry Smith
86