Machine specific configure scripts: Difference between revisions
Jump to navigation
Jump to search
No edit summary |
m (→G100) |
||
(15 intermediate revisions by 2 users not shown) | |||
Line 1: | Line 1: | ||
== | == Yambo @ HPC machines == | ||
The GPL version of Yambo is already installed on different HPC systems around the world, here we report some of them: | The GPL version of Yambo is already installed on different HPC systems around the world, here we report some of them: | ||
Niflheim at Technical University of Denmark | * Leonardo at CINECA | ||
SP6 at CINECA | * Eurora at CINECA | ||
Arina at SGI at Universidad del Pais Vasco. | * Niflheim at Technical University of Denmark | ||
Core.Sam at University of Pittsburgh. | * SP6 at CINECA | ||
* Arina at SGI at Universidad del Pais Vasco. | |||
* Core.Sam at University of Pittsburgh. | |||
Below are some configure options that have been used in the past. Of course, since compilers and architectures vary a lot, there are no guarantees that they will work on your system. Be particularly careful when specifying FCFLAGS, as you may override settings which are necessary for compilation, e.g. -nofor_main with ifort. | Below are some configure options that have been used in the past. Of course, since compilers and architectures vary a lot, there are no guarantees that they will work on your system. Be particularly careful when specifying FCFLAGS, as you may override settings which are necessary for compilation, e.g. -nofor_main with ifort. | ||
CH: Suggest to list/group these in order of architecture, linux, IBM, Cray, OS/X, etc | <!-- CH: Suggest to list/group these in order of architecture, linux, IBM, Cray, OS/X, etc --> | ||
== CINECA HPC centre == | |||
=== | === M100 === | ||
module ... | |||
=== G100 === | |||
module purge | |||
module load autoload | |||
module load intel/oneapi-2021--binary | |||
module load mkl/oneapi-2021--binary | |||
module load intelmpi/oneapi-2021--binary | |||
module load intel-oneapi-compilers/2021.4.0 | |||
module load intel-oneapi-mpi/2021.4.0 | |||
module load intel-oneapi-mkl/2021.4.0 | |||
module load hdf5/1.10.7--intel-oneapi-mpi--2021.4.0--intel--2021.4.0 | |||
module load netcdf-c/4.8.1--intel-oneapi-mpi--2021.4.0--intel--2021.4.0 | |||
module load netcdf-fortran/4.5.3--intel-oneapi-mpi--2021.4.0--intel--2021.4.0 | |||
module load libxc/5.1.5--intel--oneapi-2021--binary | |||
export FC=ifort | |||
export F77=ifort | |||
export CPP="icc -E -ansi" | |||
export CC=icc | |||
export FPP="fpp -free -P" | |||
export F90SUFFIX=".f90" | |||
export MPIFC=mpiifort | |||
export MPIF77=mpiifort | |||
export MPICC=mpiicc | |||
export MPICXX=mpiicpc | |||
./configure \ | |||
--enable-open-mp --enable-mpi --enable-slepc-linalg --enable-hdf5-par-io --enable-par-linalg \ | |||
--enable-msgs-comps --enable-time-profile --enable-memory-profile \ | |||
--with-blas-libs="-L${MKLROOT}/lib/intel64 -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm -ldl" \ | |||
--with-lapack-libs="-L${MKLROOT}/lib/intel64 -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm -ldl" \ | |||
--with-scalapack-libs="-L${MKLROOT}/lib/intel64 -lmkl_scalapack_lp64" \ | |||
--with-blacs-libs="-L${MKLROOT}/lib/intel64 -lmkl_blacs_intelmpi_lp64" \ | |||
--with-fft-includedir="${MKLROOT}/include" \ | |||
--with-fft-libs="-L${MKLROOT}/lib/intel64 -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm -ldl" \ | |||
--with-netcdf-path=$NETCDF_C_HOME \ | |||
--with-netcdff-path=$NETCDF_FORTRAN_HOME \ | |||
--with-hdf5-path=$HDF5_HOME \ | |||
--with-libxc-path=$LIBXC_HOME \ | |||
--with-extlibs-path=${HOME}/opt/ext-libs/oneapi-2021 | |||
=== EURORA | === EURORA SP6 === | ||
EURORA is a hybrid supercomputer, with Intel Xeon SandyBridge processors and GPU NVIDIA Tesla K20 accelerators | EURORA is a hybrid supercomputer, with Intel Xeon SandyBridge processors and GPU NVIDIA Tesla K20 accelerators | ||
module load autoload/0.1 intel/cs-xe-2013--binary intelmpi/4.1.0--binary mkl/11.0.1--binary gnu/4.6.3 cuda/5.0.35 qe/5.0.3 netcdf/4.1.3--intel--cs-xe-2013--binary hdf5/1.8.9_ser--intel--cs-xe-2013--binary szip/2.1--gnu--4.6.3 zlib/1.2.7--gnu--4.6.3 | |||
module load autoload/0.1 intel/cs-xe-2013--binary intelmpi/4.1.0--binary mkl/11.0.1--binary gnu/4.6.3 cuda/5.0.35 qe/5.0.3 netcdf/4.1.3--intel--cs-xe-2013--binary hdf5/1.8.9_ser--intel- | ./configure --with-p2y=5.0 \ | ||
-cs-xe-2013--binary szip/2.1--gnu--4.6.3 zlib/1.2.7--gnu--4.6.3 | --with-iotk=/cineca/prod/build/applications/qe/5.0.3/cuda--5.0.35/BA_WORK/espresso-5.0.3/iotk/ \ | ||
./configure --with-p2y=5.0 \ | --with-netcdf-lib=/cineca/prod/libraries/netcdf/4.1.3/intel--cs-xe-2013--binary/lib/ \ | ||
--with-iotk=/cineca/prod/build/applications/qe/5.0.3/cuda--5.0.35/BA_WORK/espresso-5.0.3/iotk/ \ | --with-netcdf-include=/cineca/prod/libraries/netcdf/4.1.3/intel--cs-xe-2013--binary/include \ | ||
--with-netcdf-lib=/cineca/prod/libraries/netcdf/4.1.3/intel--cs-xe-2013--binary/lib/ \ | --with-netcdf-link="-L/cineca/prod/libraries/hdf5/1.8.9_ser/intel--cs-xe-2013--binary/lib -L/cineca/prod/libraries/szip/2.1/gnu--4.6.3/lib -lhdf5_fortran -lhdf5_hl -lhdf5 -lnetcdff -lnetcdf -lcurl -lsz -lz" | ||
--with-netcdf-include=/cineca/prod/libraries/netcdf/4.1.3/intel--cs-xe-2013--binary/include \ | |||
--with-netcdf-link="-L/cineca/prod/libraries/hdf5/1.8.9_ser/intel--cs-xe-2013--binary/lib -L/cineca/prod/libraries/szip/2.1/gnu--4.6.3/lib -lhdf5_fortran -lhdf5_hl -lhdf5 -lnetcdff -lnetcdf -lcurl -lsz -lz" | |||
export | === IBM AIX and xlf === | ||
export | Linking with netCDF, PWscf, FFTW. Production runs | ||
export | export CPP=cpp | ||
export | export CC=xlc_r | ||
export F77=xlf_r | |||
export FC=xlf90_r | |||
export FCFLAGS='-O2 -q64 -qstrict -qarch=pwr6 -qtune=pwr6 -qmaxmem=-1 -qsuffix=f=f' | |||
./configure --build=powerpc-ibm --with-fftw=/cineca/prod/libraries/fftw/3.2.2/xl--10.1/lib | |||
--with-netcdf-lib=/cineca/prod/libraries/netcdf/4.0.1/xl--10.1/lib | |||
--with-netcdf-include=/cineca/prod/libraries/netcdf/4.0.1/xl--10.1/include | |||
--with-iotk=/cineca/prod/build/applications/QuantumESPRESSO/4.1/xl--10.1/BA_WORK/QuantumESPRESSO-4.1/iotk | |||
--with-p2y=4.0 | |||
== Other HPC centers == | |||
Latest revision as of 19:02, 28 June 2025
Yambo @ HPC machines
The GPL version of Yambo is already installed on different HPC systems around the world, here we report some of them:
- Leonardo at CINECA
- Eurora at CINECA
- Niflheim at Technical University of Denmark
- SP6 at CINECA
- Arina at SGI at Universidad del Pais Vasco.
- Core.Sam at University of Pittsburgh.
Below are some configure options that have been used in the past. Of course, since compilers and architectures vary a lot, there are no guarantees that they will work on your system. Be particularly careful when specifying FCFLAGS, as you may override settings which are necessary for compilation, e.g. -nofor_main with ifort.
CINECA HPC centre
M100
module ...
G100
# G100 @ CINECA: load the Intel oneAPI 2021 toolchain and configure Yambo.
# NOTE(review): this snippet was collapsed onto a single line in the page
# rendering; each `module load` / `export` must be its own command.
module purge
module load autoload
module load intel/oneapi-2021--binary
module load mkl/oneapi-2021--binary
module load intelmpi/oneapi-2021--binary
module load intel-oneapi-compilers/2021.4.0
module load intel-oneapi-mpi/2021.4.0
module load intel-oneapi-mkl/2021.4.0
module load hdf5/1.10.7--intel-oneapi-mpi--2021.4.0--intel--2021.4.0
module load netcdf-c/4.8.1--intel-oneapi-mpi--2021.4.0--intel--2021.4.0
module load netcdf-fortran/4.5.3--intel-oneapi-mpi--2021.4.0--intel--2021.4.0
module load libxc/5.1.5--intel--oneapi-2021--binary

# Serial compilers and preprocessors (Intel classic).
export FC=ifort
export F77=ifort
export CPP="icc -E -ansi"
export CC=icc
export FPP="fpp -free -P"
export F90SUFFIX=".f90"

# MPI compiler wrappers (Intel MPI).
export MPIFC=mpiifort
export MPIF77=mpiifort
export MPICC=mpiicc
export MPICXX=mpiicpc

# MKL supplies BLAS/LAPACK/ScaLAPACK/BLACS and the FFT; NetCDF/HDF5/libxc
# paths come from the modules loaded above ($*_HOME set by the module system).
./configure \
  --enable-open-mp --enable-mpi --enable-slepc-linalg --enable-hdf5-par-io --enable-par-linalg \
  --enable-msgs-comps --enable-time-profile --enable-memory-profile \
  --with-blas-libs="-L${MKLROOT}/lib/intel64 -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm -ldl" \
  --with-lapack-libs="-L${MKLROOT}/lib/intel64 -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm -ldl" \
  --with-scalapack-libs="-L${MKLROOT}/lib/intel64 -lmkl_scalapack_lp64" \
  --with-blacs-libs="-L${MKLROOT}/lib/intel64 -lmkl_blacs_intelmpi_lp64" \
  --with-fft-includedir="${MKLROOT}/include" \
  --with-fft-libs="-L${MKLROOT}/lib/intel64 -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm -ldl" \
  --with-netcdf-path=$NETCDF_C_HOME \
  --with-netcdff-path=$NETCDF_FORTRAN_HOME \
  --with-hdf5-path=$HDF5_HOME \
  --with-libxc-path=$LIBXC_HOME \
  --with-extlibs-path=${HOME}/opt/ext-libs/oneapi-2021
EURORA SP6
EURORA is a hybrid supercomputer, with Intel Xeon SandyBridge processors and GPU NVIDIA Tesla K20 accelerators
# EURORA @ CINECA: load the Intel cs-xe-2013 stack plus netCDF/HDF5, then
# configure Yambo against the QE 5.0.3 iotk and the system netCDF install.
# NOTE(review): the rendered page ran `module load` and `./configure`
# together on one line, which would pass `./configure` to `module load`;
# they must be separate commands.
module load autoload/0.1 intel/cs-xe-2013--binary intelmpi/4.1.0--binary mkl/11.0.1--binary gnu/4.6.3 cuda/5.0.35 qe/5.0.3 netcdf/4.1.3--intel--cs-xe-2013--binary hdf5/1.8.9_ser--intel--cs-xe-2013--binary szip/2.1--gnu--4.6.3 zlib/1.2.7--gnu--4.6.3

./configure --with-p2y=5.0 \
  --with-iotk=/cineca/prod/build/applications/qe/5.0.3/cuda--5.0.35/BA_WORK/espresso-5.0.3/iotk/ \
  --with-netcdf-lib=/cineca/prod/libraries/netcdf/4.1.3/intel--cs-xe-2013--binary/lib/ \
  --with-netcdf-include=/cineca/prod/libraries/netcdf/4.1.3/intel--cs-xe-2013--binary/include \
  --with-netcdf-link="-L/cineca/prod/libraries/hdf5/1.8.9_ser/intel--cs-xe-2013--binary/lib -L/cineca/prod/libraries/szip/2.1/gnu--4.6.3/lib -lhdf5_fortran -lhdf5_hl -lhdf5 -lnetcdff -lnetcdf -lcurl -lsz -lz"
IBM AIX and xlf
Linking with netCDF, PWscf, FFTW. Production runs
# IBM AIX with the xlf compilers: linking with netCDF, PWscf (iotk), FFTW.
# Production-run settings (64-bit, POWER6 tuning).
# NOTE(review): the rendered page collapsed this onto one line, and the
# original wiki text lacked `\` continuations on the configure options,
# so each `--with-*` line would have run as a separate (failing) command.
export CPP=cpp
export CC=xlc_r
export F77=xlf_r
export FC=xlf90_r
# -qsuffix=f=f lets xlf90 accept .f sources; -qmaxmem=-1 removes the
# compile-time memory cap.
export FCFLAGS='-O2 -q64 -qstrict -qarch=pwr6 -qtune=pwr6 -qmaxmem=-1 -qsuffix=f=f'

./configure --build=powerpc-ibm \
  --with-fftw=/cineca/prod/libraries/fftw/3.2.2/xl--10.1/lib \
  --with-netcdf-lib=/cineca/prod/libraries/netcdf/4.0.1/xl--10.1/lib \
  --with-netcdf-include=/cineca/prod/libraries/netcdf/4.0.1/xl--10.1/include \
  --with-iotk=/cineca/prod/build/applications/QuantumESPRESSO/4.1/xl--10.1/BA_WORK/QuantumESPRESSO-4.1/iotk \
  --with-p2y=4.0