
Dev222 #2

Closed
wants to merge 12 commits into from
101 changes: 49 additions & 52 deletions .github/workflows/build.yaml
@@ -32,62 +32,42 @@ jobs:
fail-fast: false

matrix:
compiler: [gcc, clang]
cuda: [with, without]
compiler: [gcc]
cuda: [with]
openmp: [with]
link: [both]
include:
- compiler: gcc
compiler-pkgs: "g++ gcc"
cc: "gcc"
cxx: "g++"
- compiler: clang
compiler-pkgs: "clang libomp-dev"
cc: "clang"
cxx: "clang++"
# Clang seems to generally require less cache size (smaller object files?).
cuda: with
openmp: with
link: both
- compiler: gcc
ccache-max: 600M
cuda: without
openmp: with
link: both
- compiler: clang
ccache-max: 500M
- cuda: with
cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
cuda-cmake-flags:
-DSUITESPARSE_USE_CUDA=ON
-DSUITESPARSE_USE_STRICT=ON
-DCUDAToolkit_INCLUDE_DIRS="/usr/include"
-DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
cuda: with
openmp: with
link: both
- compiler: clang
cuda: without
openmp: with
link: both
- compiler: gcc
compiler-pkgs: "g++ gcc"
cc: "gcc"
cxx: "g++"
ccache-max: 600M
cuda: without
openmp: without
openmp-cmake-flags: "-DSUITESPARSE_USE_OPENMP=OFF"
link: both
- compiler: gcc
compiler-pkgs: "g++ gcc"
cc: "gcc"
cxx: "g++"
ccache-max: 600M
cuda: with
cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
cuda-cmake-flags:
-DSUITESPARSE_USE_CUDA=ON
-DSUITESPARSE_USE_STRICT=ON
-DCUDAToolkit_INCLUDE_DIRS="/usr/include"
-DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
openmp: with
link: static
# "Fake" a cross-compilation to exercise that build system path
link-cmake-flags:
-DBUILD_SHARED_LIBS=OFF
-DBUILD_STATIC_LIBS=ON
extra-cmake-flags:
-DCMAKE_SYSTEM_NAME="Linux"

env:
CC: ${{ matrix.cc }}
CXX: ${{ matrix.cxx }}
CC: ${{ matrix.compiler == 'gcc' && 'gcc' || 'clang' }}
CXX: ${{ matrix.compiler == 'gcc' && 'g++' || 'clang++' }}

steps:
- name: get CPU information
@@ -97,14 +77,15 @@ jobs:
uses: actions/checkout@v4

- name: install dependencies
env:
COMPILER_PKGS: ${{ matrix.compiler-pkgs }}
CUDA_PKGS: ${{ matrix.cuda-pkgs }}
run: |
sudo apt -qq update
sudo apt install -y ${COMPILER_PKGS} autoconf automake ccache cmake \
sudo apt install -y \
${{ matrix.compiler == 'gcc' && 'g++ gcc' || 'clang' }} \
${{ matrix.compiler == 'clang' && matrix.openmp == 'with' && 'libomp-dev' || '' }} \
autoconf automake ccache cmake \
dvipng gfortran libgmp-dev liblapack-dev libmpfr-dev valgrind \
libopenblas-dev ${CUDA_PKGS}
libopenblas-dev \
${{ matrix.cuda == 'with' && 'nvidia-cuda-dev nvidia-cuda-toolkit' || '' }}

- name: prepare ccache
# create key with human readable timestamp
@@ -144,7 +125,8 @@ jobs:
CCACHE_MAX: ${{ matrix.ccache-max }}
run: |
test -d ~/.ccache || mkdir ~/.ccache
echo "max_size = $CCACHE_MAX" >> ~/.ccache/ccache.conf
# Clang seems to generally require less cache size (smaller object files?).
echo "max_size = ${{ matrix.compiler == 'gcc' && '600M' || '500M' }}" >> ~/.ccache/ccache.conf
echo "compression = true" >> ~/.ccache/ccache.conf
ccache -s
echo "/usr/lib/ccache" >> $GITHUB_PATH
@@ -156,21 +138,29 @@
printf " \033[0;32m==>\033[0m Building library \033[0;32m${lib}\033[0m\n"
echo "::group::Configure $lib"
cd ${GITHUB_WORKSPACE}/${lib}/build
cmake -DCMAKE_BUILD_TYPE="Release" \
cmake -DCMAKE_BUILD_TYPE=${{ matrix.compiler == 'clang' && 'Debug' || 'Release' }} \
-DCMAKE_INSTALL_PREFIX="${GITHUB_WORKSPACE}" \
-DCMAKE_C_COMPILER_LAUNCHER="ccache" \
-DCMAKE_CXX_COMPILER_LAUNCHER="ccache" \
-DCMAKE_Fortran_COMPILER_LAUNCHER="ccache" \
-DBLA_VENDOR="OpenBLAS" \
-DSUITESPARSE_DEMOS=OFF \
-DBUILD_TESTING=OFF \
${{ matrix.cuda-cmake-flags }} \
${{ matrix.openmp-cmake-flags }} \
${{ matrix.link-cmake-flags }} \
${{ matrix.cuda == 'with'
&& '-DSUITESPARSE_USE_CUDA=ON \
-DSUITESPARSE_USE_STRICT=ON \
-DCUDAToolkit_INCLUDE_DIRS="/usr/include" \
-DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"'
|| '-DSUITESPARSE_USE_CUDA=OFF' }} \
-DSUITESPARSE_USE_OPENMP=${{ matrix.openmp == 'without' && 'OFF' || 'ON' }} \
${{ matrix.link == 'static'
&& '-DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON'
|| '' }} \
${{ matrix.extra-cmake-flags }} \
..
echo "::endgroup::"
echo "::group::Build $lib"
cmake --build . --config Release
cmake --build .
echo "::endgroup::"
done

@@ -214,8 +204,15 @@ jobs:
cmake \
-DCMAKE_PREFIX_PATH="${GITHUB_WORKSPACE}/lib/cmake" \
-DBLA_VENDOR="OpenBLAS" \
${{ matrix.cuda-cmake-flags }} \
${{ matrix.link-cmake-flags }} \
${{ matrix.cuda == 'with'
&& '-DSUITESPARSE_USE_CUDA=ON \
-DSUITESPARSE_USE_STRICT=ON \
-DCUDAToolkit_INCLUDE_DIRS="/usr/include" \
-DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"'
|| '' }} \
${{ matrix.link == 'static'
&& '-DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON'
|| '' }} \
..
echo "::endgroup::"
printf "::group::\033[0;32m==>\033[0m Building example\n"
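The recurring change in this workflow is replacing the per-entry `include:` variables (`cc`, `cxx`, `compiler-pkgs`, `cuda-pkgs`, `ccache-max`) with inline `${{ condition && value || fallback }}` expressions. A minimal sketch of that idiom, using a hypothetical `demo` job that is not part of this PR:

```yaml
# Hypothetical workflow illustrating the `a && b || c` expression idiom.
# Caveat: it acts like a ternary only while `b` is truthy; if `b` were an
# empty string, the expression would fall through to `c`.
name: expression-demo
on: [push]

jobs:
  demo:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        compiler: [gcc, clang]
    env:
      # Derive the compilers directly from the matrix value.
      CC: ${{ matrix.compiler == 'gcc' && 'gcc' || 'clang' }}
      CXX: ${{ matrix.compiler == 'gcc' && 'g++' || 'clang++' }}
    steps:
      - name: install compiler packages
        run: |
          sudo apt -qq update
          sudo apt install -y \
            ${{ matrix.compiler == 'gcc' && 'g++ gcc' || 'clang' }}
      - name: show selection
        run: echo "CC=$CC CXX=$CXX"
```

Keeping the logic inline means the matrix only has to enumerate the axes (`compiler`, `cuda`, `openmp`, `link`), and the derived values live next to the steps that consume them.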
87 changes: 40 additions & 47 deletions .github/workflows/root-cmakelists.yaml
@@ -30,50 +30,29 @@ jobs:
fail-fast: false

matrix:
compiler: [gcc, clang]
cuda: [with, without]
compiler: [gcc]
cuda: [with]
link: [both]
include:
- compiler: gcc
compiler-pkgs: "g++ gcc"
cc: "gcc"
cxx: "g++"
- compiler: clang
compiler-pkgs: "clang libomp-dev"
cc: "clang"
cxx: "clang++"
# Clang seems to generally require less cache size (smaller object files?).
cuda: with
link: both
- compiler: gcc
ccache-max: 600M
cuda: without
link: both
- compiler: clang
ccache-max: 500M
- cuda: with
cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
cuda-cmake-flags:
-DSUITESPARSE_USE_CUDA=ON
-DSUITESPARSE_USE_STRICT=ON
-DCUDAToolkit_INCLUDE_DIRS="/usr/include"
-DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
cuda: with
link: both
- compiler: clang
cuda: without
link: both
- compiler: gcc
compiler-pkgs: "g++ gcc"
cc: "gcc"
cxx: "g++"
ccache-max: 600M
cuda: with
cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
cuda-cmake-flags:
-DSUITESPARSE_USE_CUDA=ON
-DSUITESPARSE_USE_STRICT=ON
-DCUDAToolkit_INCLUDE_DIRS="/usr/include"
-DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
link: static
link-cmake-flags:
-DBUILD_SHARED_LIBS=OFF
-DBUILD_STATIC_LIBS=ON

env:
CC: ${{ matrix.cc }}
CXX: ${{ matrix.cxx }}
CC: ${{ matrix.compiler == 'gcc' && 'gcc' || 'clang' }}
CXX: ${{ matrix.compiler == 'gcc' && 'g++' || 'clang++' }}

steps:
- name: get CPU information
@@ -83,14 +62,15 @@ jobs:
uses: actions/checkout@v4

- name: install dependencies
env:
COMPILER_PKGS: ${{ matrix.compiler-pkgs }}
CUDA_PKGS: ${{ matrix.cuda-pkgs }}
run: |
sudo apt -qq update
sudo apt install -y ${COMPILER_PKGS} autoconf automake ccache cmake \
sudo apt install -y \
${{ matrix.compiler == 'gcc' && 'g++ gcc' || 'clang' }} \
${{ matrix.compiler == 'clang' && 'libomp-dev' || '' }} \
autoconf automake ccache cmake \
dvipng gfortran libgmp-dev liblapack-dev libmpfr-dev \
libopenblas-dev ${CUDA_PKGS}
libopenblas-dev \
${{ matrix.cuda == 'with' && 'nvidia-cuda-dev nvidia-cuda-toolkit' || '' }}

- name: prepare ccache
# create key with human readable timestamp
@@ -126,11 +106,10 @@ jobs:
sudo mv ./libpthread.a /usr/lib/x86_64-linux-gnu/libpthread.a

- name: configure ccache
env:
CCACHE_MAX: ${{ matrix.ccache-max }}
run: |
test -d ~/.ccache || mkdir ~/.ccache
echo "max_size = $CCACHE_MAX" >> ~/.ccache/ccache.conf
# Clang seems to generally require less cache size (smaller object files?).
echo "max_size = ${{ matrix.compiler == 'gcc' && '600M' || '500M' }}" >> ~/.ccache/ccache.conf
echo "compression = true" >> ~/.ccache/ccache.conf
ccache -s
echo "/usr/lib/ccache" >> $GITHUB_PATH
@@ -154,16 +133,23 @@
- name: configure
run: |
mkdir -p ${GITHUB_WORKSPACE}/build && cd ${GITHUB_WORKSPACE}/build
cmake -DCMAKE_BUILD_TYPE="Release" \
cmake -DCMAKE_BUILD_TYPE=${{ matrix.compiler == 'clang' && 'Debug' || 'Release' }} \
-DCMAKE_INSTALL_PREFIX=".." \
-DCMAKE_C_COMPILER_LAUNCHER="ccache" \
-DCMAKE_CXX_COMPILER_LAUNCHER="ccache" \
-DCMAKE_Fortran_COMPILER_LAUNCHER="ccache" \
-DBLA_VENDOR="OpenBLAS" \
-DSUITESPARSE_DEMOS=OFF \
-DBUILD_TESTING=OFF \
${{ matrix.cuda-cmake-flags }} \
${{ matrix.link-cmake-flags }} \
${{ matrix.cuda == 'with'
&& '-DSUITESPARSE_USE_CUDA=ON \
-DSUITESPARSE_USE_STRICT=ON \
-DCUDAToolkit_INCLUDE_DIRS="/usr/include" \
-DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"'
|| '' }} \
${{ matrix.link == 'static'
&& '-DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON'
|| '' }} \
..

- name: build libraries
@@ -213,8 +199,15 @@ jobs:
cmake \
-DCMAKE_PREFIX_PATH="${GITHUB_WORKSPACE}/lib/cmake" \
-DBLA_VENDOR="OpenBLAS" \
${{ matrix.cuda-cmake-flags }} \
${{ matrix.link-cmake-flags }} \
${{ matrix.cuda == 'with'
&& '-DSUITESPARSE_USE_CUDA=ON \
-DSUITESPARSE_USE_STRICT=ON \
-DCUDAToolkit_INCLUDE_DIRS="/usr/include" \
-DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"'
|| '' }} \
${{ matrix.link == 'static'
&& '-DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON'
|| '' }} \
..
echo "::endgroup::"
printf "::group::\033[0;32m==>\033[0m Building example\n"
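The same expression idiom also replaces the old `cuda-cmake-flags` and `link-cmake-flags` matrix variables: a whole group of flags is emitted as one quoted string when the condition holds, and an empty string (or an explicit OFF flag) otherwise. Since `${{ }}` is substituted before the shell runs the script, the chosen string simply becomes extra command-line arguments. A trimmed-down, hypothetical sketch of that pattern (not this workflow verbatim):

```yaml
# Hypothetical job demonstrating conditional flag groups in a run script.
jobs:
  configure:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        cuda: [with, without]
        link: [both, static]
    steps:
      - uses: actions/checkout@v4
      - name: configure
        run: |
          mkdir -p build && cd build
          # Each expression below expands to a group of flags or to nothing.
          cmake \
            -DCMAKE_BUILD_TYPE="Release" \
            ${{ matrix.cuda == 'with'
                && '-DSUITESPARSE_USE_CUDA=ON -DSUITESPARSE_USE_STRICT=ON'
                || '-DSUITESPARSE_USE_CUDA=OFF' }} \
            ${{ matrix.link == 'static'
                && '-DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON'
                || '' }} \
            ..
```

This mirrors what the diff above does for the CUDA and static-link flag groups: each set of options sits next to the `cmake` invocation that uses it instead of in a separate matrix `include:` entry.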
12 changes: 3 additions & 9 deletions CHOLMOD/CMakeLists.txt
@@ -573,15 +573,9 @@ endif ( )

# CHOLMOD_CUDA
if ( CHOLMOD_HAS_CUDA )
# cmake now configures cholmod.h with "#define CHOLMOD_HAS_CUDA" if CHOLMOD is
# being compiled with CUDA, so the -DCHOLMOD_HAS_CUDA flag is no longer needed.
# if ( BUILD_SHARED_LIBS )
# target_compile_definitions ( CHOLMOD PUBLIC "CHOLMOD_HAS_CUDA" )
# endif ( )
# set ( CHOLMOD_CFLAGS "${CHOLMOD_CFLAGS} -DCHOLMOD_HAS_CUDA" )
# if ( BUILD_STATIC_LIBS )
# target_compile_definitions ( CHOLMOD_static PUBLIC "CHOLMOD_HAS_CUDA" )
# endif ( )
if ( NOT CHOLMOD_HAS_OPENMP )
message ( FATAL_ERROR "CHOLMOD_CUDA requires OpenMP. But it has been disabled, or no working OpenMP could be found." )
endif ()
if ( BUILD_SHARED_LIBS )
target_link_libraries ( CHOLMOD PRIVATE CUDA::nvrtc CUDA::cudart_static CUDA::cublas )
target_include_directories ( CHOLMOD INTERFACE
26 changes: 26 additions & 0 deletions ChangeLog
@@ -1,3 +1,29 @@
Dec 20, 2024: version 7.9.0

* GraphBLAS v9.4.3: Added the GxB_IndexBinaryOp. Added new JIT kernels.
Disabled more FactoryKernels to reduce compiled library size.
* Package versions in this release: (* denotes a new version)
SuiteSparse_config 7.9.0 *
AMD 3.3.3
BTF 2.3.2
CAMD 3.3.3
CCOLAMD 3.3.4
CHOLMOD 5.3.0
COLAMD 3.3.4
CSparse 4.3.2
CXSparse 4.4.1
Example 1.8.4 *
GraphBLAS 9.4.3 *
KLU 2.3.5
LDL 3.3.2
LAGraph 1.1.4
SuiteSparse_Mongoose 3.3.4
ParU 1.0.0
RBio 4.3.4
SPEX 3.2.1
SPQR 4.3.4
UMFPACK 6.3.5

Oct 10, 2024: version 7.8.3

* ParU 1.0.0: first stable release. No change since last version
8 changes: 4 additions & 4 deletions Example/CMakeLists.txt
@@ -53,10 +53,10 @@ message ( STATUS "MY prefix path: ${CMAKE_PREFIX_PATH}" )
#-------------------------------------------------------------------------------

# cmake inserts the date and version number into Include/my.h:
set ( MY_DATE "Oct 10, 2024" )
set ( MY_DATE "Dec 20, 2024" )
set ( MY_VERSION_MAJOR 1 )
set ( MY_VERSION_MINOR 8 )
set ( MY_VERSION_PATCH 3 )
set ( MY_VERSION_PATCH 4 )

message ( STATUS "Building MY library version: v"
${MY_VERSION_MAJOR}.
@@ -87,15 +87,15 @@ project ( my
#-------------------------------------------------------------------------------

# look for all SuiteSparse packages:
find_package ( SuiteSparse_config 7.8.3 REQUIRED )
find_package ( SuiteSparse_config 7.9.0 REQUIRED )
find_package ( AMD 3.3.3 REQUIRED )
find_package ( BTF 2.3.2 REQUIRED )
find_package ( CAMD 3.3.3 REQUIRED )
find_package ( CCOLAMD 3.3.4 REQUIRED )
find_package ( CHOLMOD 5.3.0 REQUIRED )
find_package ( COLAMD 3.3.4 REQUIRED )
find_package ( CXSparse 4.4.1 REQUIRED )
find_package ( GraphBLAS 9.3.1 )
find_package ( GraphBLAS 9.4.3 )
find_package ( KLU 2.3.5 REQUIRED )
find_package ( KLU_CHOLMOD 2.3.5 REQUIRED )
find_package ( LDL 3.3.2 REQUIRED )