
Commit fb801d5

fix build issues
1 parent e6ff82d commit fb801d5

6 files changed (+202 / -7 lines)


.github/workflows/build.yml

Lines changed: 2 additions & 2 deletions
@@ -184,8 +184,8 @@ jobs:
           path: /Users/runner/Library/Caches/ccache
           key: ccache-${{ runner.os }}-${{ matrix.py-version }}

-      - name: Setup GNU Fortran
-        uses: fortran-lang/setup-fortran@2bbdcb313d2b7e72d1d07caf87f3ef9f376d9224 # v1.6.3
+      # - name: Setup GNU Fortran
+      # uses: fortran-lang/setup-fortran@2bbdcb313d2b7e72d1d07caf87f3ef9f376d9224 # v1.6.3

       - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:

base/PyNucleus_base/CSR_LinearOperator_{SCALAR}.pxi

Lines changed: 4 additions & 0 deletions
@@ -704,3 +704,7 @@ cdef class {SCALAR_label}CSR_VectorLinearOperator({SCALAR_label}VectorLinearOper
         for i in range(self.data.shape[0]):
             for j in range(self.data.shape[1]):
                 self.data[i, j] = 0.
+
+    def getOp(self, INDEX_t opNo):
+        assert 0 <= opNo < self.vectorSize
+        return {SCALAR_label}CSR_LinearOperator(self.indices, self.indptr, np.ascontiguousarray(self.data[:, opNo]))

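For intuition, here is a hedged SciPy sketch of what the new getOp does in the CSR case: all vector components share one sparsity pattern (indptr/indices), and component opNo is obtained by slicing one column out of the 2D data array and wrapping it as an ordinary scalar CSR matrix. Only indices, indptr, data, vectorSize, num_columns and getOp mirror the diff; the VectorCSR helper and the sample numbers are made up for illustration.

import numpy as np
import scipy.sparse as sp

class VectorCSR:
    # Hypothetical stand-in for {SCALAR_label}CSR_VectorLinearOperator:
    # one sparsity pattern shared by all components, data has shape (nnz, vectorSize).
    def __init__(self, indices, indptr, data, num_columns):
        self.indices, self.indptr, self.data = indices, indptr, data
        self.num_columns = num_columns
        self.vectorSize = data.shape[1]

    def getOp(self, opNo):
        # Slice out component opNo; ascontiguousarray mirrors the diff, since
        # data[:, opNo] is a strided view and CSR wants a contiguous 1D value array.
        assert 0 <= opNo < self.vectorSize
        vals = np.ascontiguousarray(self.data[:, opNo])
        num_rows = len(self.indptr) - 1
        return sp.csr_matrix((vals, self.indices, self.indptr),
                             shape=(num_rows, self.num_columns))

# usage: a 2x2 operator with vectorSize=2, one entry per row
indptr = np.array([0, 1, 2])
indices = np.array([0, 1])
data = np.array([[1.0, 10.0], [2.0, 20.0]])   # (nnz, vectorSize)
A = VectorCSR(indices, indptr, data, num_columns=2)
print(A.getOp(1).toarray())                   # component 1: diag(10, 20)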
base/PyNucleus_base/DenseLinearOperator_{SCALAR}.pxi

Lines changed: 7 additions & 3 deletions
@@ -246,15 +246,15 @@ cdef class {SCALAR_label}Dense_VectorLinearOperator({SCALAR_label}VectorLinearOp

     @staticmethod
     def zeros(INDEX_t num_rows, INDEX_t num_columns, INDEX_t vectorSize):
-        return Dense_LinearOperator(np.zeros((num_rows, num_columns, vectorSize), dtype={SCALAR}))
+        return {SCALAR_label}Dense_VectorLinearOperator(np.zeros((num_rows, num_columns, vectorSize), dtype={SCALAR}))

     @staticmethod
     def ones(INDEX_t num_rows, INDEX_t num_columns, INDEX_t vectorSize):
-        return Dense_LinearOperator(np.ones((num_rows, num_columns, vectorSize), dtype={SCALAR}))
+        return {SCALAR_label}Dense_VectorLinearOperator(np.ones((num_rows, num_columns, vectorSize), dtype={SCALAR}))

     @staticmethod
     def empty(INDEX_t num_rows, INDEX_t num_columns, INDEX_t vectorSize):
-        return Dense_LinearOperator(uninitialized((num_rows, num_columns, vectorSize), dtype={SCALAR}))
+        return {SCALAR_label}Dense_VectorLinearOperator(uninitialized((num_rows, num_columns, vectorSize), dtype={SCALAR}))

     def getMemorySize(self):
         return self.data.shape[0]*self.data.shape[1]*self.data.shape[2]*sizeof({SCALAR}_t)

@@ -272,3 +272,7 @@ cdef class {SCALAR_label}Dense_VectorLinearOperator({SCALAR_label}VectorLinearOp
                        self.num_columns,
                        self.vectorSize,
                        self.__class__.__name__)
+
+    def getOp(self, INDEX_t opNo):
+        assert 0 <= opNo < self.vectorSize
+        return {SCALAR_label}Dense_LinearOperator(np.ascontiguousarray(self.data[:, :, opNo]))

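A quick note on why the dense getOp wraps the slice in np.ascontiguousarray: slicing the last axis of a C-contiguous (num_rows, num_columns, vectorSize) array yields a strided view, while the scalar dense operator expects a contiguous 2D block (a [:, ::1] memoryview). A small stand-alone NumPy check; the shapes are chosen arbitrarily for the example:

import numpy as np

rows, cols, vectorSize = 3, 4, 2
data = np.arange(rows * cols * vectorSize, dtype=np.float64).reshape(rows, cols, vectorSize)

view = data[:, :, 1]                     # strided view: skips every other value
print(view.flags['C_CONTIGUOUS'])        # False

component = np.ascontiguousarray(view)   # contiguous copy, safe for a [:, ::1] layout
print(component.flags['C_CONTIGUOUS'])   # True
print(component.shape)                   # (3, 4)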
base/PyNucleus_base/LinearOperator_decl_{SCALAR}.pxi

Lines changed: 7 additions & 0 deletions
@@ -121,6 +121,13 @@ cdef class {SCALAR_label}VectorLinearOperator:
     cdef void setEntry(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val)


+cdef class {SCALAR_label}SumVectorLinearOperator({SCALAR_label}VectorLinearOperator):
+    cdef:
+        public {SCALAR_label}VectorLinearOperator M1, M2
+        public {SCALAR}_t facM1, facM2
+        {SCALAR}_t[:, ::1] z
+
+
 cdef class {SCALAR_label}Transpose_Linear_Operator({SCALAR_label}LinearOperator):
     cdef:
         public {SCALAR_label}LinearOperator A

base/PyNucleus_base/LinearOperator_{SCALAR}.pxi

Lines changed: 180 additions & 1 deletion
@@ -271,7 +271,7 @@ cdef class {SCALAR_label}LinearOperator:
         elif node.attrs['type'] == 'multiIntervalInterpolationOperator':
             return multiIntervalInterpolationOperator.HDF5read(node)
         elif node.attrs['type'] == 'h2':
-            from PyNucleus_nl.clusterMethodCy import H2Matrix
+            from PyNucleus_nl.clusterMethod import H2Matrix
             return H2Matrix.HDF5read(node)
         else:
             raise NotImplementedError(node.attrs['type'])

@@ -687,6 +687,15 @@ cdef class {SCALAR_label}VectorLinearOperator:
         else:
             self.matvecTrans(x, y)

+    def __add__(self, x):
+        if isinstance(x, {SCALAR_label}VectorLinearOperator):
+            return {SCALAR_label}SumVectorLinearOperator(M1=self, M2=x, facM1=1.0, facM2=1.0)
+        else:
+            raise NotImplementedError('Cannot add with {}'.format(x))
+
+    def __sub__(self, x):
+        return self + (-1.*x)
+
     def __mul__(self, x):
         cdef:
             np.ndarray[{SCALAR}_t, ndim=2] y

@@ -774,6 +783,176 @@
     def getEntry_py(self, INDEX_t I, INDEX_t J, {SCALAR}_t[::1] val):
         return self.getEntry(I, J, val)

+    def getOp(self, INDEX_t opNo):
+        raise NotImplementedError()
+
+
+cdef void scaleScalar2D{SCALAR_label}({SCALAR}_t[:, ::1] x, {SCALAR}_t fac):
+    cdef:
+        INDEX_t i, j
+    for i in range(x.shape[0]):
+        for j in range(x.shape[1]):
+            x[i, j] *= fac
+
+
+cdef void assign3_2D{SCALAR_label}({SCALAR}_t[:, ::1] z, {SCALAR}_t[:, ::1] x, {SCALAR}_t alpha, {SCALAR}_t[:, ::1] y, {SCALAR}_t beta):
+    cdef:
+        INDEX_t i, j
+    for i in range(x.shape[0]):
+        for j in range(x.shape[1]):
+            z[i, j] = alpha*x[i, j]+beta*y[i, j]
+
+
+cdef class {SCALAR_label}SumVectorLinearOperator({SCALAR_label}VectorLinearOperator):
+    def __init__(self,
+                 {SCALAR_label}VectorLinearOperator M1,
+                 {SCALAR_label}VectorLinearOperator M2,
+                 {SCALAR}_t facM1=1.0,
+                 {SCALAR}_t facM2=1.0):
+        assert M1.num_columns == M2.num_columns
+        assert M1.num_rows == M2.num_rows
+        assert M1.vectorSize == M2.vectorSize
+        super({SCALAR_label}SumVectorLinearOperator, self).__init__(M1.num_rows, M1.num_columns, M1.vectorSize)
+        self.M1 = M1
+        self.M2 = M2
+        self.facM1 = facM1
+        self.facM2 = facM2
+        self.z = uninitialized((self.M1.shape[0], self.M1.vectorSize), dtype={SCALAR})
+
+    cdef INDEX_t matvec(self,
+                        {SCALAR}_t[::1] x,
+                        {SCALAR}_t[:, ::1] y) except -1:
+        if self.facM2 != 0.:
+            self.M2.matvec(x, y)
+            if self.facM2 != 1.0:
+                scaleScalar2D{SCALAR_label}(y, self.facM2)
+            if self.facM1 == 1.0:
+                self.M1.matvec_no_overwrite(x, y)
+            else:
+                self.M1.matvec(x, self.z)
+                assign3_2D{SCALAR_label}(y, y, 1.0, self.z, self.facM1)
+        else:
+            if self.facM1 == 1.0:
+                self.M1.matvec(x, y)
+            else:
+                self.M1.matvec(x, y)
+                scaleScalar2D{SCALAR_label}(y, self.facM1)
+        return 0
+
+    cdef INDEX_t matvec_no_overwrite(self,
+                                     {SCALAR}_t[::1] x,
+                                     {SCALAR}_t[:, ::1] y) except -1:
+        if self.facM2 == 1.0:
+            self.M2.matvec_no_overwrite(x, y)
+        elif self.facM2 != 0.:
+            self.M2.matvec(x, self.z)
+            assign3_2D{SCALAR_label}(y, y, 1.0, self.z, self.facM2)
+        if self.facM1 == 1.0:
+            self.M1.matvec_no_overwrite(x, y)
+        elif self.facM1 != 0.:
+            self.M1.matvec(x, self.z)
+            assign3_2D{SCALAR_label}(y, y, 1.0, self.z, self.facM1)
+        return 0
+
+    cdef INDEX_t matvecTrans(self,
+                             {SCALAR}_t[::1] x,
+                             {SCALAR}_t[:, ::1] y) except -1:
+        if self.facM2 != 0.:
+            self.M2.matvecTrans(x, y)
+            if self.facM2 != 1.0:
+                scaleScalar2D{SCALAR_label}(y, self.facM2)
+            if self.facM1 == 1.0:
+                self.M1.matvecTrans_no_overwrite(x, y)
+            else:
+                self.M1.matvecTrans(x, self.z)
+                assign3_2D{SCALAR_label}(y, y, 1.0, self.z, self.facM1)
+        return 0
+
+    cdef INDEX_t matvecTrans_no_overwrite(self,
+                                          {SCALAR}_t[::1] x,
+                                          {SCALAR}_t[:, ::1] y) except -1:
+        if self.facM2 == 1.0:
+            self.M2.matvecTrans_no_overwrite(x, y)
+        elif self.facM2 != 0.:
+            self.M2.matvecTrans(x, self.z)
+            assign3_2D{SCALAR_label}(y, y, 1.0, self.z, self.facM2)
+        if self.facM1 == 1.0:
+            self.M1.matvecTrans_no_overwrite(x, y)
+        elif self.facM1 != 0.:
+            self.M1.matvecTrans(x, self.z)
+            assign3_2D{SCALAR_label}(y, y, 1.0, self.z, self.facM1)
+        return 0
+
+    def get_diagonal(self):
+        return (self.facM1*np.array(self.M1.diagonal, copy=False) +
+                self.facM2*np.array(self.M2.diagonal, copy=False))
+
+    diagonal = property(fget=get_diagonal)
+
+    def __repr__(self):
+        if np.real(self.facM2) >= 0:
+            if self.facM1 != 1.0:
+                if self.facM2 != 1.0:
+                    return '{}*{} + {}*{}'.format(self.facM1, self.M1, self.facM2, self.M2)
+                else:
+                    return '{}*{} + {}'.format(self.facM1, self.M1, self.M2)
+            else:
+                if self.facM2 != 1.0:
+                    return '{} + {}*{}'.format(self.M1, self.facM2, self.M2)
+                else:
+                    return '{} + {}'.format(self.M1, self.M2)
+        else:
+            if self.facM1 != 1.0:
+                if self.facM2 != -1.0:
+                    return '{}*{} - {}*{}'.format(self.facM1, self.M1, -self.facM2, self.M2)
+                else:
+                    return '{}*{} - {}'.format(self.facM1, self.M1, self.M2)
+            else:
+                if self.facM2 != -1.0:
+                    return '{} - {}*{}'.format(self.M1, -self.facM2, self.M2)
+                else:
+                    return '{} - {}'.format(self.M1, self.M2)
+
+    def to_csr_linear_operator(self):
+        if isinstance(self.M2, {SCALAR_label}Dense_LinearOperator):
+            return {SCALAR_label}Dense_LinearOperator(self.facM1*self.M1.toarray() + self.facM2*self.M2.toarray())
+        else:
+            B = self.facM1*self.M1.to_csr() + self.facM2*self.M2.to_csr()
+            B.eliminate_zeros()
+            C = {SCALAR_label}CSR_LinearOperator(B.indices, B.indptr, B.data)
+            C.num_columns = self.M2.num_columns
+            return C
+
+    def isSparse(self):
+        return self.M1.isSparse() and self.M2.isSparse()
+
+    def to_csr(self):
+        cdef {SCALAR_label}CSR_LinearOperator csr
+        csr = self.to_csr_linear_operator()
+        return csr.to_csr()
+
+    def toarray(self):
+        return self.facM1*self.M1.toarray() + self.facM2*self.M2.toarray()
+
+    def getnnz(self):
+        return self.M1.nnz+self.M2.nnz
+
+    nnz = property(fget=getnnz)
+
+    def __mul__(self, x):
+        if isinstance(self, {SCALAR_label}SumVectorLinearOperator) and isinstance(x, ({SCALAR}, float, int)):
+            return {SCALAR_label}SumVectorLinearOperator(M1=self.M1, M2=self.M2, facM1=self.facM1*x, facM2=self.facM2*x)
+        elif isinstance(x, {SCALAR_label}SumVectorLinearOperator) and isinstance(self, ({SCALAR}, float, int)):
+            return {SCALAR_label}SumVectorLinearOperator(M1=x.M1, M2=x.M2, facM1=x.facM1*self, facM2=x.facM2*self)
+        else:
+            return super({SCALAR_label}SumVectorLinearOperator, self).__mul__(x)
+
+    def getMemorySize(self):
+        return self.M1.getMemorySize()+self.M2.getMemorySize()
+
+    def getOp(self, INDEX_t opNo):
+        return self.facM1*self.M1.getOp(opNo)+self.facM2*self.M2.getOp(opNo)
+

 cdef class {SCALAR_label}Transpose_Linear_Operator({SCALAR_label}LinearOperator):
     def __init__(self,

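For readers unfamiliar with the pattern, the new SumVectorLinearOperator is a lazy operator sum: __add__ builds a wrapper that keeps both operands plus the scale factors facM1/facM2, and its matvec combines the children's matvecs, reusing the scratch buffer z instead of allocating. The following is a hedged plain-NumPy sketch of the same idea, not the Cython implementation; DenseVectorOperator and SumVectorOperator are made-up names, while M1, M2, facM1, facM2, matvec and vectorSize follow the diff.

import numpy as np

class DenseVectorOperator:
    # Toy base operator: data has shape (num_rows, num_columns, vectorSize).
    def __init__(self, data):
        self.data = data
        self.shape = (data.shape[0], data.shape[1])
        self.vectorSize = data.shape[2]

    def matvec(self, x, y):
        # y[i, k] = sum_j data[i, j, k] * x[j]
        np.einsum('ijk,j->ik', self.data, x, out=y)

    def __add__(self, other):
        # mirrors the diff's VectorLinearOperator.__add__: build a lazy sum
        return SumVectorOperator(self, other)

class SumVectorOperator:
    # Lazy facM1*M1 + facM2*M2 over vector-valued operators.
    def __init__(self, M1, M2, facM1=1.0, facM2=1.0):
        assert M1.shape == M2.shape and M1.vectorSize == M2.vectorSize
        self.M1, self.M2, self.facM1, self.facM2 = M1, M2, facM1, facM2
        self.shape, self.vectorSize = M1.shape, M1.vectorSize
        # scratch buffer, analogous to self.z in the diff
        self.z = np.empty((M1.shape[0], M1.vectorSize))

    def matvec(self, x, y):
        # y <- facM2*(M2 x) + facM1*(M1 x), with z holding M1's intermediate result
        self.M2.matvec(x, y)
        y *= self.facM2
        self.M1.matvec(x, self.z)
        y += self.facM1 * self.z

# usage: (A + B).matvec(x, y) fills y with the component-wise sum of A@x and B@x
A = DenseVectorOperator(np.random.rand(3, 4, 2))
B = DenseVectorOperator(np.random.rand(3, 4, 2))
S = A + B
x = np.random.rand(4)
y = np.empty((3, 2))
S.matvec(x, y)
assert np.allclose(y, np.einsum('ijk,j->ik', A.data + B.data, x))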
multilevelSolver/PyNucleus_multilevelSolver/__init__.py

Lines changed: 2 additions & 1 deletion
@@ -9,7 +9,8 @@
 from . connectors import (inputConnector,
                           repartitionConnector,
                           pCoarsenConnector)
-from . multigrid import V, W, FMG_V, FMG_W
+from . multigrid import CycleType
+globals().update(getattr(CycleType, '__members__'))
 from . geometricMG import paramsForMG, paramsForSerialMG

 from PyNucleus_base import solverFactory

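The __init__.py change replaces the explicit V, W, FMG_V, FMG_W imports with a CycleType import and then injects every member of CycleType into the module namespace via globals().update(...), so code that refers to the bare names keeps working. Below is a hedged stand-alone sketch of the mechanism, assuming CycleType is an enum whose members include those four names; the real class lives in PyNucleus_multilevelSolver.multigrid.

from enum import Enum

# Stand-in for multigrid.CycleType; the member set is assumed to match
# the names that the old `from . multigrid import V, W, FMG_V, FMG_W` pulled in.
class CycleType(Enum):
    V = 1
    W = 2
    FMG_V = 3
    FMG_W = 4

# Enum.__members__ maps member names to members, so this re-exports them
# as module-level names, preserving the old import-by-name behaviour.
globals().update(getattr(CycleType, '__members__'))

print(V, W, FMG_V, FMG_W)   # CycleType.V CycleType.W CycleType.FMG_V CycleType.FMG_W
print(V is CycleType.V)     # True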