@@ -237,7 +237,6 @@ class Eig(Op):
237
237
238
238
"""
239
239
240
- _numop = staticmethod (np .linalg .eig )
241
240
__props__ : Union [Tuple , Tuple [str ]] = ()
242
241
243
242
def make_node (self , x ):
@@ -250,7 +249,7 @@ def make_node(self, x):
250
249
def perform(self, node, inputs, outputs):
    """Compute eigenvalues and right eigenvectors of the input matrix.

    Both results are cast back to the input's dtype before being stored
    in the output storage cells.
    """
    (x,) = inputs
    w_storage, v_storage = outputs
    eigvals, eigvecs = np.linalg.eig(x)
    w_storage[0] = eigvals.astype(x.dtype)
    v_storage[0] = eigvecs.astype(x.dtype)
254
253
255
254
def infer_shape (self , fgraph , node , shapes ):
256
255
n = shapes [0 ][0 ]
@@ -266,7 +265,6 @@ class Eigh(Eig):
266
265
267
266
"""
268
267
269
- _numop = staticmethod (np .linalg .eigh )
270
268
__props__ = ("UPLO" ,)
271
269
272
270
def __init__ (self , UPLO = "L" ):
@@ -281,15 +279,15 @@ def make_node(self, x):
281
279
# LAPACK. Rather than trying to reproduce the (rather
282
280
# involved) logic, we just probe linalg.eigh with a trivial
283
281
# input.
284
- w_dtype = self . _numop ([[np .dtype (x .dtype ).type ()]])[0 ].dtype .name
282
+ w_dtype = np . linalg . eigh ([[np .dtype (x .dtype ).type ()]])[0 ].dtype .name
285
283
w = vector (dtype = w_dtype )
286
284
v = matrix (dtype = w_dtype )
287
285
return Apply (self , [x ], [w , v ])
288
286
289
287
def perform(self, node, inputs, outputs):
    """Compute eigenvalues and eigenvectors via ``np.linalg.eigh``,
    using the triangle of the matrix selected by ``self.UPLO``.
    """
    (x,) = inputs
    eigvals, eigvecs = np.linalg.eigh(x, self.UPLO)
    outputs[0][0] = eigvals
    outputs[1][0] = eigvecs
293
291
294
292
def grad (self , inputs , g_outputs ):
295
293
r"""The gradient function should return
@@ -412,7 +410,6 @@ class QRFull(Op):
412
410
413
411
"""
414
412
415
- _numop = staticmethod (np .linalg .qr )
416
413
__props__ = ("mode" ,)
417
414
418
415
def __init__ (self , mode ):
@@ -444,7 +441,7 @@ def make_node(self, x):
444
441
def perform (self , node , inputs , outputs ):
445
442
(x ,) = inputs
446
443
assert x .ndim == 2 , "The input of qr function should be a matrix."
447
- res = self . _numop (x , self .mode )
444
+ res = np . linalg . qr (x , self .mode )
448
445
if self .mode != "r" :
449
446
outputs [0 ][0 ], outputs [1 ][0 ] = res
450
447
else :
@@ -513,7 +510,6 @@ class SVD(Op):
513
510
"""
514
511
515
512
# See doc in the docstring of the function just after this class.
516
- _numop = staticmethod (np .linalg .svd )
517
513
__props__ = ("full_matrices" , "compute_uv" )
518
514
519
515
def __init__ (self , full_matrices = True , compute_uv = True ):
@@ -541,10 +537,10 @@ def perform(self, node, inputs, outputs):
541
537
assert x .ndim == 2 , "The input of svd function should be a matrix."
542
538
if self .compute_uv :
543
539
u , s , vt = outputs
544
- u [0 ], s [0 ], vt [0 ] = self . _numop (x , self .full_matrices , self .compute_uv )
540
+ u [0 ], s [0 ], vt [0 ] = np . linalg . svd (x , self .full_matrices , self .compute_uv )
545
541
else :
546
542
(s ,) = outputs
547
- s [0 ] = self . _numop (x , self .full_matrices , self .compute_uv )
543
+ s [0 ] = np . linalg . svd (x , self .full_matrices , self .compute_uv )
548
544
549
545
def infer_shape (self , fgraph , node , shapes ):
550
546
(x_shape ,) = shapes
@@ -696,7 +692,6 @@ class TensorInv(Op):
696
692
Aesara utilization of numpy.linalg.tensorinv;
697
693
"""
698
694
699
- _numop = staticmethod (np .linalg .tensorinv )
700
695
__props__ = ("ind" ,)
701
696
702
697
def __init__ (self , ind = 2 ):
@@ -710,7 +705,7 @@ def make_node(self, a):
710
705
def perform(self, node, inputs, outputs):
    """Store the tensor inverse of the input, treating the first
    ``self.ind`` axes as the "inverted" dimensions (see
    ``np.linalg.tensorinv``).
    """
    (a,) = inputs
    result = np.linalg.tensorinv(a, self.ind)
    outputs[0][0] = result
714
709
715
710
def infer_shape (self , fgraph , node , shapes ):
716
711
sp = shapes [0 ][self .ind :] + shapes [0 ][: self .ind ]
@@ -756,7 +751,6 @@ class TensorSolve(Op):
756
751
757
752
"""
758
753
759
- _numop = staticmethod (np .linalg .tensorsolve )
760
754
__props__ = ("axes" ,)
761
755
762
756
def __init__ (self , axes = None ):
@@ -770,12 +764,9 @@ def make_node(self, a, b):
770
764
return Apply (self , [a , b ], [x ])
771
765
772
766
def perform(self, node, inputs, outputs):
    """Solve the tensor equation ``a x = b`` for ``x`` using
    ``np.linalg.tensorsolve`` with the axes stored on the Op.
    """
    a, b = inputs
    solution = np.linalg.tensorsolve(a, b, self.axes)
    outputs[0][0] = solution
779
770
780
771
781
772
def tensorsolve (a , b , axes = None ):
0 commit comments