```diff
@@ -42,7 +42,7 @@
 # least_squares 0.17
 
 
-from scipy.optimize import differential_evolution as scipy_diffev
+from scipy.optimize import differential_evolution
 
 # check for scipy.optimize.least_squares
 HAS_LEAST_SQUARES = False
```
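The `HAS_LEAST_SQUARES` flag points at a guarded import of `scipy.optimize.least_squares`, which first appeared in scipy 0.17. A minimal sketch of that pattern, assuming the usual try/except idiom rather than the commit's exact code:

```python
# Guarded import: scipy.optimize.least_squares only exists in scipy >= 0.17.
HAS_LEAST_SQUARES = False
try:
    from scipy.optimize import least_squares
    HAS_LEAST_SQUARES = True
except ImportError:
    pass
```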
```diff
@@ -707,7 +707,7 @@ def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
                     raise ValueError('differential_evolution requires finite '
                                      'bound for all varying parameters')
 
-            internal_bounds = [(-np.pi / 2., np.pi / 2.)] * len(vars)
+            _bounds = [(-np.pi / 2., np.pi / 2.)] * len(vars)
             kwargs = dict(args=(), strategy='best1bin', maxiter=None,
                           popsize=15, tol=0.01, mutation=(0.5, 1),
                           recombination=0.7, seed=None, callback=None,
```
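The `(-np.pi / 2., np.pi / 2.)` bounds reflect how lmfit handles box constraints: each bounded parameter is mapped onto an unbounded internal variable via a MINUIT-style sine transform, and `sin` is monotonic on exactly that interval, so it already spans the full `(min, max)` range. A sketch of that mapping, assuming the documented transform (`from_internal` is an illustrative name, not lmfit's API):

```python
import numpy as np

# MINUIT-style bound transform: any internal x in (-pi/2, pi/2) maps
# monotonically to an external value in (vmin, vmax), so differential
# evolution can search a finite box in internal coordinates.
def from_internal(x, vmin, vmax):
    return vmin + (np.sin(x) + 1.0) / 2.0 * (vmax - vmin)
```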
```diff
@@ -716,7 +716,7 @@ def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
             for k, v in fmin_kws.items():
                 if k in kwargs:
                     kwargs[k] = v
-            ret = scipy_diffev(self.penalty, internal_bounds, **kwargs)
+            ret = differential_evolution(self.penalty, _bounds, **kwargs)
         else:
             ret = scipy_minimize(self.penalty, vars, **fmin_kws)
 
```
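For comparison, `scipy.optimize.differential_evolution` can be called directly with the same defaults the hunk above passes through; a standalone usage sketch on a toy objective (nothing here is lmfit code):

```python
from scipy.optimize import differential_evolution

# Rosenbrock function as a stand-in objective; global minimum at (1, 1).
def rosenbrock(x):
    return (1.0 - x[0]) ** 2 + 100.0 * (x[1] - x[0] ** 2) ** 2

result = differential_evolution(rosenbrock, [(-2.0, 2.0), (-1.0, 3.0)],
                                strategy='best1bin', popsize=15, tol=0.01,
                                mutation=(0.5, 1), recombination=0.7, seed=42)
print(result.x, result.fun)  # expect roughly [1. 1.] and ~0
```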
```diff
@@ -920,8 +920,8 @@ def emcee(self, params=None, steps=1000, nwalkers=100, burn=0, thin=1,
         `is_weighted is False` then the data uncertainty, `s_n`, will be
         treated as a nuisance parameter and will be marginalized out. This is
         achieved by employing a strictly positive uncertainty
-        (homoscedasticity) for each data point, :math:`s_n = \exp(\_lnsigma)`.
-        `_lnsigma` will be present in `MinimizerResult.params`, as well as
+        (homoscedasticity) for each data point, :math:`s_n = \exp(\_\_lnsigma)`.
+        `__lnsigma` will be present in `MinimizerResult.params`, as well as
         `Minimizer.chain`; `nvarys` will also be increased by one.
 
         References
```
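The docstring fix only renames `_lnsigma` to `__lnsigma`, but the idea it describes is easy to sketch: with unweighted data, a single nuisance scale `s_n = exp(__lnsigma)` enters a Gaussian log-probability, and sampling over it alongside the model parameters is what marginalizes the unknown uncertainty out. An illustrative version (a hypothetical `lnprob`, not lmfit's internal code):

```python
import numpy as np

# Gaussian log-probability with one homoscedastic nuisance scale,
# s_n = exp(lnsigma), shared by every data point; emcee-style sampling
# over lnsigma (together with the model parameters) marginalizes the
# unknown data uncertainty out.
def lnprob(residual, lnsigma):
    s2 = np.exp(2.0 * lnsigma)  # s_n ** 2
    return -0.5 * np.sum(residual ** 2 / s2 + np.log(2.0 * np.pi * s2))
```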