diff --git a/pyproject.toml b/pyproject.toml
index 67f92428..c22f77fc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,7 +2,7 @@
 [project]
 name = "PyVBMC"
-dynamic = ["version"] # use git tags for version, via setuptools_scm
+dynamic = ["version"]  # use git tags for version, via setuptools_scm
 description = "Variational Bayesian Monte Carlo in Python."
 readme = "README.md"
 license = { file = "LICENSE" }
@@ -12,8 +12,8 @@ dependencies = [
     "dill >= 0.3.5.1",
     "gpyreg >= 0.1.0",
     "imageio >= 2.13.5",
-    "matplotlib >= 3.5.1",
-    "numpy >= 1.22.1",
+    "matplotlib >= 3.9.0",
+    "numpy < 2.0.0",
     "plotly >= 5.11.0",
     "pytest >= 6.2.5",
     "pytest-mock >= 3.6.1",
@@ -26,7 +26,7 @@ requires-python = ">=3.9"
 include-package-data = true
 # Include examples in binary/wheel distribution:
 packages = ["pyvbmc", "pyvbmc.examples"]
-package-dir = {"pyvbmc.examples" = "examples"}
+package-dir = { "pyvbmc.examples" = "examples" }

 [tool.setuptools.package-data]
 # Make sure to include example Notebooks:
@@ -45,10 +45,7 @@ dev = [
 ]

 [build-system]
-requires = [
-    "setuptools >= 45",
-    "setuptools_scm[toml] >= 6.2",
-]
+requires = ["setuptools >= 45", "setuptools_scm[toml] >= 6.2"]
 build-backend = "setuptools.build_meta"

 [tool.black]
diff --git a/pyvbmc/acquisition_functions/abstract_acq_fcn.py b/pyvbmc/acquisition_functions/abstract_acq_fcn.py
index 9ad8db23..b49ad19b 100644
--- a/pyvbmc/acquisition_functions/abstract_acq_fcn.py
+++ b/pyvbmc/acquisition_functions/abstract_acq_fcn.py
@@ -135,7 +135,7 @@ def __call__(
             np.any(X_orig < optim_state.get("lb_eps_orig"), axis=1),
             np.any(X_orig > optim_state.get("ub_eps_orig"), axis=1),
         )
-        acq[idx_bounds] = np.Inf
+        acq[idx_bounds] = np.inf

         # Re-shape to 1-D, if necessary (to avoid errors in cma.fmin)
         if acq.ndim > 1:
diff --git a/pyvbmc/stats/get_hpd.py b/pyvbmc/stats/get_hpd.py
index 2b2546fe..46b3397a 100644
--- a/pyvbmc/stats/get_hpd.py
+++ b/pyvbmc/stats/get_hpd.py
@@ -40,6 +40,6 @@ def get_hpd(X: np.ndarray, y: np.ndarray, hpd_frac: float = 0.8):
     if hpd_N > 0:
         hpd_range = np.max(hpd_X, axis=0) - np.min(hpd_X, axis=0)
     else:
-        hpd_range = np.full((D), np.NaN)
+        hpd_range = np.full((D), np.nan)

     return hpd_X, hpd_y, hpd_range, indices
diff --git a/pyvbmc/stats/kde_1d.py b/pyvbmc/stats/kde_1d.py
index 2a6bf3f9..e74bfb1b 100644
--- a/pyvbmc/stats/kde_1d.py
+++ b/pyvbmc/stats/kde_1d.py
@@ -42,8 +42,8 @@ def _fixed_point(
     Note that the factor of 2.0 in the definition of f is correct.
     See longer discussion here: https://github.com/tommyod/KDEpy/issues/95
     """
-    i_range_squared = np.asfarray(i_range_squared, dtype=np.float64)
-    a2 = np.asfarray(a2, dtype=np.float64)
+    i_range_squared = np.asarray(i_range_squared, dtype=np.float64)
+    a2 = np.asarray(a2, dtype=np.float64)
     ell = 7
     f = (
         2.0
@@ -59,7 +59,7 @@
         return -1

     for s in reversed(range(2, ell)):
-        odd_numbers_prod = np.product(
+        odd_numbers_prod = np.prod(
             np.arange(1, 2 * s + 1, 2, dtype=np.float64)
         )
         K0 = odd_numbers_prod / np.sqrt(2.0 * np.pi)
diff --git a/pyvbmc/testing/vbmc/test_vbmc_loop_termination.py b/pyvbmc/testing/vbmc/test_vbmc_loop_termination.py
index 46b41fd7..6ac0dd33 100644
--- a/pyvbmc/testing/vbmc/test_vbmc_loop_termination.py
+++ b/pyvbmc/testing/vbmc/test_vbmc_loop_termination.py
@@ -37,7 +37,7 @@ def test_vbmc_check_termination_conditions_max_fun_evals(mocker):
     mocker.patch.object(
         vbmc,
         "_compute_reliability_index",
-        return_value=(np.Inf, np.NaN),
+        return_value=(np.inf, np.nan),
     )
     terminated, __ = vbmc._check_termination_conditions()
     assert terminated
@@ -60,7 +60,7 @@ def test_vbmc_check_termination_conditions_max_iter(mocker):
     mocker.patch.object(
         vbmc,
         "_compute_reliability_index",
-        return_value=(np.Inf, np.NaN),
+        return_value=(np.inf, np.nan),
     )
     terminated, __ = vbmc._check_termination_conditions()
     assert terminated
@@ -83,7 +83,7 @@ def test_vbmc_check_termination_conditions_prevent_early_termination(mocker):
     mocker.patch.object(
         vbmc,
         "_compute_reliability_index",
-        return_value=(np.Inf, np.NaN),
+        return_value=(np.inf, np.nan),
     )
     terminated, __ = vbmc._check_termination_conditions()
     assert not terminated
@@ -101,7 +101,7 @@
     mocker.patch.object(
         vbmc,
         "_compute_reliability_index",
-        return_value=(np.Inf, np.NaN),
+        return_value=(np.inf, np.nan),
     )
     terminated, __ = vbmc._check_termination_conditions()
     assert not terminated
@@ -185,7 +185,7 @@ def test_vbmc_compute_reliability_index_less_than_2_iter():
     vbmc = create_vbmc(3, 3, 1, 5, 2, 4)
     vbmc.optim_state["iter"] = 1
     r_index, ELCBO_improvement = vbmc._compute_reliability_index(6)
-    assert r_index == np.Inf
+    assert r_index == np.inf
     assert np.isnan(ELCBO_improvement)
diff --git a/pyvbmc/vbmc/active_sample.py b/pyvbmc/vbmc/active_sample.py
index 70babec2..1dac2c0b 100644
--- a/pyvbmc/vbmc/active_sample.py
+++ b/pyvbmc/vbmc/active_sample.py
@@ -130,7 +130,7 @@ def active_sample(
         Xs = np.append(Xs, random_Xs, axis=0)
         ys = np.append(
             ys,
-            np.full(sample_count - provided_sample_count, np.NaN),
+            np.full(sample_count - provided_sample_count, np.nan),
             axis=0,
         )
@@ -690,7 +690,7 @@ def _get_search_points(
     D = ub_search.shape[1]

-    search_X = np.full((0, D), np.NaN)
+    search_X = np.full((0, D), np.nan)
     idx_cache = np.array([])

     parameter_transformer = function_logger.parameter_transformer
@@ -708,7 +708,7 @@
     # Randomly sample remaining points
     if x0.shape[0] < number_of_points:
         N_random_points = number_of_points - x0.shape[0]
-        random_Xs = np.full((0, D), np.NaN)
+        random_Xs = np.full((0, D), np.nan)

         N_search_cache = round(
             options.get("search_cache_frac") * N_random_points
diff --git a/pyvbmc/vbmc/option_configs/advanced_vbmc_options.ini b/pyvbmc/vbmc/option_configs/advanced_vbmc_options.ini
index e39d22cd..1899996f 100644
--- a/pyvbmc/vbmc/option_configs/advanced_vbmc_options.ini
+++ b/pyvbmc/vbmc/option_configs/advanced_vbmc_options.ini
@@ -70,7 +70,7 @@ ns_gp_max = 80
 # Max GP hyperparameter samples during warmup
 ns_gp_max_warmup = 8
 # Max GP hyperparameter samples during main algorithm
-ns_gp_max_main = np.Inf
+ns_gp_max_main = np.inf
 # Fcn evals without improvement before stopping warmup
 warmup_no_impro_threshold = 20 + 5 * D
 # Also check for max fcn value improvement before stopping warmup
@@ -78,7 +78,7 @@ warmup_check_max = True
 # Force stable GP hyperparameter sampling (reduce samples or start optimizing)
 stable_gp_sampling = 200 + 10 * D
 # Force stable GP hyperparameter sampling after reaching this number of components
-stable_gp_vp_k = np.Inf
+stable_gp_vp_k = np.inf
 # Number of GP samples when GP is stable (0 = optimize)
 stable_gp_samples = 0
 # Thinning for GP hyperparameter sampling
diff --git a/pyvbmc/vbmc/vbmc.py b/pyvbmc/vbmc/vbmc.py
index a59d25ed..d8d61181 100644
--- a/pyvbmc/vbmc/vbmc.py
+++ b/pyvbmc/vbmc/vbmc.py
@@ -158,7 +158,7 @@ def __init__(
                 provided, PLB and PUB need to be specified."""
             )
         else:
-            x0 = np.full((plausible_lower_bounds.shape), np.NaN)
+            x0 = np.full((plausible_lower_bounds.shape), np.nan)

         if x0.ndim == 1:
             logging.warning("Reshaping x0 to row vector.")
@@ -660,7 +660,7 @@ def _init_optim_state(self):
         if self.options.get("ns_gp_max") > 0:
             optim_state["stop_sampling"] = 0
         else:
-            optim_state["stop_sampling"] = np.Inf
+            optim_state["stop_sampling"] = np.inf

         # Fully recompute variational posterior
         optim_state["recompute_var_post"] = True
@@ -693,7 +693,7 @@
         optim_state["run_mean"] = []
         optim_state["run_cov"] = []
         # Last time running average was updated
-        optim_state["last_run_avg"] = np.NaN
+        optim_state["last_run_avg"] = np.nan

         # Current number of components for variational posterior
         optim_state["vp_K"] = self.options.get("k_warmup")
@@ -1526,7 +1526,7 @@ def optimize(self):
             ):
                 self.logger.info(
                     display_format.format(
-                        np.Inf,
+                        np.inf,
                         self.function_logger.func_count,
                         self.optim_state["N"],
                         elbo,
@@ -1540,7 +1540,7 @@
             else:
                 self.logger.info(
                     display_format.format(
-                        np.Inf,
+                        np.inf,
                         self.function_logger.func_count,
                         elbo,
                         elbo_sd,
@@ -1653,7 +1653,7 @@ def _check_warmup_end_conditions(self):
         if len(self.optim_state.get("data_trim_list")) > 0:
             last_data_trim = self.optim_state.get("data_trim_list")[-1]
         else:
-            last_data_trim = -1 * np.Inf
+            last_data_trim = -1 * np.inf

         no_recent_trim_flag = (
             self.optim_state.get("N") - last_data_trim
@@ -1713,7 +1713,7 @@ def _setup_vbmc_after_warmup(self):
         idx_keep = (y_max - self.function_logger.y_orig) < threshold
         if np.sum(idx_keep) < n_keep_min:
             y_temp = np.copy(self.function_logger.y_orig)
-            y_temp[~np.isfinite(y_temp)] = -np.Inf
+            y_temp[~np.isfinite(y_temp)] = -np.inf
             order = np.argsort(y_temp * -1, axis=0)
             idx_keep[
                 order[: min(n_keep_min, self.function_logger.Xn + 1)]
@@ -1849,8 +1849,8 @@ def _compute_reliability_index(self, tol_stable_iters):
         iteration_idx = self.optim_state.get("iter")
         # Was < 3 in MATLAB due to different indexing.
         if self.optim_state.get("iter") < 2:
-            r_index = np.Inf
-            ELCBO_improvement = np.NaN
+            r_index = np.inf
+            ELCBO_improvement = np.nan
             return r_index, ELCBO_improvement

         sn = np.sqrt(self.optim_state.get("sn2_hpd"))
@@ -1862,7 +1862,7 @@
             self.options.get("tol_sd") * 10,
         )

-        r_index_vec = np.full((3), np.NaN)
+        r_index_vec = np.full((3), np.nan)
         r_index_vec[0] = (
             np.abs(
                 self.iteration_history.get("elbo")[iteration_idx]
@@ -2017,7 +2017,7 @@ def final_boost(self, vp: VariationalPosterior, gp: gpr.GP):
         options.__setitem__("ns_ent", n_sent_boost, force=True)
         options.__setitem__("ns_ent_fast", n_sent_fast_boost, force=True)
         options.__setitem__("ns_ent_fine", n_sent_fine_boost, force=True)
-        options.__setitem__("max_iter_stochastic", np.Inf, force=True)
+        options.__setitem__("max_iter_stochastic", np.inf, force=True)
         self.optim_state["entropy_alpha"] = 0

         stable_flag = np.copy(vp.stats["stable"])
@@ -2298,7 +2298,7 @@ def _create_result_dict(
         else:
             output["convergence_status"] = "no"

-        output["overhead"] = np.NaN
+        output["overhead"] = np.nan
         output["rng_state"] = "rng"
         output["algorithm"] = "Variational Bayesian Monte Carlo"
         try:
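
Note on the API changes above: NumPy 2.0 (NEP 52) removed the legacy
aliases np.Inf, np.NaN, np.asfarray, and np.product; the replacements
used throughout this diff (np.inf, np.nan, np.asarray with an explicit
dtype, and np.prod) are also valid on NumPy 1.x. A minimal sanity-check
sketch, separate from the diff itself; the file name is hypothetical and
nothing here is PyVBMC-specific:

    # numpy2_compat_check.py -- hypothetical helper, not part of the PR.
    # Exercises the replacement spellings used in this diff; runs
    # unchanged on NumPy 1.x and 2.x (the removed aliases would raise
    # AttributeError on 2.x).
    import numpy as np

    assert np.isinf(np.inf) and np.inf > 0       # replaces np.Inf
    assert np.isnan(np.nan)                      # replaces np.NaN
    assert np.prod(np.arange(1, 8, 2)) == 105    # replaces np.product (1*3*5*7)
    x = np.asarray([1, 2, 3], dtype=np.float64)  # replaces np.asfarray
    assert x.dtype == np.float64 and np.all(np.isfinite(x))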