
Commit 0e59196

Merge pull request #660 from bashtage/upgrade-minimums

MAINT: Bump Python to 3.9

2 parents: f34bf20 + e163a75


42 files changed: +314, -243 lines changed

README.md

Lines changed: 2 additions & 2 deletions
@@ -200,13 +200,13 @@ cov_est.cov.long_run
 These requirements reflect the testing environment. It is possible
 that arch will work with older versions.

-- Python (3.8+)
+- Python (3.9+)
 - NumPy (1.19+)
 - SciPy (1.5+)
 - Pandas (1.1+)
 - statsmodels (0.12+)
 - matplotlib (3+), optional
-- property-cached (1.6.4+), optional
+

 ### Optional Requirements

arch/bootstrap/_samplers.pyx

Lines changed: 1 addition & 1 deletion
@@ -3,11 +3,11 @@

 import numpy as np

-cimport cython
 cimport numpy as np

 np.import_array()

+
 def stationary_bootstrap_sample(np.int64_t[:] indices,
                                 double[:] u,
                                 double p):

arch/bootstrap/base.py

Lines changed: 4 additions & 14 deletions
@@ -1,17 +1,7 @@
 from __future__ import annotations

-from typing import (
-    Any,
-    Callable,
-    Dict,
-    Generator as PyGenerator,
-    List,
-    Mapping,
-    Sequence,
-    Tuple,
-    Union,
-    cast,
-)
+from collections.abc import Generator as PyGenerator, Mapping, Sequence
+from typing import Any, Callable, Union, cast
 import warnings

 import numpy as np
@@ -1290,7 +1280,7 @@ def _resample(self) -> tuple[tuple[ArrayLike, ...], dict[str, ArrayLike]]:
         """
         Resample all data using the values in _index
         """
-        indices = cast(Union[Int64Array, Tuple[Int64Array, ...]], self._index)
+        indices = cast(Union[Int64Array, tuple[Int64Array, ...]], self._index)
         pos_data: list[NDArray | pd.Series | pd.DataFrame] = []
         for values in self._args:
             if isinstance(values, (pd.Series, pd.DataFrame)):
@@ -1458,7 +1448,7 @@ def _resample(self) -> tuple[tuple[ArrayLike, ...], dict[str, ArrayLike]]:
         Resample all data using the values in _index
         """
         pos_indices, kw_indices = cast(
-            Tuple[List[Int64Array], Dict[str, Int64Array]], self._index
+            tuple[list[Int64Array], dict[str, Int64Array]], self._index
         )
         pos_data: list[np.ndarray | pd.DataFrame | pd.Series] = []
         for i, values in enumerate(self._args):
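Note: the cast() calls above are why this file needs the 3.9 floor even with deferred annotations. The type expression passed to cast() is evaluated at runtime, so subscripting the built-in tuple/list/dict (PEP 585) must be supported by the interpreter. A minimal sketch of the pattern, using hypothetical names rather than the library's own:

from __future__ import annotations

from typing import Union, cast

import numpy as np

# Deferred annotations are stored as strings and never evaluated, so this
# signature is harmless even where PEP 585 generics are unavailable.
def first_index(index: tuple[np.ndarray, ...]) -> np.ndarray:
    return index[0]

# cast() arguments ARE evaluated when the line runs, so tuple[...] here
# genuinely requires Python 3.9+ (PEP 585).
raw: object = (np.arange(3), np.arange(5))
indices = cast(Union[np.ndarray, tuple[np.ndarray, ...]], raw)
print(first_index(cast(tuple[np.ndarray, ...], indices)).shape)  # (3,)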

arch/bootstrap/multiple_comparison.py

Lines changed: 2 additions & 1 deletion
@@ -1,7 +1,8 @@
 from __future__ import annotations

+from collections.abc import Hashable, Sequence
 import copy
-from typing import Hashable, Sequence, cast
+from typing import cast
 import warnings

 import numpy as np

arch/compat/statsmodels.py

Lines changed: 4 additions & 2 deletions
@@ -1,10 +1,12 @@
-from typing import Any, Union
+from __future__ import annotations
+
+from typing import Any

 from numpy import recarray
 from pandas import DataFrame


-def dataset_loader(dataset: Any) -> Union[recarray, DataFrame]:
+def dataset_loader(dataset: Any) -> recarray | DataFrame:
     """Load a dataset using the new syntax is possible"""
     try:
         return dataset.load(as_pandas=True).data
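The return annotation switches from Union[recarray, DataFrame] to recarray | DataFrame. The | union syntax (PEP 604) only exists at runtime from Python 3.10, but with from __future__ import annotations every annotation is deferred to a string, so the new spelling is valid on 3.9. A hedged sketch of the same idea with placeholder names:

from __future__ import annotations  # defer annotation evaluation (PEP 563)


def parse(value: str) -> int | float:
    # The "int | float" spelling is never evaluated at runtime thanks to the
    # __future__ import, so it is accepted on Python 3.9 even though the |
    # operator on types only arrives in 3.10 (PEP 604).
    try:
        return int(value)
    except ValueError:
        return float(value)


print(parse("3"), parse("2.5"))  # 3 2.5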

arch/tests/bootstrap/test_bootstrap.py

Lines changed: 4 additions & 2 deletions
@@ -1,5 +1,7 @@
+from __future__ import annotations
+
 import copy
-from typing import Callable, NamedTuple, Union
+from typing import Callable, NamedTuple
 import warnings

 import numpy as np
@@ -42,7 +44,7 @@ class BSData(NamedTuple):
     y_series: pd.Series
     z_df: pd.DataFrame

-    func: Callable[[np.ndarray, int], Union[float, np.ndarray]]
+    func: Callable[[np.ndarray, int], float | np.ndarray]


 @pytest.fixture(scope="function", params=[1234, "gen", "rs"])

arch/tests/covariance/test_covariance.py

Lines changed: 14 additions & 15 deletions
@@ -1,5 +1,4 @@
 from itertools import product
-from typing import Tuple, Type

 import numpy as np
 from numpy.testing import assert_allclose
@@ -64,7 +63,7 @@ def data(request) -> ArrayLike:
     rs = np.random.RandomState([3894830, 432841, 323297, 8927821])
     ndim, use_pandas = request.param
     burn = 100
-    size: Tuple[int, ...] = (500 + burn,)
+    size: tuple[int, ...] = (500 + burn,)
     if ndim == 2:
         size += (3,)
     e = rs.standard_normal(size)
@@ -90,7 +89,7 @@ def estimator(request) -> CovarianceEstimator:
     return request.param


-def test_covariance_smoke(data: ArrayLike, estimator: Type[CovarianceEstimator]):
+def test_covariance_smoke(data: ArrayLike, estimator: type[CovarianceEstimator]):
     cov = estimator(data)
     est_cov = cov.cov
     ndim = data.shape[1] if data.ndim > 1 else 1
@@ -103,7 +102,7 @@ def test_covariance_smoke(data: ArrayLike, estimator: Type[CovarianceEstimator])
     assert isinstance(repr(cov), str)


-def test_covariance_errors(data: ArrayLike, estimator: Type[CovarianceEstimator]):
+def test_covariance_errors(data: ArrayLike, estimator: type[CovarianceEstimator]):
     with pytest.raises(ValueError, match="Degrees of freedom is <= 0"):
         estimator(data, df_adjust=data.shape[0] + 1)
     with pytest.raises(ValueError, match="df_adjust must be a non-negative"):
@@ -139,10 +138,10 @@ def test_bartlett_auto(data: ArrayLike):
     assert_allclose(expected_oss, np.squeeze(nw.cov.one_sided_strict))
     assert_allclose(expected_cov, np.squeeze(nw.cov.long_run))
     ce = CovarianceEstimate(
-        short_run=nw.cov.short_run,
-        one_sided_strict=nw.cov.one_sided_strict,
-        long_run=nw.cov.long_run,
-        one_sided=nw.cov.one_sided,
+        short_run=np.asarray(nw.cov.short_run),
+        one_sided_strict=np.asarray(nw.cov.one_sided_strict),
+        long_run=np.asarray(nw.cov.long_run),
+        one_sided=np.asarray(nw.cov.one_sided),
     )
     assert_allclose(ce.short_run, nw.cov.short_run)
     assert_allclose(ce.one_sided_strict, nw.cov.one_sided_strict)
@@ -155,7 +154,7 @@ def test_parzen_auto(data: ArrayLike):

     if data.ndim == 1:
         # This test is noisy
-        expected_bw: Tuple[int, ...] = (18, 19)
+        expected_bw: tuple[int, ...] = (18, 19)
         expected_weights = [
             1.0000e00,
             9.8575e-01,
@@ -239,27 +238,27 @@ def test_qs_auto(data: ArrayLike):
     assert_allclose(qs.kernel_weights[:10], np.array(expected_weights))


-def test_force_int(data: ArrayLike, estimator: Type[CovarianceEstimator]):
+def test_force_int(data: ArrayLike, estimator: type[CovarianceEstimator]):
     bw = estimator(data, force_int=False).bandwidth
     bw_int = estimator(data, force_int=True).bandwidth
     assert bw_int >= bw
     assert bw_int == int(bw_int)


-def test_first_weights(data: ArrayLike, estimator: Type[CovarianceEstimator]):
+def test_first_weights(data: ArrayLike, estimator: type[CovarianceEstimator]):
     w = estimator(data).kernel_weights
     assert w[0] == 1.0


-def test_constants(data: ArrayLike, estimator: Type[CovarianceEstimator]):
+def test_constants(data: ArrayLike, estimator: type[CovarianceEstimator]):
     cov_est = estimator(data)
     kc, bs, rate = KERNEL_PARAMS[estimator.__name__]
     assert_allclose(cov_est.kernel_const, kc)
     assert cov_est.bandwidth_scale == bs
     assert_allclose(cov_est.rate, rate)


-def test_weight_len(data: ArrayLike, estimator: Type[CovarianceEstimator]):
+def test_weight_len(data: ArrayLike, estimator: type[CovarianceEstimator]):
     cov_est = estimator(data, force_int=True)
     name = estimator.__name__
     is_qs = name in ("QuadraticSpectral", "Andrews")
@@ -270,7 +269,7 @@ def test_weight_len(data: ArrayLike, estimator: Type[CovarianceEstimator]):
     assert cov_est.kernel_weights.shape[0] == exp_len


-def test_kernel_weights(data: ArrayLike, estimator: Type[CovarianceEstimator]):
+def test_kernel_weights(data: ArrayLike, estimator: type[CovarianceEstimator]):
     if data.ndim == 1:
         return
     weights = np.arange(1, data.shape[1] + 1)
@@ -280,7 +279,7 @@ def test_kernel_weights(data: ArrayLike, estimator: Type[CovarianceEstimator]):
     assert wcov.bandwidth != cov.bandwidth


-def test_center(data: ArrayLike, estimator: Type[CovarianceEstimator]):
+def test_center(data: ArrayLike, estimator: type[CovarianceEstimator]):
     centered_cov = estimator(data, center=False, force_int=False)
     cov = estimator(data, force_int=False)
     assert centered_cov.bandwidth != cov.bandwidth
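The fixture annotations change Type[CovarianceEstimator] to type[CovarianceEstimator]: the parameter is a class object, and on 3.9+ the built-in type can be subscripted directly (PEP 585). A small illustrative sketch with generic names, not the test suite's fixtures:

from __future__ import annotations


class Estimator:
    def __init__(self, data: list[float]) -> None:
        self.data = data

    def mean(self) -> float:
        return sum(self.data) / len(self.data)


# The argument is the class itself, so it is annotated as type[Estimator],
# the PEP 585 replacement for typing.Type[Estimator].
def build(estimator_cls: type[Estimator], data: list[float]) -> Estimator:
    return estimator_cls(data)


print(build(Estimator, [1.0, 2.0, 3.0]).mean())  # 2.0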

arch/tests/unitroot/cointegration_data.py

Lines changed: 2 additions & 4 deletions
@@ -1,5 +1,3 @@
-from typing import Tuple
-
 import numpy as np
 import pandas as pd
 import pytest
@@ -8,7 +6,7 @@


 @pytest.fixture(scope="module", params=[True, False])
-def data(request) -> Tuple[Float64Array, Float64Array]:
+def data(request) -> tuple[Float64Array, Float64Array]:
     g = np.random.RandomState([12839028, 3092183, 902813])
     e = g.standard_normal((2000, 2))
     phi = g.random_sample((3, 2, 2))
@@ -31,7 +29,7 @@ def data(request) -> Tuple[Float64Array, Float64Array]:


 @pytest.fixture(scope="module", params=[True, False], ids=["pandas", "numpy"])
-def trivariate_data(request) -> Tuple[ArrayLike2D, ArrayLike2D]:
+def trivariate_data(request) -> tuple[ArrayLike2D, ArrayLike2D]:
     rs = np.random.RandomState([922019, 12882912, 192010, 10189, 109981])
     nobs = 1000
     burn = 100

arch/tests/unitroot/test_phillips_ouliaris.py

Lines changed: 2 additions & 4 deletions
@@ -1,5 +1,3 @@
-from typing import Tuple
-
 import numpy as np
 from numpy.testing import assert_allclose
 import pytest
@@ -15,7 +13,7 @@
 from arch.utility.timeseries import add_trend


-def z_tests(z: Float64Array, lag: int, trend: UnitRootTrend) -> Tuple[float, float]:
+def z_tests(z: Float64Array, lag: int, trend: UnitRootTrend) -> tuple[float, float]:
     z = add_trend(z, trend=trend)
     u = z
     if z.shape[1] > 1:
@@ -37,7 +35,7 @@ def z_tests(z: Float64Array, lag: int, trend: UnitRootTrend) -> Tuple[float, flo
     return float(z_a), float(z_t)


-def p_tests(z: Float64Array, lag: int, trend: UnitRootTrend) -> Tuple[float, float]:
+def p_tests(z: Float64Array, lag: int, trend: UnitRootTrend) -> tuple[float, float]:
     x, y = z[:, 1:], z[:, 0]
     nobs = x.shape[0]
     x = add_trend(x, trend=trend)

arch/tests/unitroot/test_unitroot.py

Lines changed: 2 additions & 0 deletions
@@ -1,5 +1,7 @@
 # TODO: Tests for features that are just called
 # TODO: Test for trend='ctt'
+from __future__ import annotations
+
 from arch.compat.statsmodels import dataset_loader

 import os

arch/tests/univariate/test_mean.py

Lines changed: 3 additions & 9 deletions
@@ -72,7 +72,6 @@
 RTOL = 1e-4 if struct.calcsize("P") < 8 else 1e-6
 DISPLAY: Literal["off", "final"] = "off"
 UPDATE_FREQ = 0 if DISPLAY == "off" else 3
-SP_LT_14 = parse(scipy.__version__) < parse("1.4")
 SP500 = 100 * sp500.load()["Adj Close"].pct_change().dropna()


@@ -929,14 +928,9 @@ def test_convergence_warning(self):
         )
         am = arch_model(y, mean="ARX", lags=10, p=5, q=0)

-        if SP_LT_14:
-            with pytest.warns(ConvergenceWarning):
-                am.fit(disp=DISPLAY)
-            am.fit(show_warning=True, disp=DISPLAY)
-        else:
-            with pytest.warns(DataScaleWarning):
-                am.fit(disp=DISPLAY)
-            am.fit(show_warning=True, disp=DISPLAY)
+        with pytest.warns(DataScaleWarning):
+            am.fit(disp=DISPLAY)
+        am.fit(show_warning=True, disp=DISPLAY)

         with pytest.warns(DataScaleWarning):
             am.fit(show_warning=False, disp=DISPLAY)

arch/tests/univariate/test_recursions.py

Lines changed: 6 additions & 5 deletions
@@ -2,7 +2,6 @@
 import pickle
 import timeit
 import types
-from typing import List

 import numpy as np
 from numpy.random import RandomState
@@ -58,7 +57,7 @@ def __init__(
         self.repeat = repeat
         self.number = number
         self._run = False
-        self.times: List[float] = []
+        self.times: list[float] = []
         self._codes = [first, second]
         self.ratio = np.inf

@@ -1242,10 +1241,12 @@ def test_figarch_performance(self):
         """

         midas_first = """
-recpy.figarch_recursion(parameters, fresids, sigma2, p, q, nobs, trunc_lag, backcast, var_bounds)
-"""
+recpy.figarch_recursion(parameters, fresids, sigma2, p, q,
+                        nobs, trunc_lag, backcast, var_bounds)
+"""
         midas_second = """
-rec.figarch_recursion(parameters, fresids, sigma2, p, q, nobs, trunc_lag, backcast, var_bounds)
+rec.figarch_recursion(parameters, fresids, sigma2, p, q,
+                      nobs, trunc_lag, backcast, var_bounds)
 """
         timer = Timer(
             midas_first,
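Besides dropping the typing.List import, the only change here is re-wrapping long statements inside the timeit code strings; timeit compiles the whole string, so a statement split across lines inside parentheses behaves exactly like the original one-liner. A self-contained sketch of that idea with a dummy statement (not the FIGARCH recursion):

import timeit

setup = """
import numpy as np
x = np.random.standard_normal(10_000)
"""

# The statement may span several lines; timeit compiles the full string, and
# the continuation is legal because it sits inside the parentheses.
stmt = """
y = np.convolve(x, np.ones(5) / 5,
                mode="same")
"""

print(min(timeit.repeat(stmt, setup=setup, repeat=3, number=100)))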

arch/tests/univariate/test_variance_forecasting.py

Lines changed: 2 additions & 2 deletions
@@ -45,8 +45,8 @@ def _compare_truncated_forecasts(full, trunc, start):

 class PreservedState:
     """
-    Context manager that will save NumPy's random generator's state when entering and restore
-    the original state when exiting.
+    Context manager that will save NumPy's random generator's state when entering and
+    restore the original state when exiting.
     """

     def __init__(self, random_state):

arch/tests/utility/test_cov.py

Lines changed: 2 additions & 0 deletions
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from arch.compat.statsmodels import dataset_loader

 from numpy import diff, log

arch/typing.py

Lines changed: 5 additions & 16 deletions
@@ -1,19 +1,8 @@
 from __future__ import annotations

+from collections.abc import Hashable
 import datetime as dt
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    Hashable,
-    List,
-    Literal,
-    Optional,
-    Tuple,
-    TypeVar,
-    Union,
-)
+from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, TypeVar, Union

 import numpy as np
 from pandas import DataFrame, Series, Timestamp
@@ -61,11 +50,11 @@
 ) = Float64Array = Int64Array = Int32Array = BoolArray = AnyArray = NDArray

 BootstrapIndexT = Union[
-    Int64Array, Tuple[Int64Array, ...], Tuple[List[Int64Array], Dict[str, Int64Array]]
+    Int64Array, tuple[Int64Array, ...], tuple[list[Int64Array], dict[str, Int64Array]]
 ]
-RandomStateState = Tuple[str, Uint32Array, int, int, float]
+RandomStateState = tuple[str, Uint32Array, int, int, float]

-RNGType = Callable[[Union[int, Tuple[int, ...]]], Float64Array]
+RNGType = Callable[[Union[int, tuple[int, ...]]], Float64Array]
 ArrayLike1D = Union[NDArray, Series]
 ArrayLike2D = Union[NDArray, DataFrame]
 ArrayLike = Union[NDArray, DataFrame, Series]
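Unlike annotations, module-level aliases such as BootstrapIndexT, RandomStateState and RNGType are plain assignments evaluated at import time, so from __future__ import annotations does not defer them; the tuple[...]/list[...]/dict[...] subscripts inside them are what actually require Python 3.9. A minimal sketch of the pattern with made-up alias names:

from __future__ import annotations

from typing import Union

import numpy as np

# Evaluated when the module is imported: the built-in generics used in these
# aliases need Python 3.9+ (PEP 585).
IndexT = Union[np.ndarray, tuple[np.ndarray, ...]]
StateT = tuple[str, np.ndarray, int, int, float]


def describe(state: StateT) -> str:
    name, keys, pos, has_gauss, cached_gaussian = state
    return f"{name}: pos={pos}, keys={keys.shape[0]}"


print(describe(("MT19937", np.zeros(624, dtype=np.uint32), 624, 0, 0.0)))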

arch/unitroot/_phillips_ouliaris.py

Lines changed: 2 additions & 1 deletion
@@ -241,7 +241,8 @@ def phillips_ouliaris(
     .. math::

        \hat{\omega}_{11\cdot 2} = \hat{\omega}_{11}
-       - \hat{\omega}'_{21} \hat{\Omega}_{22}^{-1} \hat{\omega}_{21}
+       - \hat{\omega}'_{21} \hat{\Omega}_{22}^{-1}
+       \hat{\omega}_{21}

     and
