diff --git a/causalpy/__init__.py b/causalpy/__init__.py index 26921565..8fbeaee9 100644 --- a/causalpy/__init__.py +++ b/causalpy/__init__.py @@ -23,11 +23,12 @@ from .data import load_data from .experiments.diff_in_diff import DifferenceInDifferences from .experiments.instrumental_variable import InstrumentalVariable +from .experiments.interrupted_time_series import InterruptedTimeSeries from .experiments.inverse_propensity_weighting import InversePropensityWeighting -from .experiments.prepostfit import InterruptedTimeSeries, SyntheticControl from .experiments.prepostnegd import PrePostNEGD from .experiments.regression_discontinuity import RegressionDiscontinuity from .experiments.regression_kink import RegressionKink +from .experiments.synthetic_control import SyntheticControl az.style.use("arviz-darkgrid") diff --git a/causalpy/experiments/base.py b/causalpy/experiments/base.py index 431e824d..abc284c7 100644 --- a/causalpy/experiments/base.py +++ b/causalpy/experiments/base.py @@ -81,7 +81,7 @@ def _ols_plot(self, *args, **kwargs): raise NotImplementedError("_ols_plot method not yet implemented") def get_plot_data(self, *args, **kwargs) -> pd.DataFrame: - """Recover the data of a PrePostFit experiment along with the prediction and causal impact information. + """Recover the data of an experiment along with the prediction and causal impact information. Internally, this function dispatches to either :func:`get_plot_data_bayesian` or :func:`get_plot_data_ols` depending on the model type. diff --git a/causalpy/experiments/interrupted_time_series.py b/causalpy/experiments/interrupted_time_series.py new file mode 100644 index 00000000..14ec3749 --- /dev/null +++ b/causalpy/experiments/interrupted_time_series.py @@ -0,0 +1,403 @@ +# Copyright 2022 - 2025 The PyMC Labs Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Interrupted Time Series Analysis +""" + +from typing import List, Union + +import arviz as az +import numpy as np +import pandas as pd +from matplotlib import pyplot as plt +from patsy import build_design_matrices, dmatrices +from sklearn.base import RegressorMixin + +from causalpy.custom_exceptions import BadIndexException +from causalpy.plot_utils import get_hdi_to_df, plot_xY +from causalpy.pymc_models import PyMCModel +from causalpy.utils import round_num + +from .base import BaseExperiment + +LEGEND_FONT_SIZE = 12 + + +class InterruptedTimeSeries(BaseExperiment): + """ + The class for interrupted time series analysis. + + :param data: + A pandas dataframe + :param treatment_time: + The time when treatment occurred, should be in reference to the data index + :param formula: + A statistical model formula + :param model: + A PyMC model + + Example + -------- + >>> import causalpy as cp + >>> df = ( + ... cp.load_data("its") + ... .assign(date=lambda x: pd.to_datetime(x["date"])) + ... .set_index("date") + ... ) + >>> treatment_time = pd.to_datetime("2017-01-01") + >>> seed = 42 + >>> result = cp.InterruptedTimeSeries( + ... df, + ... treatment_time, + ... 
formula="y ~ 1 + t + C(month)", + ... model=cp.pymc_models.LinearRegression( + ... sample_kwargs={ + ... "target_accept": 0.95, + ... "random_seed": seed, + ... "progressbar": False, + ... } + ... ), + ... ) + """ + + expt_type = "Interrupted Time Series" + supports_ols = True + supports_bayes = True + + def __init__( + self, + data: pd.DataFrame, + treatment_time: Union[int, float, pd.Timestamp], + formula: str, + model=None, + **kwargs, + ) -> None: + super().__init__(model=model) + self.input_validation(data, treatment_time) + self.treatment_time = treatment_time + # set experiment type - usually done in subclasses + self.expt_type = "Pre-Post Fit" + # split data in to pre and post intervention + self.datapre = data[data.index < self.treatment_time] + self.datapost = data[data.index >= self.treatment_time] + + self.formula = formula + + # set things up with pre-intervention data + y, X = dmatrices(formula, self.datapre) + self.outcome_variable_name = y.design_info.column_names[0] + self._y_design_info = y.design_info + self._x_design_info = X.design_info + self.labels = X.design_info.column_names + self.pre_y, self.pre_X = np.asarray(y), np.asarray(X) + # process post-intervention data + (new_y, new_x) = build_design_matrices( + [self._y_design_info, self._x_design_info], self.datapost + ) + self.post_X = np.asarray(new_x) + self.post_y = np.asarray(new_y) + + # fit the model to the observed (pre-intervention) data + if isinstance(self.model, PyMCModel): + COORDS = {"coeffs": self.labels, "obs_indx": np.arange(self.pre_X.shape[0])} + self.model.fit(X=self.pre_X, y=self.pre_y, coords=COORDS) + elif isinstance(self.model, RegressorMixin): + self.model.fit(X=self.pre_X, y=self.pre_y) + else: + raise ValueError("Model type not recognized") + + # score the goodness of fit to the pre-intervention data + self.score = self.model.score(X=self.pre_X, y=self.pre_y) + + # get the model predictions of the observed (pre-intervention) data + self.pre_pred = self.model.predict(X=self.pre_X) + + # calculate the counterfactual + self.post_pred = self.model.predict(X=self.post_X) + self.pre_impact = self.model.calculate_impact(self.pre_y[:, 0], self.pre_pred) + self.post_impact = self.model.calculate_impact( + self.post_y[:, 0], self.post_pred + ) + self.post_impact_cumulative = self.model.calculate_cumulative_impact( + self.post_impact + ) + + def input_validation(self, data, treatment_time): + """Validate the input data and model formula for correctness""" + if isinstance(data.index, pd.DatetimeIndex) and not isinstance( + treatment_time, pd.Timestamp + ): + raise BadIndexException( + "If data.index is DatetimeIndex, treatment_time must be pd.Timestamp." + ) + if not isinstance(data.index, pd.DatetimeIndex) and isinstance( + treatment_time, pd.Timestamp + ): + raise BadIndexException( + "If data.index is not DatetimeIndex, treatment_time must be pd.Timestamp." # noqa: E501 + ) + + def summary(self, round_to=None) -> None: + """Print summary of main results and model coefficients. + + :param round_to: + Number of decimals used to round results. Defaults to 2. Use "None" to return raw numbers + """ + print(f"{self.expt_type:=^80}") + print(f"Formula: {self.formula}") + self.print_coefficients(round_to) + + def _bayesian_plot( + self, round_to=None, **kwargs + ) -> tuple[plt.Figure, List[plt.Axes]]: + """ + Plot the results + + :param round_to: + Number of decimals used to round results. Defaults to 2. Use "None" to return raw numbers. 
+ """ + counterfactual_label = "Counterfactual" + + fig, ax = plt.subplots(3, 1, sharex=True, figsize=(7, 8)) + # TOP PLOT -------------------------------------------------- + # pre-intervention period + h_line, h_patch = plot_xY( + self.datapre.index, + self.pre_pred["posterior_predictive"].mu, + ax=ax[0], + plot_hdi_kwargs={"color": "C0"}, + ) + handles = [(h_line, h_patch)] + labels = ["Pre-intervention period"] + + (h,) = ax[0].plot(self.datapre.index, self.pre_y, "k.", label="Observations") + handles.append(h) + labels.append("Observations") + + # post intervention period + h_line, h_patch = plot_xY( + self.datapost.index, + self.post_pred["posterior_predictive"].mu, + ax=ax[0], + plot_hdi_kwargs={"color": "C1"}, + ) + handles.append((h_line, h_patch)) + labels.append(counterfactual_label) + + ax[0].plot(self.datapost.index, self.post_y, "k.") + # Shaded causal effect + h = ax[0].fill_between( + self.datapost.index, + y1=az.extract( + self.post_pred, group="posterior_predictive", var_names="mu" + ).mean("sample"), + y2=np.squeeze(self.post_y), + color="C0", + alpha=0.25, + ) + handles.append(h) + labels.append("Causal impact") + + ax[0].set( + title=f""" + Pre-intervention Bayesian $R^2$: {round_num(self.score.r2, round_to)} + (std = {round_num(self.score.r2_std, round_to)}) + """ + ) + + # MIDDLE PLOT ----------------------------------------------- + plot_xY( + self.datapre.index, + self.pre_impact, + ax=ax[1], + plot_hdi_kwargs={"color": "C0"}, + ) + plot_xY( + self.datapost.index, + self.post_impact, + ax=ax[1], + plot_hdi_kwargs={"color": "C1"}, + ) + ax[1].axhline(y=0, c="k") + ax[1].fill_between( + self.datapost.index, + y1=self.post_impact.mean(["chain", "draw"]), + color="C0", + alpha=0.25, + label="Causal impact", + ) + ax[1].set(title="Causal Impact") + + # BOTTOM PLOT ----------------------------------------------- + ax[2].set(title="Cumulative Causal Impact") + plot_xY( + self.datapost.index, + self.post_impact_cumulative, + ax=ax[2], + plot_hdi_kwargs={"color": "C1"}, + ) + ax[2].axhline(y=0, c="k") + + # Intervention line + for i in [0, 1, 2]: + ax[i].axvline( + x=self.treatment_time, + ls="-", + lw=3, + color="r", + ) + + ax[0].legend( + handles=(h_tuple for h_tuple in handles), + labels=labels, + fontsize=LEGEND_FONT_SIZE, + ) + + return fig, ax + + def _ols_plot(self, round_to=None, **kwargs) -> tuple[plt.Figure, List[plt.Axes]]: + """ + Plot the results + + :param round_to: + Number of decimals used to round results. Defaults to 2. Use "None" to return raw numbers. 
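+
+        As in the Bayesian plot, the figure contains three panels: the observed data
+        with the model fit and counterfactual prediction, the pointwise causal
+        impact, and the cumulative causal impact.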
+ """ + counterfactual_label = "Counterfactual" + + fig, ax = plt.subplots(3, 1, sharex=True, figsize=(7, 8)) + + ax[0].plot(self.datapre.index, self.pre_y, "k.") + ax[0].plot(self.datapost.index, self.post_y, "k.") + + ax[0].plot(self.datapre.index, self.pre_pred, c="k", label="model fit") + ax[0].plot( + self.datapost.index, + self.post_pred, + label=counterfactual_label, + ls=":", + c="k", + ) + ax[0].set( + title=f"$R^2$ on pre-intervention data = {round_num(self.score, round_to)}" + ) + + ax[1].plot(self.datapre.index, self.pre_impact, "k.") + ax[1].plot( + self.datapost.index, + self.post_impact, + "k.", + label=counterfactual_label, + ) + ax[1].axhline(y=0, c="k") + ax[1].set(title="Causal Impact") + + ax[2].plot(self.datapost.index, self.post_impact_cumulative, c="k") + ax[2].axhline(y=0, c="k") + ax[2].set(title="Cumulative Causal Impact") + + # Shaded causal effect + ax[0].fill_between( + self.datapost.index, + y1=np.squeeze(self.post_pred), + y2=np.squeeze(self.post_y), + color="C0", + alpha=0.25, + label="Causal impact", + ) + ax[1].fill_between( + self.datapost.index, + y1=np.squeeze(self.post_impact), + color="C0", + alpha=0.25, + label="Causal impact", + ) + + # Intervention line + # TODO: make this work when treatment_time is a datetime + for i in [0, 1, 2]: + ax[i].axvline( + x=self.treatment_time, + ls="-", + lw=3, + color="r", + label="Treatment time", + ) + + ax[0].legend(fontsize=LEGEND_FONT_SIZE) + + return (fig, ax) + + def get_plot_data_bayesian(self, hdi_prob: float = 0.94) -> pd.DataFrame: + """ + Recover the data of the experiment along with the prediction and causal impact information. + + :param hdi_prob: + Prob for which the highest density interval will be computed. The default value is defined as the default from the :func:`arviz.hdi` function. + """ + if isinstance(self.model, PyMCModel): + hdi_pct = int(round(hdi_prob * 100)) + + pred_lower_col = f"pred_hdi_lower_{hdi_pct}" + pred_upper_col = f"pred_hdi_upper_{hdi_pct}" + impact_lower_col = f"impact_hdi_lower_{hdi_pct}" + impact_upper_col = f"impact_hdi_upper_{hdi_pct}" + + pre_data = self.datapre.copy() + post_data = self.datapost.copy() + + pre_data["prediction"] = ( + az.extract(self.pre_pred, group="posterior_predictive", var_names="mu") + .mean("sample") + .values + ) + post_data["prediction"] = ( + az.extract(self.post_pred, group="posterior_predictive", var_names="mu") + .mean("sample") + .values + ) + pre_data[[pred_lower_col, pred_upper_col]] = get_hdi_to_df( + self.pre_pred["posterior_predictive"].mu, hdi_prob=hdi_prob + ).set_index(pre_data.index) + post_data[[pred_lower_col, pred_upper_col]] = get_hdi_to_df( + self.post_pred["posterior_predictive"].mu, hdi_prob=hdi_prob + ).set_index(post_data.index) + + pre_data["impact"] = self.pre_impact.mean(dim=["chain", "draw"]).values + post_data["impact"] = self.post_impact.mean(dim=["chain", "draw"]).values + pre_data[[impact_lower_col, impact_upper_col]] = get_hdi_to_df( + self.pre_impact, hdi_prob=hdi_prob + ).set_index(pre_data.index) + post_data[[impact_lower_col, impact_upper_col]] = get_hdi_to_df( + self.post_impact, hdi_prob=hdi_prob + ).set_index(post_data.index) + + self.plot_data = pd.concat([pre_data, post_data]) + + return self.plot_data + else: + raise ValueError("Unsupported model type") + + def get_plot_data_ols(self) -> pd.DataFrame: + """ + Recover the data of the experiment along with the prediction and causal impact information. 
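+
+        The returned dataframe contains the original data plus a ``prediction``
+        column (the counterfactual prediction) and an ``impact`` column (the
+        estimated causal impact).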
+ """ + pre_data = self.datapre.copy() + post_data = self.datapost.copy() + pre_data["prediction"] = self.pre_pred + post_data["prediction"] = self.post_pred + pre_data["impact"] = self.pre_impact + post_data["impact"] = self.post_impact + self.plot_data = pd.concat([pre_data, post_data]) + + return self.plot_data diff --git a/causalpy/experiments/prepostfit.py b/causalpy/experiments/synthetic_control.py similarity index 85% rename from causalpy/experiments/prepostfit.py rename to causalpy/experiments/synthetic_control.py index adaa0b84..a1bdecbb 100644 --- a/causalpy/experiments/prepostfit.py +++ b/causalpy/experiments/synthetic_control.py @@ -1,4 +1,4 @@ -# Copyright 2022 - 2025 The PyMC Labs Developers +# Copyright 2025 - 2025 The PyMC Labs Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -Pre/post intervention fit experiment designs +Synthetic Control Experiment """ from typing import List, Union @@ -34,12 +34,42 @@ LEGEND_FONT_SIZE = 12 -class PrePostFit(BaseExperiment): - """ - A base class for quasi-experimental designs where parameter estimation is based on - just pre-intervention data. This class is not directly invoked by the user. +class SyntheticControl(BaseExperiment): + """The class for the synthetic control experiment. + + :param data: + A pandas dataframe + :param treatment_time: + The time when treatment occurred, should be in reference to the data index + :param formula: + A statistical model formula + :param model: + A PyMC model + + Example + -------- + >>> import causalpy as cp + >>> df = cp.load_data("sc") + >>> treatment_time = 70 + >>> seed = 42 + >>> result = cp.SyntheticControl( + ... df, + ... treatment_time, + ... formula="actual ~ 0 + a + b + c + d + e + f + g", + ... model=cp.pymc_models.WeightedSumFitter( + ... sample_kwargs={ + ... "target_accept": 0.95, + ... "random_seed": seed, + ... "progressbar": False, + ... } + ... ), + ... ) """ + expt_type = "SyntheticControl" + supports_ols = True + supports_bayes = True + def __init__( self, data: pd.DataFrame, @@ -229,6 +259,17 @@ def _bayesian_plot( fontsize=LEGEND_FONT_SIZE, ) + # code above: same as `PrePostFit._bayesian_plot` ------------------------------- + # code below: additional for the synthetic control experiment ------------------ + + plot_predictors = kwargs.get("plot_predictors", False) + if plot_predictors: + # plot control units as well + ax[0].plot(self.datapre.index, self.pre_X, "-", c=[0.8, 0.8, 0.8], zorder=1) + ax[0].plot( + self.datapost.index, self.post_X, "-", c=[0.8, 0.8, 0.8], zorder=1 + ) + return fig, ax def _ols_plot(self, round_to=None, **kwargs) -> tuple[plt.Figure, List[plt.Axes]]: @@ -303,9 +344,23 @@ def _ols_plot(self, round_to=None, **kwargs) -> tuple[plt.Figure, List[plt.Axes] return (fig, ax) + def get_plot_data_ols(self) -> pd.DataFrame: + """ + Recover the data of the experiment along with the prediction and causal impact information. 
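+
+        The returned dataframe contains the original data plus a ``prediction``
+        column (the counterfactual prediction) and an ``impact`` column (the
+        estimated causal impact).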
+ """ + pre_data = self.datapre.copy() + post_data = self.datapost.copy() + pre_data["prediction"] = self.pre_pred + post_data["prediction"] = self.post_pred + pre_data["impact"] = self.pre_impact + post_data["impact"] = self.post_impact + self.plot_data = pd.concat([pre_data, post_data]) + + return self.plot_data + def get_plot_data_bayesian(self, hdi_prob: float = 0.94) -> pd.DataFrame: """ - Recover the data of a PrePostFit experiment along with the prediction and causal impact information. + Recover the data of the PrePostFit experiment along with the prediction and causal impact information. :param hdi_prob: Prob for which the highest density interval will be computed. The default value is defined as the default from the :func:`arviz.hdi` function. @@ -352,120 +407,3 @@ def get_plot_data_bayesian(self, hdi_prob: float = 0.94) -> pd.DataFrame: return self.plot_data else: raise ValueError("Unsupported model type") - - def get_plot_data_ols(self) -> pd.DataFrame: - """ - Recover the data of a PrePostFit experiment along with the prediction and causal impact information. - """ - pre_data = self.datapre.copy() - post_data = self.datapost.copy() - pre_data["prediction"] = self.pre_pred - post_data["prediction"] = self.post_pred - pre_data["impact"] = self.pre_impact - post_data["impact"] = self.post_impact - self.plot_data = pd.concat([pre_data, post_data]) - - return self.plot_data - - -class InterruptedTimeSeries(PrePostFit): - """ - A wrapper around PrePostFit class - - :param data: - A pandas dataframe - :param treatment_time: - The time when treatment occurred, should be in reference to the data index - :param formula: - A statistical model formula - :param model: - A PyMC model - - Example - -------- - >>> import causalpy as cp - >>> df = ( - ... cp.load_data("its") - ... .assign(date=lambda x: pd.to_datetime(x["date"])) - ... .set_index("date") - ... ) - >>> treatment_time = pd.to_datetime("2017-01-01") - >>> seed = 42 - >>> result = cp.InterruptedTimeSeries( - ... df, - ... treatment_time, - ... formula="y ~ 1 + t + C(month)", - ... model=cp.pymc_models.LinearRegression( - ... sample_kwargs={ - ... "target_accept": 0.95, - ... "random_seed": seed, - ... "progressbar": False, - ... } - ... ), - ... ) - """ - - expt_type = "Interrupted Time Series" - supports_ols = True - supports_bayes = True - - -class SyntheticControl(PrePostFit): - """A wrapper around the PrePostFit class - - :param data: - A pandas dataframe - :param treatment_time: - The time when treatment occurred, should be in reference to the data index - :param formula: - A statistical model formula - :param model: - A PyMC model - - Example - -------- - >>> import causalpy as cp - >>> df = cp.load_data("sc") - >>> treatment_time = 70 - >>> seed = 42 - >>> result = cp.SyntheticControl( - ... df, - ... treatment_time, - ... formula="actual ~ 0 + a + b + c + d + e + f + g", - ... model=cp.pymc_models.WeightedSumFitter( - ... sample_kwargs={ - ... "target_accept": 0.95, - ... "random_seed": seed, - ... "progressbar": False, - ... } - ... ), - ... ) - """ - - expt_type = "SyntheticControl" - supports_ols = True - supports_bayes = True - - def _bayesian_plot(self, *args, **kwargs) -> tuple[plt.Figure, List[plt.Axes]]: - """ - Plot the results - - :param round_to: - Number of decimals used to round results. Defaults to 2. Use "None" to - return raw numbers. - :param plot_predictors: - Whether to plot the control units as well. Defaults to False. 
- """ - # call the super class method - fig, ax = super()._bayesian_plot(*args, **kwargs) - - # additional plotting functionality for the synthetic control experiment - plot_predictors = kwargs.get("plot_predictors", False) - if plot_predictors: - # plot control units as well - ax[0].plot(self.datapre.index, self.pre_X, "-", c=[0.8, 0.8, 0.8], zorder=1) - ax[0].plot( - self.datapost.index, self.post_X, "-", c=[0.8, 0.8, 0.8], zorder=1 - ) - - return fig, ax diff --git a/causalpy/pymc_experiments.py b/causalpy/pymc_experiments.py index 9a2861fc..ddf92958 100644 --- a/causalpy/pymc_experiments.py +++ b/causalpy/pymc_experiments.py @@ -27,20 +27,20 @@ from .experiments.instrumental_variable import ( InstrumentalVariable as NewInstrumentalVariable, ) -from .experiments.inverse_propensity_weighting import ( - InversePropensityWeighting as NewInversePropensityWeighting, -) -from .experiments.prepostfit import ( +from .experiments.interrupted_time_series import ( InterruptedTimeSeries as NewInterruptedTimeSeries, ) -from .experiments.prepostfit import ( - SyntheticControl as NewSyntheticControl, +from .experiments.inverse_propensity_weighting import ( + InversePropensityWeighting as NewInversePropensityWeighting, ) from .experiments.prepostnegd import PrePostNEGD as NewPrePostNEGD from .experiments.regression_discontinuity import ( RegressionDiscontinuity as NewRegressionDiscontinuity, ) from .experiments.regression_kink import RegressionKink as NewRegressionKink +from .experiments.synthetic_control import ( + SyntheticControl as NewSyntheticControl, +) # Ensure deprecation warnings are always shown in Jupyter Notebooks warnings.simplefilter("always", DeprecationWarning) diff --git a/causalpy/skl_experiments.py b/causalpy/skl_experiments.py index d086ccb3..7ae430a6 100644 --- a/causalpy/skl_experiments.py +++ b/causalpy/skl_experiments.py @@ -24,15 +24,15 @@ from .experiments.diff_in_diff import ( DifferenceInDifferences as NewDifferenceInDifferences, ) -from .experiments.prepostfit import ( +from .experiments.interrupted_time_series import ( InterruptedTimeSeries as NewInterruptedTimeSeries, ) -from .experiments.prepostfit import ( - SyntheticControl as NewSyntheticControl, -) from .experiments.regression_discontinuity import ( RegressionDiscontinuity as NewRegressionDiscontinuity, ) +from .experiments.synthetic_control import ( + SyntheticControl as NewSyntheticControl, +) # Ensure deprecation warnings are always shown in Jupyter Notebooks warnings.simplefilter("always", DeprecationWarning) diff --git a/causalpy/tests/test_api_stability.py b/causalpy/tests/test_api_stability.py index 495ee4f5..f86ec3bb 100644 --- a/causalpy/tests/test_api_stability.py +++ b/causalpy/tests/test_api_stability.py @@ -19,7 +19,7 @@ import pandas as pd import causalpy as cp -from causalpy.experiments.prepostfit import SyntheticControl +from causalpy.experiments.synthetic_control import SyntheticControl from causalpy.pymc_models import WeightedSumFitter sample_kwargs = {"tune": 20, "draws": 20, "chains": 2, "cores": 2} diff --git a/docs/source/_static/classes.png b/docs/source/_static/classes.png index 012109d8..e737a024 100644 Binary files a/docs/source/_static/classes.png and b/docs/source/_static/classes.png differ diff --git a/docs/source/_static/interrogate_badge.svg b/docs/source/_static/interrogate_badge.svg index c698e26b..f6fdb5f3 100644 --- a/docs/source/_static/interrogate_badge.svg +++ b/docs/source/_static/interrogate_badge.svg @@ -1,5 +1,5 @@ - interrogate: 90.4% + interrogate: 90.6% @@ -12,8 +12,8 @@ 
interrogate interrogate - 90.4% - 90.4% + 90.6% + 90.6% diff --git a/docs/source/_static/packages.png b/docs/source/_static/packages.png index a285c3b3..891aba45 100644 Binary files a/docs/source/_static/packages.png and b/docs/source/_static/packages.png differ