From 7ac3158418d605eed8a5363666a8bf31500a231e Mon Sep 17 00:00:00 2001
From: deven367
Date: Wed, 20 Aug 2025 13:57:21 -0400
Subject: [PATCH] fix broken links

---
 README.md                                     | 24 ++++----
 nbs/docs/how-to-guides/cross_validation.ipynb | 26 ++++-----
 .../how-to-guides/prediction_intervals.ipynb  | 48 +++++++--------
 .../how-to-guides/transfer_learning.ipynb     |  6 +-
 .../electricity_load_forecasting.ipynb        | 58 +++++++++----------
 ...tion_intervals_in_forecasting_models.ipynb | 30 +++++-----
 pyproject.toml                                |  2 +-
 7 files changed, 97 insertions(+), 97 deletions(-)

diff --git a/README.md b/README.md
index 3fa19174..015c8a04 100644
--- a/README.md
+++ b/README.md
@@ -32,15 +32,15 @@ data using remote clusters.
 `conda install -c conda-forge mlforecast`
 
 For more detailed instructions you can refer to the [installation
-page](https://nixtla.github.io/mlforecast/docs/getting-started/install.html).
+page](https://nixtlaverse.nixtla.io/mlforecast/docs/getting-started/install).
 
 ## Quick Start
 
 **Get Started with this [quick
-guide](https://nixtla.github.io/mlforecast/docs/getting-started/quick_start_local.html).**
+guide](https://nixtlaverse.nixtla.io/mlforecast/docs/getting-started/quick_start_local).**
 
 **Follow this [end-to-end
-walkthrough](https://nixtla.github.io/mlforecast/docs/getting-started/end_to_end_walkthrough.html)
+walkthrough](https://nixtlaverse.nixtla.io/mlforecast/docs/getting-started/end_to_end_walkthrough)
 for best practices.**
 
 ### Videos
@@ -61,7 +61,7 @@ for best practices.**
 Current Python alternatives for machine learning models are slow, inaccurate
 and don’t scale well. So we created a library that can be used to forecast in
 production environments.
-[`MLForecast`](https://Nixtla.github.io/mlforecast/forecast.html#mlforecast)
+[`MLForecast`](https://nixtlaverse.nixtla.io/mlforecast/forecast#class-mlforecast)
 includes efficient feature engineering to train any machine learning model
 (with `fit` and `predict` methods such as
 [`sklearn`](https://scikit-learn.org/stable/)) to fit millions of time
@@ -83,36 +83,36 @@ Missing something? Please open an issue or write us in
 ## Examples and Guides
 
 📚 [End to End
-Walkthrough](https://nixtla.github.io/mlforecast/docs/getting-started/end_to_end_walkthrough.html):
+Walkthrough](https://nixtlaverse.nixtla.io/mlforecast/docs/getting-started/end_to_end_walkthrough):
 model training, evaluation and selection for multiple time series.
 
 🔎 [Probabilistic
-Forecasting](https://nixtla.github.io/mlforecast/docs/how-to-guides/prediction_intervals.html):
+Forecasting](https://nixtlaverse.nixtla.io/mlforecast/docs/tutorials/prediction_intervals_in_forecasting_models):
 use Conformal Prediction to produce prediciton intervals.
 
 👩‍🔬 [Cross
-Validation](https://nixtla.github.io/mlforecast/docs/how-to-guides/cross_validation.html):
+Validation](https://nixtlaverse.nixtla.io/mlforecast/docs/how-to-guides/cross_validation):
 robust model’s performance evaluation.
 
 🔌 [Predict Demand
-Peaks](https://nixtla.github.io/mlforecast/docs/tutorials/electricity_peak_forecasting.html):
+Peaks](https://nixtlaverse.nixtla.io/mlforecast/docs/tutorials/electricity_peak_forecasting):
 electricity load forecasting for detecting daily peaks and reducing electric
 bills.
 
 📈 [Transfer
-Learning](https://nixtla.github.io/mlforecast/docs/how-to-guides/transfer_learning.html):
+Learning](https://nixtlaverse.nixtla.io/mlforecast/docs/how-to-guides/transfer_learning):
 pretrain a model using a set of time series and then predict another one
 using that pretrained model. 
 
🌡️ [Distributed -Training](https://nixtla.github.io/mlforecast/docs/getting-started/quick_start_distributed.html): +Training](https://nixtlaverse.nixtla.io/mlforecast/docs/getting-started/quick_start_distributed): use a Dask, Ray or Spark cluster to train models at scale. ## How to use The following provides a very basic overview, for a more detailed description see the -[documentation](https://nixtla.github.io/mlforecast/). +[documentation](https://nixtlaverse.nixtla.io/mlforecast/). ### Data setup @@ -165,7 +165,7 @@ models = [ ### Forecast object Now instantiate an -[`MLForecast`](https://Nixtla.github.io/mlforecast/forecast.html#mlforecast) +[`MLForecast`](https://nixtlaverse.nixtla.io/mlforecast/forecast#class-mlforecast) object with the models and the features that you want to use. The features can be lags, transformations on the lags and date features. You can also define transformations to apply to the target before fitting, diff --git a/nbs/docs/how-to-guides/cross_validation.ipynb b/nbs/docs/how-to-guides/cross_validation.ipynb index eaf34970..03076601 100644 --- a/nbs/docs/how-to-guides/cross_validation.ipynb +++ b/nbs/docs/how-to-guides/cross_validation.ipynb @@ -17,7 +17,7 @@ "\n", "## Prerequesites\n", "\n", - "This tutorial assumes basic familiarity with `MLForecast`. For a minimal example visit the [Quick Start](quick_start_local.html) \n", + "This tutorial assumes basic familiarity with `MLForecast`. For a minimal example visit the [Quick Start](https://nixtlaverse.nixtla.io/mlforecast/docs/getting-started/quick_start_local)\n", ":::" ] }, @@ -36,7 +36,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "[MLForecast](https://nixtla.github.io/mlforecast/) has an implementation of time series cross-validation that is fast and easy to use. This implementation makes cross-validation a efficient operation, which makes it less time-consuming. In this notebook, we'll use it on a subset of the [M4 Competition](https://www.sciencedirect.com/science/article/pii/S0169207019301128) hourly dataset. " + "[MLForecast](https://nixtlaverse.nixtla.io/mlforecast/) has an implementation of time series cross-validation that is fast and easy to use. This implementation makes cross-validation a efficient operation, which makes it less time-consuming. In this notebook, we'll use it on a subset of the [M4 Competition](https://www.sciencedirect.com/science/article/pii/S0169207019301128) hourly dataset. " ] }, { @@ -72,7 +72,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We assume that you have `MLForecast` already installed. If not, check this guide for instructions on [how to install MLForecast](../getting-started/install.html)." + "We assume that you have `MLForecast` already installed. 
If not, check this guide for instructions on [how to install MLForecast](https://nixtlaverse.nixtla.io/mlforecast/docs/getting-started/install)" ] }, { @@ -88,11 +88,11 @@ "metadata": {}, "outputs": [], "source": [ - "import pandas as pd \n", + "import pandas as pd\n", "\n", "from utilsforecast.plotting import plot_series\n", "\n", - "from mlforecast import MLForecast # required to instantiate MLForecast object and use cross-validation method " + "from mlforecast import MLForecast # required to instantiate MLForecast object and use cross-validation method" ] }, { @@ -190,8 +190,8 @@ } ], "source": [ - "Y_df = pd.read_csv('https://datasets-nixtla.s3.amazonaws.com/m4-hourly.csv') # load the data \n", - "Y_df.head() " + "Y_df = pd.read_csv('https://datasets-nixtla.s3.amazonaws.com/m4-hourly.csv') # load the data\n", + "Y_df.head()" ] }, { @@ -252,9 +252,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For this example, we'll use LightGBM. We first need to import it and then we need to instantiate a new [MLForecast](../../forecast.html#mlforecast) object. \n", + "For this example, we'll use LightGBM. We first need to import it and then we need to instantiate a new [MLForecast](https://nixtlaverse.nixtla.io/mlforecast/forecast#class-mlforecast) object. \n", "\n", - "In this example, we are only using `differences` and `lags` to produce features. See [the full documentation](https://nixtla.github.io/mlforecast) to see all available features.\n", + "In this example, we are only using `differences` and `lags` to produce features. See [the full documentation](https://nixtlaverse.nixtla.io/mlforecast) to see all available features.\n", "\n", "Any settings are passed into the constructor. Then you call its `fit` method and pass in the historical data frame `df`. " ] @@ -278,8 +278,8 @@ "models = [lgb.LGBMRegressor(verbosity=-1)]\n", "\n", "mlf = MLForecast(\n", - " models=models, \n", - " freq=1,# our series have integer timestamps, so we'll just add 1 in every timeste, \n", + " models=models,\n", + " freq=1,# our series have integer timestamps, so we'll just add 1 in every timeste,\n", " target_transforms=[Differences([24])],\n", " lags=range(1, 25)\n", ")" @@ -296,7 +296,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Once the `MLForecast` object has been instantiated, we can use the [cross_validation method](../../forecast.html#mlforecast.cross_validation)." + "Once the `MLForecast` object has been instantiated, we can use the [cross_validation method](https://nixtlaverse.nixtla.io/mlforecast/forecast#method-cross-validation)" ] }, { @@ -504,7 +504,7 @@ "outputs": [], "source": [ "from utilsforecast.evaluation import evaluate\n", - "from utilsforecast.losses import rmse " + "from utilsforecast.losses import rmse" ] }, { diff --git a/nbs/docs/how-to-guides/prediction_intervals.ipynb b/nbs/docs/how-to-guides/prediction_intervals.ipynb index 96ec3aa1..a1c57950 100644 --- a/nbs/docs/how-to-guides/prediction_intervals.ipynb +++ b/nbs/docs/how-to-guides/prediction_intervals.ipynb @@ -19,7 +19,7 @@ "\n", "## Prerequesites\n", "\n", - "This tutorial assumes basic familiarity with MLForecast. For a minimal example visit the [Quick Start](https://nixtla.github.io/mlforecast/docs/quick_start_local.html) \n", + "This tutorial assumes basic familiarity with MLForecast. 
For a minimal example visit the [Quick Start](https://nixtlaverse.nixtla.io/mlforecast/docs/getting-started/quick_start_local)\n", ":::" ] }, @@ -323,7 +323,7 @@ "metadata": {}, "outputs": [], "source": [ - "n_series = 8 \n", + "n_series = 8\n", "uids = train['unique_id'].unique()[:n_series] # select first n_series of the dataset\n", "train = train.query('unique_id in @uids')\n", "test = test.query('unique_id in @uids')" @@ -415,7 +415,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Create a list of models and instantiation parameters \n", + "# Create a list of models and instantiation parameters\n", "models = [\n", " KNeighborsRegressor(),\n", " Lasso(),\n", @@ -765,11 +765,11 @@ "outputs": [], "source": [ "fig = plot_series(\n", - " train, \n", - " test, \n", - " plot_random=False, \n", - " models=['KNeighborsRegressor'], \n", - " level=levels, \n", + " train,\n", + " test,\n", + " plot_random=False,\n", + " models=['KNeighborsRegressor'],\n", + " level=levels,\n", " max_insample_length=48\n", ")" ] @@ -809,11 +809,11 @@ "outputs": [], "source": [ "fig = plot_series(\n", - " train, \n", - " test, \n", - " plot_random=False, \n", + " train,\n", + " test,\n", + " plot_random=False,\n", " models=['Lasso'],\n", - " level=levels, \n", + " level=levels,\n", " max_insample_length=48\n", ")" ] @@ -853,11 +853,11 @@ "outputs": [], "source": [ "fig = plot_series(\n", - " train, \n", - " test, \n", - " plot_random=False, \n", + " train,\n", + " test,\n", + " plot_random=False,\n", " models=['LinearRegression'],\n", - " level=levels, \n", + " level=levels,\n", " max_insample_length=48\n", ")" ] @@ -897,11 +897,11 @@ "outputs": [], "source": [ "fig = plot_series(\n", - " train, \n", - " test, \n", - " plot_random=False, \n", + " train,\n", + " test,\n", + " plot_random=False,\n", " models=['MLPRegressor'],\n", - " level=levels, \n", + " level=levels,\n", " max_insample_length=48\n", ")" ] @@ -941,11 +941,11 @@ "outputs": [], "source": [ "fig = plot_series(\n", - " train, \n", - " test, \n", - " plot_random=False, \n", + " train,\n", + " test,\n", + " plot_random=False,\n", " models=['Ridge'],\n", - " level=levels, \n", + " level=levels,\n", " max_insample_length=48\n", ")" ] diff --git a/nbs/docs/how-to-guides/transfer_learning.ipynb b/nbs/docs/how-to-guides/transfer_learning.ipynb index 8c33569a..12622b29 100644 --- a/nbs/docs/how-to-guides/transfer_learning.ipynb +++ b/nbs/docs/how-to-guides/transfer_learning.ipynb @@ -153,7 +153,7 @@ "- `differences`: Differences to take of the target before computing the features. These are restored at the forecasting step.\n", "- `lags`: Lags of the target to use as features.\n", "\n", - "In this example, we are only using `differences` and `lags` to produce features. See [the full documentation](https://nixtla.github.io/mlforecast/forecast.html) to see all available features.\n", + "In this example, we are only using `differences` and `lags` to produce features. See [the full documentation](https://nixtlaverse.nixtla.io/mlforecast/forecast) to see all available features.\n", "\n", "Any settings are passed into the constructor. Then you call its `fit` method and pass in the historical data frame `Y_df_M3`. 
" ] @@ -165,7 +165,7 @@ "outputs": [], "source": [ "fcst = MLForecast(\n", - " models=models, \n", + " models=models,\n", " lags=range(1, 13),\n", " freq='MS',\n", " target_transforms=[Differences([1, 12])],\n", @@ -190,7 +190,7 @@ "source": [ "Y_df = pd.read_csv('https://datasets-nixtla.s3.amazonaws.com/air-passengers.csv', parse_dates=['ds'])\n", "\n", - "# We define the train df. \n", + "# We define the train df.\n", "Y_train_df = Y_df[Y_df.ds<='1959-12-31'] # 132 train\n", "Y_test_df = Y_df[Y_df.ds>'1959-12-31'] # 12 test" ] diff --git a/nbs/docs/tutorials/electricity_load_forecasting.ipynb b/nbs/docs/tutorials/electricity_load_forecasting.ipynb index 6aea4dbd..b002f994 100644 --- a/nbs/docs/tutorials/electricity_load_forecasting.ipynb +++ b/nbs/docs/tutorials/electricity_load_forecasting.ipynb @@ -45,9 +45,9 @@ "In this example we will use the following libraries:\n", "\n", "\n", - "- [`mlforecast`](https://nixtla.github.io/mlforecast/). Accurate and ⚡️ fast forecasting withc lassical machine learning models. \n", + "- [`mlforecast`](https://nixtlaverse.nixtla.io/mlforecast/). Accurate and ⚡️ fast forecasting withc lassical machine learning models. \n", "- [`prophet`](https://github.com/facebook/prophet). Benchmark model developed by Facebook.\n", - "- [`utilsforecast`](https://nixtla.github.io/utilsforecast/). Library with different functions for forecasting evaluation." + "- [`utilsforecast`](https://nixtlaverse.nixtla.io/utilsforecast/). Library with different functions for forecasting evaluation." ] }, { @@ -345,17 +345,17 @@ " for d in differences:\n", " fcst = MLForecast(\n", " models=[], # we're not interested in modeling yet\n", - " freq='H', # our series have hourly frequency \n", + " freq='H', # our series have hourly frequency\n", " target_transforms=[Differences([d])],\n", " )\n", " df_ = fcst.preprocess(df)\n", " df_['unique_id'] = df_['unique_id'] + f'_{d}'\n", " prep.append(df_)\n", - " \n", + "\n", " # Plot combined Differences\n", " fcst = MLForecast(\n", " models=[], # we're not interested in modeling yet\n", - " freq='H', # our series have hourly frequency \n", + " freq='H', # our series have hourly frequency\n", " target_transforms=[Differences([24, 24*7])],\n", " )\n", " df_ = fcst.preprocess(df)\n", @@ -547,7 +547,7 @@ "source": [ "fcst = MLForecast(\n", " models=[], # we're not interested in modeling yet\n", - " freq='H', # our series have hourly frequency \n", + " freq='H', # our series have hourly frequency\n", " target_transforms=[Differences([24, 24*7])],\n", ")\n", "prep = fcst.preprocess(df_train)\n", @@ -597,7 +597,7 @@ "metadata": {}, "source": [ "We can test many models simoultaneously using MLForecast `cross_validation`. We can import `lightgbm` and `scikit-learn` models and try different combinations of them, alongside different target transformations (as the ones we created previously) and historical variables. 
\n", - "You can see an in-depth tutorial on how to use `MLForecast` [Cross Validation methods here](https://nixtla.github.io/mlforecast/docs/cross_validation.html)" + "You can see an in-depth tutorial on how to use `MLForecast` [Cross Validation methods here](https://nixtlaverse.nixtla.io/mlforecast/docs/how-to-guides/cross_validation)" ] }, { @@ -664,7 +664,7 @@ " 'lin_reg': LinearRegression(),\n", " 'ridge': Ridge(),\n", " 'knn': KNeighborsRegressor(),\n", - " 'mlp': MLPRegressor(), \n", + " 'mlp': MLPRegressor(),\n", " 'rf': RandomForestRegressor()\n", " }\n" ] @@ -693,7 +693,7 @@ "id": "00802906", "metadata": {}, "source": [ - "Lag transforms are defined as a dictionary where the keys are the lags and the values are lists of the transformations that we want to apply to that lag. You can refer to the [lag transformations guide](../how-to-guides/lag_transforms_guide.ipynb) for more details." + "Lag transforms are defined as a dictionary where the keys are the lags and the values are lists of the transformations that we want to apply to that lag. You can refer to the [lag transformations guide](https://nixtlaverse.nixtla.io/mlforecast/docs/how-to-guides/lag_transforms_guide) for more details." ] }, { @@ -717,11 +717,11 @@ "outputs": [], "source": [ "mlf = MLForecast(\n", - " models = models, \n", - " freq='H', # our series have hourly frequency \n", + " models = models,\n", + " freq='H', # our series have hourly frequency\n", " target_transforms=[Differences([24, 24*7])],\n", " lags=[1,12,24], # Lags to be used as features\n", - " lag_transforms={ \n", + " lag_transforms={\n", " 1: [ExpandingMean()],\n", " 24: [RollingMean(window_size=48)],\n", " },\n", @@ -926,7 +926,7 @@ " max_date = df_cv.query('unique_id == @uid & cutoff == @cutoff')['ds'].max()\n", " df[df['ds'] < max_date].query('unique_id == @uid').tail(last_n).set_index('ds').plot(ax=axi, title=uid, y='y')\n", " for m in models.keys():\n", - " df_cv.query('unique_id == @uid & cutoff == @cutoff').set_index('ds').plot(ax=axi, title=uid, y=m) \n", + " df_cv.query('unique_id == @uid & cutoff == @cutoff').set_index('ds').plot(ax=axi, title=uid, y=m)\n", " fig.savefig(f'../../figs/{fname}', bbox_inches='tight')\n", " plt.close()" ] @@ -954,7 +954,7 @@ "id": "716795f2", "metadata": {}, "source": [ - "Visually examining the forecasts can give us some idea of how the model is behaving, yet in order to asses the performace we need to evaluate them trough metrics. For that we use the [utilsforecast](https://nixtla.github.io/utilsforecast/) library that contains many useful metrics and an evaluate function." + "Visually examining the forecasts can give us some idea of how the model is behaving, yet in order to asses the performace we need to evaluate them trough metrics. For that we use the [utilsforecast](https://nixtlaverse.nixtla.io/utilsforecast/) library that contains many useful metrics and an evaluate function." ] }, { @@ -1173,7 +1173,7 @@ } ], "source": [ - "evaluate_crossvalidation(crossvalidation_df, metrics, models) " + "evaluate_crossvalidation(crossvalidation_df, metrics, models)" ] }, { @@ -1197,8 +1197,8 @@ "id": "08014817", "metadata": {}, "source": [ - "Now we are going to evaluate their perfonce in the test set. We can use both of them for forecasting the test alongside some prediction intervals. For that we can use the [`PredictionIntervals`](https://nixtla.github.io/mlforecast/utils.html#predictionintervals) function in `mlforecast.utils`. 
\n", - "You can see an in-depth tutotorial of [Probabilistic Forecasting here](https://nixtla.github.io/mlforecast/docs/prediction_intervals.html)" + "Now we are going to evaluate their perfonce in the test set. We can use both of them for forecasting the test alongside some prediction intervals. For that we can use the [`PredictionIntervals`](https://nixtlaverse.nixtla.io/mlforecast/utils#class-predictionintervals) function in `mlforecast.utils`. \n", + "You can see an in-depth tutotorial of [Probabilistic Forecasting here](https://nixtlaverse.nixtla.io/mlforecast/docs/tutorials/prediction_intervals_in_forecasting_models)" ] }, { @@ -1224,11 +1224,11 @@ " }\n", "\n", "mlf_evaluation = MLForecast(\n", - " models = models_evaluation, \n", - " freq='H', # our series have hourly frequency \n", + " models = models_evaluation,\n", + " freq='H', # our series have hourly frequency\n", " target_transforms=[Differences([24, 24*7])],\n", - " lags=[1,12,24], \n", - " lag_transforms={ \n", + " lags=[1,12,24],\n", + " lag_transforms={\n", " 1: [ExpandingMean()],\n", " 24: [RollingMean(window_size=48)],\n", " },\n", @@ -1733,11 +1733,11 @@ "outputs": [], "source": [ "fig = plot_series(\n", - " df_train, \n", - " test, \n", + " df_train,\n", + " test,\n", " models=['lasso', 'lgbm'],\n", - " plot_random=False, \n", - " level=levels, \n", + " plot_random=False,\n", + " level=levels,\n", " max_insample_length=24\n", ")" ] @@ -1943,7 +1943,7 @@ } ], "source": [ - "time_prophet = (end - init) \n", + "time_prophet = (end - init)\n", "print(f'Prophet Time: {time_prophet:.2f} seconds')" ] }, @@ -2053,11 +2053,11 @@ "}\n", "\n", "mlf_comparison = MLForecast(\n", - " models = models_comparison, \n", - " freq='H', # our series have hourly frequency \n", + " models = models_comparison,\n", + " freq='H', # our series have hourly frequency\n", " target_transforms=[Differences([24, 24*7])],\n", " lags=[1,12,24],\n", - " lag_transforms={ \n", + " lag_transforms={\n", " 1: [ExpandingMean()],\n", " 24: [RollingMean(window_size=48)],\n", " },\n", diff --git a/nbs/docs/tutorials/prediction_intervals_in_forecasting_models.ipynb b/nbs/docs/tutorials/prediction_intervals_in_forecasting_models.ipynb index 69160287..e13c5e71 100644 --- a/nbs/docs/tutorials/prediction_intervals_in_forecasting_models.ipynb +++ b/nbs/docs/tutorials/prediction_intervals_in_forecasting_models.ipynb @@ -261,13 +261,13 @@ "import time\n", "from datetime import datetime, timedelta\n", "\n", - "# \n", + "#\n", "# ==============================================================================\n", "from statsmodels.tsa.stattools import adfuller\n", "import statsmodels.api as sm\n", "import statsmodels.tsa.api as smt\n", - "from statsmodels.tsa.seasonal import seasonal_decompose \n", - "# \n", + "from statsmodels.tsa.seasonal import seasonal_decompose\n", + "#\n", "# ==============================================================================\n", "from utilsforecast.plotting import plot_series\n" ] @@ -839,8 +839,8 @@ "metadata": {}, "outputs": [], "source": [ - "train = df[df.ds<='2015-01-21 13:30:00'] \n", - "test = df[df.ds>'2015-01-21 13:30:00'] " + "train = df[df.ds<='2015-01-21 13:30:00']\n", + "test = df[df.ds>'2015-01-21 13:30:00']" ] }, { @@ -942,9 +942,9 @@ "outputs": [], "source": [ "mlf = MLForecast(models=model1,\n", - " freq='30min', \n", + " freq='30min',\n", " target_transforms=[Differences([1])],\n", - " ) " + " )" ] }, { @@ -1142,10 +1142,10 @@ "outputs": [], "source": [ "mlf = MLForecast(models=model1,\n", - " freq='30min', \n", + " 
freq='30min',\n", " lags=[1,24],\n", " target_transforms=[Differences([1])],\n", - " ) " + " )" ] }, { @@ -1340,11 +1340,11 @@ "outputs": [], "source": [ "mlf = MLForecast(models=model1,\n", - " freq='30min', \n", + " freq='30min',\n", " lags=[1,24],\n", " lag_transforms={1: [ExpandingMean()], 24: [RollingMean(window_size=7)]},\n", " target_transforms=[Differences([1])],\n", - " ) " + " )" ] }, { @@ -1549,7 +1549,7 @@ "outputs": [], "source": [ "mlf = MLForecast(models=model1,\n", - " freq='30min', \n", + " freq='30min',\n", " lags=[1,24],\n", " lag_transforms={1: [ExpandingMean()], 24: [RollingMean(window_size=7)]},\n", " target_transforms=[Differences([1])],\n", @@ -1807,8 +1807,8 @@ ], "source": [ "# fit the models\n", - "mlf.fit(df, \n", - " fitted=True, \n", + "mlf.fit(df,\n", + " fitted=True,\n", "prediction_intervals=PredictionIntervals(n_windows=5, h=30, method=\"conformal_distribution\" ) )" ] }, @@ -2216,7 +2216,7 @@ "1. Changquan Huang • Alla Petukhina. Springer series (2022). Applied Time Series Analysis and Forecasting with Python. \n", "2. Ivan Svetunkov. [Forecasting and Analytics with the Augmented Dynamic Adaptive Model (ADAM)](https://openforecast.org/adam/)\n", "3. [James D. Hamilton. Time Series Analysis Princeton University Press, Princeton, New Jersey, 1st Edition, 1994.](https://press.princeton.edu/books/hardcover/9780691042893/time-series-analysis)\n", - "4. [Nixtla Parameters for Mlforecast](https://nixtla.github.io/mlforecast/forecast.html).\n", + "4. [Nixtla Parameters for Mlforecast](https://nixtlaverse.nixtla.io/mlforecast/forecast)\n", "5. [Pandas available frequencies](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases).\n", "6. [Rob J. Hyndman and George Athanasopoulos (2018). “Forecasting principles and practice, Time series cross-validation”.](https://otexts.com/fpp3/tscv.html).\n", "7. [Seasonal periods- Rob J Hyndman](https://robjhyndman.com/hyndsight/seasonal-periods/)." diff --git a/pyproject.toml b/pyproject.toml index 42c5f37c..89b3f7b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ dependencies = [ [project.urls] Homepage = "https://github.com/Nixtla/mlforecast" Repository = "https://github.com/Nixtla/mlforecast" -Documentation = "https://Nixtla.github.io/mlforecast/" +Documentation = "https://nixtlaverse.nixtla.io/mlforecast/" [project.optional-dependencies]