
Commit b375aed

AzulGarza authored and MMenchero committed
fix: recover finetune and model changes (#274)
1 parent 9402f93 commit b375aed

Showing 18 changed files with 444 additions and 489 deletions.
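
In practice, the recovered names change how users call the SDK. A minimal before/after sketch, assuming the `nixtlats` client of this era; the token and CSV path are placeholders, not part of the commit:

    import pandas as pd
    from nixtlats import TimeGPT  # client whose keyword arguments this commit renames

    timegpt = TimeGPT(token='my_token')     # placeholder token
    df = pd.read_csv('air_passengers.csv')  # illustrative dataset

    # Before this commit (the reverted names):
    # fcst = timegpt.forecast(df, h=12, fewshot_steps=10, model='short-horizon')

    # After this commit (the recovered names):
    fcst = timegpt.forecast(df, h=12, finetune_steps=10, model='timegpt-1')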

nbs/distributed.timegpt.ipynb

Lines changed: 13 additions & 13 deletions
@@ -161,14 +161,14 @@
 " X_df: Optional[fugue.AnyDataFrame] = None,\n",
 " level: Optional[List[Union[int, float]]] = None,\n",
 " quantiles: Optional[List[float]] = None,\n",
-" fewshot_steps: int = 0,\n",
-" fewshot_loss: str = 'default',\n",
+" finetune_steps: int = 0,\n",
+" finetune_loss: str = 'default',\n",
 " clean_ex_first: bool = True,\n",
 " validate_token: bool = False,\n",
 " add_history: bool = False,\n",
 " date_features: Union[bool, List[str]] = False,\n",
 " date_features_to_one_hot: Union[bool, List[str]] = True,\n",
-" model: str = 'short-horizon',\n",
+" model: str = 'timegpt-1',\n",
 " num_partitions: Optional[int] = None,\n",
 " ) -> fugue.AnyDataFrame:\n",
 " kwargs = dict(\n",
@@ -179,8 +179,8 @@
 " target_col=target_col,\n",
 " level=level,\n",
 " quantiles=quantiles,\n",
-" fewshot_steps=fewshot_steps,\n",
-" fewshot_loss=fewshot_loss,\n",
+" finetune_steps=finetune_steps,\n",
+" finetune_loss=finetune_loss,\n",
 " clean_ex_first=clean_ex_first,\n",
 " validate_token=validate_token,\n",
 " add_history=add_history,\n",
@@ -217,7 +217,7 @@
 " validate_token: bool = False,\n",
 " date_features: Union[bool, List[str]] = False,\n",
 " date_features_to_one_hot: Union[bool, List[str]] = True,\n",
-" model: str = 'short-horizon',\n",
+" model: str = 'timegpt-1',\n",
 " num_partitions: Optional[int] = None,\n",
 " ) -> fugue.AnyDataFrame:\n",
 " kwargs = dict(\n",
@@ -254,13 +254,13 @@
 " target_col: str = 'y',\n",
 " level: Optional[List[Union[int, float]]] = None,\n",
 " quantiles: Optional[List[float]] = None,\n",
-" fewshot_steps: int = 0,\n",
-" fewshot_loss: str = 'default',\n",
+" finetune_steps: int = 0,\n",
+" finetune_loss: str = 'default',\n",
 " clean_ex_first: bool = True,\n",
 " validate_token: bool = False,\n",
 " date_features: Union[bool, List[str]] = False,\n",
 " date_features_to_one_hot: Union[bool, List[str]] = True,\n",
-" model: str = 'short-horizon',\n",
+" model: str = 'timegpt-1',\n",
 " n_windows: int = 1,\n",
 " step_size: Optional[int] = None,\n",
 " num_partitions: Optional[int] = None,\n",
@@ -273,8 +273,8 @@
 " target_col=target_col,\n",
 " level=level,\n",
 " quantiles=quantiles,\n",
-" fewshot_steps=fewshot_steps,\n",
-" fewshot_loss=fewshot_loss,\n",
+" finetune_steps=finetune_steps,\n",
+" finetune_loss=finetune_loss,\n",
 " clean_ex_first=clean_ex_first,\n",
 " validate_token=validate_token,\n",
 " date_features=date_features,\n",
@@ -448,7 +448,7 @@
 " num_partitions=1,\n",
 " id_col=id_col,\n",
 " time_col=time_col,\n",
-" model='short-horizon',\n",
+" model='timegpt-1',\n",
 " **fcst_kwargs\n",
 " )\n",
 " fcst_df = fa.as_pandas(fcst_df)\n",
@@ -771,7 +771,7 @@
 " num_partitions=1,\n",
 " id_col=id_col,\n",
 " time_col=time_col,\n",
-" model='short-horizon',\n",
+" model='timegpt-1',\n",
 " **anomalies_kwargs\n",
 " )\n",
 " anomalies_df = fa.as_pandas(anomalies_df)\n",

nbs/docs/getting-started/1_getting_started_short.ipynb

Lines changed: 23 additions & 32 deletions
Large diffs are not rendered by default.

nbs/docs/how-to-guides/0_distributed_fcst_spark.ipynb

Lines changed: 1 addition & 1 deletion
@@ -203,7 +203,7 @@
 "#| hide\n",
 "# test different results for different models\n",
 "fcst_df_1 = fcst_df.toPandas()\n",
-"fcst_df_2 = timegpt.forecast(spark_df, h=12, model='long-horizon')\n",
+"fcst_df_2 = timegpt.forecast(spark_df, h=12, model='timegpt-1-long-horizon')\n",
 "fcst_df_2 = fcst_df_2.toPandas()\n",
 "test_fail(\n",
 " lambda: pd.testing.assert_frame_equal(fcst_df_1[['TimeGPT']], fcst_df_2[['TimeGPT']]),\n",

nbs/docs/misc/0_faqs.ipynb

Lines changed: 3 additions & 3 deletions
@@ -167,13 +167,13 @@
 "\n",
 "`TimeGPT` was trained on the largest publicly available time series dataset, covering a wide range of domains such as finance, retail, healthcare, and more. This comprehensive training enables `TimeGPT` to produce accurate forecasts for new time series without additional training, a capability known as zero-shot learning. \n",
 "\n",
-"While the zero-shot model provides a solid baseline, the performance of `TimeGPT` can often be improved through fine-tuning. During this process, the `TimeGPT` model undergoes additional training using your specific dataset, starting from the pre-trained parameters. The updated model then produces the forecasts. You can control the number of training iterations and the loss function for fine-tuning with the `fewshot_steps` and the `fewshot_loss` parameters in the `forecast` method from the `TimeGPT` class, respectively. \n",
+"While the zero-shot model provides a solid baseline, the performance of `TimeGPT` can often be improved through fine-tuning. During this process, the `TimeGPT` model undergoes additional training using your specific dataset, starting from the pre-trained parameters. The updated model then produces the forecasts. You can control the number of training iterations and the loss function for fine-tuning with the `finetune_steps` and the `finetune_loss` parameters in the `forecast` method from the `TimeGPT` class, respectively. \n",
 "\n",
 "For a comprehensive guide on how to apply fine-tuning, please refer to the [fine-tuning](https://docs.nixtla.io/docs/finetuning) and the [fine-tuning with a specific loss function](https://docs.nixtla.io/docs/fine_tuning_with_a_specific_loss_function) tutorials. \n",
 "\n",
 "### Do I have to fine-tune every series? \n",
 "\n",
-"No, you do not need to fine-tune every series individually. When using the `fewshot_steps` parameter, the model undergoes fine-tuning across all series in your dataset simultaneously. This method uses a cross-learning approach, allowing the model to learn from multiple series at once, which can improve individual forecasts.\n",
+"No, you do not need to fine-tune every series individually. When using the `finetune_steps` parameter, the model undergoes fine-tuning across all series in your dataset simultaneously. This method uses a cross-learning approach, allowing the model to learn from multiple series at once, which can improve individual forecasts.\n",
 "\n",
 "Keep in mind that selecting the right number of fine-tuning steps may require some trial and error. As the number of fine-tuning steps increases, the model becomes more specialized to your dataset, but will take longer to train and may be more prone to overfitting. \n",
 "\n",
@@ -323,5 +323,5 @@
 }
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 4
 }
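
The renamed arguments in this FAQ map directly onto `forecast`; a minimal sketch, assuming the same client as above ('mae' is assumed to be among the supported `finetune_loss` values, per the loss-function tutorial linked in the text):

    import pandas as pd
    from nixtlats import TimeGPT

    timegpt = TimeGPT(token='my_token')     # placeholder token
    df = pd.read_csv('air_passengers.csv')  # illustrative dataset

    # finetune_steps sets how many additional training iterations run on your
    # data before forecasting; finetune_loss selects the training objective.
    fcst = timegpt.forecast(
        df,
        h=12,
        finetune_steps=20,    # more steps: more specialization, more overfitting risk
        finetune_loss='mae',  # assumed to be among the supported losses
    )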

nbs/docs/tutorials/0_anomaly_detection.ipynb

Lines changed: 15 additions & 11 deletions
Large diffs are not rendered by default.

nbs/docs/tutorials/11_loss_function_finetuning.ipynb

Lines changed: 9 additions & 9 deletions
Large diffs are not rendered by default.

nbs/docs/tutorials/12_longhorizon.ipynb

Lines changed: 7 additions & 7 deletions
Large diffs are not rendered by default.

nbs/docs/tutorials/1_exogenous_variables.ipynb

Lines changed: 41 additions & 48 deletions
Large diffs are not rendered by default.

nbs/docs/tutorials/2_holidays.ipynb

Lines changed: 16 additions & 23 deletions
Large diffs are not rendered by default.

nbs/docs/tutorials/4_prediction_intervals.ipynb

Lines changed: 27 additions & 27 deletions
@@ -266,35 +266,35 @@
 " <tr>\n",
 " <th>0</th>\n",
 " <td>1961-01-01</td>\n",
-" <td>437.837921</td>\n",
-" <td>415.826453</td>\n",
-" <td>423.783707</td>\n",
-" <td>431.987061</td>\n",
-" <td>443.688782</td>\n",
-" <td>451.892136</td>\n",
-" <td>459.849389</td>\n",
+" <td>437.837952</td>\n",
+" <td>415.826484</td>\n",
+" <td>423.783737</td>\n",
+" <td>431.987091</td>\n",
+" <td>443.688812</td>\n",
+" <td>451.892166</td>\n",
+" <td>459.849419</td>\n",
 " </tr>\n",
 " <tr>\n",
 " <th>1</th>\n",
 " <td>1961-02-01</td>\n",
-" <td>426.062714</td>\n",
-" <td>402.833523</td>\n",
-" <td>407.694061</td>\n",
-" <td>412.704926</td>\n",
-" <td>439.420502</td>\n",
-" <td>444.431366</td>\n",
-" <td>449.291904</td>\n",
+" <td>426.062744</td>\n",
+" <td>402.833553</td>\n",
+" <td>407.694092</td>\n",
+" <td>412.704956</td>\n",
+" <td>439.420532</td>\n",
+" <td>444.431396</td>\n",
+" <td>449.291935</td>\n",
 " </tr>\n",
 " <tr>\n",
 " <th>2</th>\n",
 " <td>1961-03-01</td>\n",
-" <td>463.116547</td>\n",
-" <td>423.434062</td>\n",
-" <td>430.316862</td>\n",
-" <td>437.412534</td>\n",
-" <td>488.820560</td>\n",
-" <td>495.916231</td>\n",
-" <td>502.799032</td>\n",
+" <td>463.116577</td>\n",
+" <td>423.434092</td>\n",
+" <td>430.316893</td>\n",
+" <td>437.412564</td>\n",
+" <td>488.820590</td>\n",
+" <td>495.916261</td>\n",
+" <td>502.799062</td>\n",
 " </tr>\n",
 " <tr>\n",
 " <th>3</th>\n",
@@ -324,16 +324,16 @@
 ],
 "text/plain": [
 " timestamp TimeGPT TimeGPT-lo-99.7 TimeGPT-lo-90 TimeGPT-lo-80 \\\n",
-"0 1961-01-01 437.837921 415.826453 423.783707 431.987061 \n",
-"1 1961-02-01 426.062714 402.833523 407.694061 412.704926 \n",
-"2 1961-03-01 463.116547 423.434062 430.316862 437.412534 \n",
+"0 1961-01-01 437.837952 415.826484 423.783737 431.987091 \n",
+"1 1961-02-01 426.062744 402.833553 407.694092 412.704956 \n",
+"2 1961-03-01 463.116577 423.434092 430.316893 437.412564 \n",
 "3 1961-04-01 478.244507 444.885193 446.776764 448.726837 \n",
 "4 1961-05-01 505.646484 465.736694 471.976787 478.409872 \n",
 "\n",
 " TimeGPT-hi-80 TimeGPT-hi-90 TimeGPT-hi-99.7 \n",
-"0 443.688782 451.892136 459.849389 \n",
-"1 439.420502 444.431366 449.291904 \n",
-"2 488.820560 495.916231 502.799032 \n",
+"0 443.688812 451.892166 459.849419 \n",
+"1 439.420532 444.431396 449.291935 \n",
+"2 488.820590 495.916261 502.799062 \n",
 "3 507.762177 509.712250 511.603821 \n",
 "4 532.883096 539.316182 545.556275 "
]
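
The regenerated output above corresponds to requesting three prediction-interval levels; a minimal sketch, assuming an AirPassengers-style frame like the one the tutorial uses (token and file name are placeholders):

    import pandas as pd
    from nixtlats import TimeGPT

    timegpt = TimeGPT(token='my_token')     # placeholder token
    df = pd.read_csv('air_passengers.csv')  # illustrative dataset

    # Requesting three interval levels yields the columns shown in the diff:
    # TimeGPT plus TimeGPT-lo-{level} / TimeGPT-hi-{level} for each level.
    fcst = timegpt.forecast(df, h=12, level=[80, 90, 99.7])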

0 commit comments