From cd28e12fb4047855d5984b65eb3947d97279f7ff Mon Sep 17 00:00:00 2001
From: Sachin Prasad
Date: Fri, 18 Apr 2025 15:20:52 -0700
Subject: [PATCH] Update to KerasHub package

---
 site/en/gemma/docs/core/lora_tuning.ipynb | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/site/en/gemma/docs/core/lora_tuning.ipynb b/site/en/gemma/docs/core/lora_tuning.ipynb
index ed8ea92f2..3b9b9edf3 100644
--- a/site/en/gemma/docs/core/lora_tuning.ipynb
+++ b/site/en/gemma/docs/core/lora_tuning.ipynb
@@ -83,7 +83,7 @@
     "\n",
     "[Low Rank Adaptation (LoRA)](https://arxiv.org/abs/2106.09685) is a fine-tuning technique which greatly reduces the number of trainable parameters for downstream tasks by freezing the weights of the model and inserting a smaller number of new weights into the model. This makes training with LoRA much faster and more memory-efficient, and produces smaller model weights (a few hundred MBs), all while maintaining the quality of the model outputs.\n",
     "\n",
-    "This tutorial walks you through using KerasNLP to perform LoRA fine-tuning on a Gemma 2B model using the [Databricks Dolly 15k dataset](https://huggingface.co/datasets/databricks/databricks-dolly-15k). This dataset contains 15,000 high-quality human-generated prompt / response pairs specifically designed for fine-tuning LLMs."
+    "This tutorial walks you through using KerasHub to perform LoRA fine-tuning on a Gemma 2B model using the [Databricks Dolly 15k dataset](https://huggingface.co/datasets/databricks/databricks-dolly-15k). This dataset contains 15,000 high-quality human-generated prompt / response pairs specifically designed for fine-tuning LLMs."
    ]
   },
   {
@@ -180,7 +180,7 @@
    "source": [
     "### Install dependencies\n",
     "\n",
-    "Install Keras, KerasNLP, and other dependencies."
+    "Install Keras, KerasHub, and other dependencies."
    ]
   },
   {
@@ -192,8 +192,8 @@
    "outputs": [],
    "source": [
     "# Install Keras 3 last. See https://keras.io/getting_started/ for more details.\n",
-    "!pip install -q -U keras-nlp\n",
-    "!pip install -q -U \"keras>=3\""
+    "!pip install -q -U keras-hub\n",
+    "!pip install -q -U keras"
    ]
   },
   {
@@ -230,7 +230,7 @@
    "source": [
     "### Import packages\n",
     "\n",
-    "Import Keras and KerasNLP."
+    "Import Keras and KerasHub."
    ]
   },
   {
@@ -242,7 +242,7 @@
    "outputs": [],
    "source": [
     "import keras\n",
-    "import keras_nlp"
+    "import keras_hub"
    ]
   },
   {
@@ -329,7 +329,7 @@
    "source": [
     "## Load Model\n",
     "\n",
-    "KerasNLP provides implementations of many popular [model architectures](https://keras.io/api/keras_nlp/models/). In this tutorial, you'll create a model using `GemmaCausalLM`, an end-to-end Gemma model for causal language modeling. A causal language model predicts the next token based on previous tokens.\n",
+    "KerasHub provides implementations of many popular [model architectures](https://keras.io/api/keras_hub/models/). In this tutorial, you'll create a model using `GemmaCausalLM`, an end-to-end Gemma model for causal language modeling. A causal language model predicts the next token based on previous tokens.\n",
     "\n",
     "Create the model using the `from_preset` method:"
    ]
   }
@@ -466,7 +466,7 @@
     }
    ],
    "source": [
-    "gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset(\"gemma2_2b_en\")\n",
+    "gemma_lm = keras_hub.models.GemmaCausalLM.from_preset(\"gemma2_2b_en\")\n",
     "gemma_lm.summary()"
    ]
   },
   {
@@ -557,7 +557,7 @@
     "    instruction=\"What should I do on a trip to Europe?\",\n",
     "    response=\"\",\n",
     ")\n",
-    "sampler = keras_nlp.samplers.TopKSampler(k=5, seed=2)\n",
+    "sampler = keras_hub.samplers.TopKSampler(k=5, seed=2)\n",
     "gemma_lm.compile(sampler=sampler)\n",
     "print(gemma_lm.generate(prompt, max_length=256))"
    ]
   },
@@ -912,7 +912,7 @@
     "    instruction=\"What should I do on a trip to Europe?\",\n",
     "    response=\"\",\n",
     ")\n",
-    "sampler = keras_nlp.samplers.TopKSampler(k=5, seed=2)\n",
+    "sampler = keras_hub.samplers.TopKSampler(k=5, seed=2)\n",
     "gemma_lm.compile(sampler=sampler)\n",
     "print(gemma_lm.generate(prompt, max_length=256))"
    ]
   },
@@ -993,12 +993,12 @@
    "source": [
     "## Summary and next steps\n",
     "\n",
-    "This tutorial covered LoRA fine-tuning on a Gemma model using KerasNLP. Check out the following docs next:\n",
+    "This tutorial covered LoRA fine-tuning on a Gemma model using KerasHub. Check out the following docs next:\n",
     "\n",
     "* Learn how to [generate text with a Gemma model](https://ai.google.dev/gemma/docs/get_started).\n",
     "* Learn how to perform [distributed fine-tuning and inference on a Gemma model](https://ai.google.dev/gemma/docs/core/distributed_tuning).\n",
     "* Learn how to [use Gemma open models with Vertex AI](https://cloud.google.com/vertex-ai/docs/generative-ai/open-models/use-gemma).\n",
-    "* Learn how to [fine-tune Gemma using KerasNLP and deploy to Vertex AI](https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/model_garden/model_garden_gemma_kerasnlp_to_vertexai.ipynb)."
+    "* Learn how to [fine-tune Gemma using KerasHub and deploy to Vertex AI](https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/model_garden/model_garden_gemma_kerasnlp_to_vertexai.ipynb)."
    ]
   }
  ],
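
Read together, the hunks above amount to the notebook flow sketched below. This is a minimal sketch assembled only from the patched cells: the preset name, sampler settings, and instruction text come straight from the hunks, while the bare-string prompt is an assumed stand-in for the notebook's template-built prompt, which this diff shows only in part.

    # After this patch, keras-nlp / keras_nlp becomes keras-hub / keras_hub:
    #   pip install -q -U keras-hub
    #   pip install -q -U keras

    import keras
    import keras_hub

    # Load the end-to-end causal LM from a preset, as in the patched cell.
    gemma_lm = keras_hub.models.GemmaCausalLM.from_preset("gemma2_2b_en")
    gemma_lm.summary()

    # Top-K sampling, matching the patched generation cells.
    sampler = keras_hub.samplers.TopKSampler(k=5, seed=2)
    gemma_lm.compile(sampler=sampler)

    # The notebook builds `prompt` from an instruction/response template;
    # a bare instruction string is used here as a stand-in.
    prompt = "What should I do on a trip to Europe?"
    print(gemma_lm.generate(prompt, max_length=256))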