diff --git a/site/en/gemma/docs/codegemma/keras_quickstart.ipynb b/site/en/gemma/docs/codegemma/keras_quickstart.ipynb index e220465a3..286e41ea4 100644 --- a/site/en/gemma/docs/codegemma/keras_quickstart.ipynb +++ b/site/en/gemma/docs/codegemma/keras_quickstart.ipynb @@ -21,7 +21,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "cellView": "form", "id": "tuOe1ymfHZPu" @@ -88,7 +88,7 @@ "* A 7B instruction-tuned code model\n", "* A 2B model, trained specifically for code infilling and open-ended generation.\n", "\n", - "This guide walks you through using the CodeGemma 2B model with KerasNLP for a code completion task.\n" + "This guide walks you through using the CodeGemma 2B model with KerasHub for a code completion task.\n" ] }, { @@ -161,7 +161,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": { "id": "DrBoa_Urw9Vx" }, @@ -185,13 +185,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": { "id": "KWOQ2sJocj-w" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m792.1/792.1 kB\u001b[0m \u001b[31m16.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m53.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25h" + ] + } + ], "source": [ - "!pip install -q -U keras-nlp" + "!pip install -q -U keras-hub\n", + "!pip install -q -U keras" ] }, { @@ -216,7 +227,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": { "id": "ww83zI9ToPso" }, @@ -233,18 +244,18 @@ "source": [ "### Import packages\n", "\n", - "Import Keras and KerasNLP." + "Import Keras and KerasHub." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": { "id": "oQkqsyE1a2YD" }, "outputs": [], "source": [ - "import keras_nlp\n", + "import keras_hub\n", "import keras\n", "\n", "# Run at half precision.\n", @@ -259,30 +270,72 @@ "source": [ "### Load Model\n", "\n", - "KerasNLP provides implementations of many popular [model architectures](https://keras.io/api/keras_nlp/models/){:.external}. In this tutorial, you'll create a model using `GemmaCausalLM`, an end-to-end Gemma model for causal language modeling. A causal language model predicts the next token based on previous tokens.\n", + "KerasHub provides implementations of many popular [model architectures](https://keras.io/api/keras_nlp/models/){:.external}. In this tutorial, you'll create a model using `GemmaCausalLM`, an end-to-end Gemma model for causal language modeling. 
A causal language model predicts the next token based on previous tokens.\n", "\n", "Create the model using the `from_preset` method:" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": { "id": "yygIK9DEIldp" }, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading from https://www.kaggle.com/api/v1/models/keras/codegemma/keras/code_gemma_2b_en/2/download/config.json...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 785/785 [00:00<00:00, 1.64MB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading from https://www.kaggle.com/api/v1/models/keras/codegemma/keras/code_gemma_2b_en/2/download/model.weights.h5...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 4.67G/4.67G [00:50<00:00, 99.2MB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading from https://www.kaggle.com/api/v1/models/keras/codegemma/keras/code_gemma_2b_en/2/download/tokenizer.json...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 591/591 [00:00<00:00, 946kB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading from https://www.kaggle.com/api/v1/models/keras/codegemma/keras/code_gemma_2b_en/2/download/assets/tokenizer/vocabulary.spm...\n" + ] + }, { "name": "stderr", "output_type": "stream", "text": [ - "Downloading from https://www.kaggle.com/api/v1/models/keras/codegemma/keras/code_gemma_2b_en/1/download/config.json...\n", - "100%|██████████| 554/554 [00:00<00:00, 1.41MB/s]\n", - "Downloading from https://www.kaggle.com/api/v1/models/keras/codegemma/keras/code_gemma_2b_en/1/download/model.weights.h5...\n", - "100%|██████████| 4.67G/4.67G [05:06<00:00, 16.4MB/s]\n", - "Downloading from https://www.kaggle.com/api/v1/models/keras/codegemma/keras/code_gemma_2b_en/1/download/tokenizer.json...\n", - "100%|██████████| 401/401 [00:00<00:00, 382kB/s]\n", - "Downloading from https://www.kaggle.com/api/v1/models/keras/codegemma/keras/code_gemma_2b_en/1/download/assets/tokenizer/vocabulary.spm...\n", - "100%|██████████| 4.04M/4.04M [00:01<00:00, 2.41MB/s]\n" + "100%|██████████| 4.04M/4.04M [00:00<00:00, 43.1MB/s]\n" ] }, { @@ -301,19 +354,19 @@ { "data": { "text/html": [ - "
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", - "┃ Tokenizer (type) ┃ Vocab # ┃\n", - "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", - "│ gemma_tokenizer (GemmaTokenizer) │ 256,000 │\n", - "└────────────────────────────────────────────────────┴─────────────────────────────────────────────────────┘\n", + "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", + "┃ Layer (type) ┃ Config ┃\n", + "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", + "│ gemma_tokenizer (GemmaTokenizer) │ Vocab size: 256,000 │\n", + "└───────────────────────────────────────────────────────────────┴──────────────────────────────────────────┘\n", "\n" ], "text/plain": [ - "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", - "┃\u001b[1m \u001b[0m\u001b[1mTokenizer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Vocab #\u001b[0m\u001b[1m \u001b[0m┃\n", - "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", - "│ gemma_tokenizer (\u001b[38;5;33mGemmaTokenizer\u001b[0m) │ \u001b[38;5;34m256,000\u001b[0m │\n", - "└────────────────────────────────────────────────────┴─────────────────────────────────────────────────────┘\n" + "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", + "┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Config\u001b[0m\u001b[1m \u001b[0m┃\n", + "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", + "│ gemma_tokenizer (\u001b[38;5;33mGemmaTokenizer\u001b[0m) │ Vocab size: \u001b[38;5;34m256,000\u001b[0m │\n", + "└───────────────────────────────────────────────────────────────┴──────────────────────────────────────────┘\n" ] }, "metadata": {}, @@ -410,7 +463,7 @@ } ], "source": [ - "gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset(\"code_gemma_2b_en\")\n", + "gemma_lm = keras_hub.models.GemmaCausalLM.from_preset(\"code_gemma_2b_en\")\n", "gemma_lm.summary()" ] }, @@ -448,7 +501,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": { "id": "tGby-fi8n-Hv" }, @@ -471,7 +524,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": { "id": "k1ousdBnr2j8" }, @@ -507,7 +560,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": { "id": "N7UlgjSt5QnF" }, @@ -542,7 +595,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": { "id": "aae5GHrdpj2_" }, @@ -556,7 +609,7 @@ "'<|fim_prefix|>import <|fim_suffix|>if __name__ == \"__main__\":\\n sys.exit(0)<|fim_middle|>sys\\n<|file_separator|>'" ] }, - "execution_count": 12, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -582,7 +635,7 @@ "source": [ "## Summary\n", "\n", - "This tutorial walked you through using CodeGemma to infill code based on the surrounding context. 
Next, check out the [AI Assisted Programming with CodeGemma and KerasNLP notebook](https://ai.google.dev/gemma/docs/codegemma/code_assist_keras) for more examples on how you can use CodeGemma.\n", + "This tutorial walked you through using CodeGemma to infill code based on the surrounding context. Next, check out the [AI Assisted Programming with CodeGemma and KerasHub notebook](https://ai.google.dev/gemma/docs/codegemma/code_assist_keras) for more examples on how you can use CodeGemma.\n", "\n", "Also refer to the [CodeGemma model card](https://ai.google.dev/gemma/docs/codegemma/model_card) for the technical specs of the CodeGemma models.\n" ]
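
Reviewer note: the hunks above migrate the quickstart from `keras_nlp` to `keras_hub` across several cells (the install, the imports, the model load, and the FIM completion). For sanity-checking the migration, here is a minimal sketch that strings the updated steps together as one script. It is an illustration under stated assumptions, not a cell from the notebook: it assumes Kaggle credentials are already set in the environment, and the `max_length` value is an arbitrary choice for the example.

```python
# Minimal sketch of the migrated workflow, assembled from the hunks above.
# Assumes KAGGLE_USERNAME / KAGGLE_KEY are already set in the environment.
import keras
import keras_hub

# Run at half precision, matching the notebook's import cell.
keras.config.set_floatx("bfloat16")

# keras_hub.models replaces keras_nlp.models; the preset name is unchanged.
gemma_lm = keras_hub.models.GemmaCausalLM.from_preset("code_gemma_2b_en")

# Fill-in-the-middle (FIM) prompt: the model generates the code that belongs
# between <|fim_prefix|> and <|fim_suffix|>, emitting it after <|fim_middle|>.
prompt = (
    "<|fim_prefix|>import <|fim_suffix|>"
    'if __name__ == "__main__":\n    sys.exit(0)'
    "<|fim_middle|>"
)

# max_length here is an illustrative value, not taken from the notebook.
print(gemma_lm.generate(prompt, max_length=64))
```

If the migration is correct, the printed result should match the shape of the cell output shown in the diff: the prompt echoed back, the completion (`sys`) after the `<|fim_middle|>` token, and a terminating `<|file_separator|>`.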