diff --git a/site/en/gemma/docs/codegemma/codegemma_flax_inference.ipynb b/site/en/gemma/docs/codegemma/codegemma_flax_inference.ipynb
index f121b7aee..85dc0f08e 100644
--- a/site/en/gemma/docs/codegemma/codegemma_flax_inference.ipynb
+++ b/site/en/gemma/docs/codegemma/codegemma_flax_inference.ipynb
@@ -48,13 +48,13 @@
    "source": [
     "\n",
     " \n",
     " \n",
     " \n",
-    " View on ai.google.dev\n",
+    " View on ai.google.dev\n",
     " \n",
-    " Run in Google Colab\n",
+    " Run in Google Colab\n",
     " \n",
-    " View source on GitHub\n",
+    " View source on GitHub\n",
     " \n",
     " "
    ]
" ] @@ -148,11 +148,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "XpSw-_4EEcoY", - "outputId": "ff9d2cab-80e1-4e5f-b976-94769cd3e730" + "id": "XpSw-_4EEcoY" }, "outputs": [ { @@ -229,11 +225,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "j_QdPAGyO5zl", - "outputId": "8181d17f-da02-4d1b-ce34-cbd048362007" + "id": "j_QdPAGyO5zl" }, "outputs": [ { @@ -257,11 +249,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "cjnXlLkWcHIy", - "outputId": "79cfb87d-fef7-4eb5-f452-48294c352bd6" + "id": "cjnXlLkWcHIy" }, "outputs": [ { @@ -301,11 +289,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "JAwXvpzbuiB5", - "outputId": "b526c792-1105-47ea-932d-3c8d3a1919bc" + "id": "JAwXvpzbuiB5" }, "outputs": [ { @@ -366,11 +350,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "TpyG5YW1EcoY", - "outputId": "be890773-f521-45a5-d379-4036c9cbb3de" + "id": "TpyG5YW1EcoY" }, "outputs": [ { @@ -511,11 +491,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "S5F3fk22Ecod", - "outputId": "283d6e07-1ea4-4240-ebc8-464263df9a4c" + "id": "S5F3fk22Ecod" }, "outputs": [ { @@ -558,11 +534,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "6zIQEruE5_FC", - "outputId": "29e4e090-fd05-432d-ca13-bbc42443b958" + "id": "6zIQEruE5_FC" }, "outputs": [ { @@ -604,11 +576,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "SvaV4GU76M3t", - "outputId": "76d55cf4-a586-4faa-cb8a-7968b7e8ada0" + "id": "SvaV4GU76M3t" }, "outputs": [ { @@ -675,15 +643,12 @@ "metadata": { "accelerator": "TPU", "colab": { - "gpuType": "V28", - "provenance": [] + "name": "codegemma_flax_inference.ipynb", + "toc_visible": true }, "kernelspec": { "display_name": "Python 3", "name": "python3" - }, - "language_info": { - "name": "python" } }, "nbformat": 4, diff --git a/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb b/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb index 173d43115..dbb09ed6a 100644 --- a/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb +++ b/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb @@ -13,6 +13,7 @@ "cell_type": "code", "execution_count": null, "metadata": { + "cellView": "form", "id": "_fEE8rM9BUfS" }, "outputs": [], @@ -38,17 +39,17 @@ "source": [ "# Fine-tune PaliGemma with JAX and Flax\n", "\n", - "\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n", - "\u003ctd\u003e\n", - "\u003ca target=\"_blank\" href=\"https://ai.google.dev/gemma/docs/paligemma/fine-tuning-paligemma\"\u003e\u003cimg src=\"https://ai.google.dev/static/site-assets/images/docs/notebook-site-button.png\" height=\"32\" width=\"32\" /\u003eView on ai.google.dev\u003c/a\u003e\n", - "\u003c/td\u003e\n", - "\u003ctd\u003e\n", - "\u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/google/generative-ai-docs/blob/main/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in 
-    "\u003c/td\u003e\n",
-    "\u003ctd\u003e\n",
-    "\u003ca target=\"_blank\" href=\"https://github.com/google/generative-ai-docs/blob/main/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n",
-    "\u003c/td\u003e\n",
-    "\u003c/table\u003e\n"
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "View on ai.google.dev\n",
+    "\n",
+    "Run in Google Colab\n",
+    "\n",
+    "View source on GitHub\n",
+    " \n"
    ]
   },
   {
@@ -117,8 +118,7 @@
     "\n",
     "To generate a Kaggle API key, open your [**Settings** page in Kaggle](https://www.kaggle.com/settings) and click **Create New Token**. This triggers the download of a `kaggle.json` file containing your API credentials.\n",
     "\n",
-    "Then, in Colab, select **Secrets** (🔑) in the left pane and add your Kaggle username and Kaggle API key. Store your username under the name `KAGGLE_USERNAME` and your API key under the name `KAGGLE_KEY`.\n",
-    "\n"
+    "Then, in Colab, select **Secrets** (🔑) in the left pane and add your Kaggle username and Kaggle API key. Store your username under the name `KAGGLE_USERNAME` and your API key under the name `KAGGLE_KEY`.\n"
    ]
   },
   {
@@ -482,7 +482,7 @@
     "\n",
     "  image = tf.constant(image)\n",
     "  image = tf.image.resize(image, (size, size), method='bilinear', antialias=True)\n",
-    "  return image.numpy() / 127.5 - 1.0  # [0, 255]-\u003e[-1,1]\n",
+    "  return image.numpy() / 127.5 - 1.0  # [0, 255]->[-1,1]\n",
     "\n",
     "def preprocess_tokens(prefix, suffix=None, seqlen=None):\n",
     "  # Model has been trained to handle tokenized text composed of a prefix with\n",
@@ -622,12 +622,12 @@
     "  return f\"data:image/jpeg;base64,{image_b64}\"\n",
     "\n",
     "def render_example(image, caption):\n",
-    "  image = ((image + 1)/2 * 255).astype(np.uint8)  # [-1,1] -\u003e [0, 255]\n",
+    "  image = ((image + 1)/2 * 255).astype(np.uint8)  # [-1,1] -> [0, 255]\n",
     "  return f\"\"\"\n",
-    "  \u003cdiv style=\"display: inline-flex; align-items: center; justify-content: center;\"\u003e\n",
-    "    \u003cimg style=\"width:128px; height:128px;\" src=\"{render_inline(image, resize=(64,64))}\" /\u003e\n",
-    "    \u003cp style=\"width:256px; margin:10px; font-size:small;\"\u003e{html.escape(caption)}\u003c/p\u003e\n",
-    "  \u003c/div\u003e\n",
+    "  <div style=\"display: inline-flex; align-items: center; justify-content: center;\">\n",
\n", + " \n", + "

{html.escape(caption)}

\n", + "
\n", " \"\"\"\n", "\n", "html_out = \"\"\n", @@ -744,7 +744,7 @@ " # Append to html output.\n", " for example, response in zip(examples, responses):\n", " outputs.append((example[\"image\"], response))\n", - " if num_examples and len(outputs) \u003e= num_examples:\n", + " if num_examples and len(outputs) >= num_examples:\n", " return outputs" ] }, @@ -852,34 +852,12 @@ ], "metadata": { "colab": { - "gpuType": "T4", - "last_runtime": { - "build_target": "//learning/grp/tools/ml_python:ml_notebook", - "kind": "private" - }, - "private_outputs": true, - "provenance": [ - { - "file_id": "17AiK8gRY7oiquQGkBH0d08PFQo3Kyx1I", - "timestamp": 1715287187925 - }, - { - "file_id": "1qZlJfPyfKRrNcz2shxQ93HnnE5Ge1LLn", - "timestamp": 1715019972450 - }, - { - "file_id": "1JFnlD2kSiTNexdPw_NYRtuW6uuSTI0kD", - "timestamp": 1714585741026 - } - ], + "name": "fine-tuning-paligemma.ipynb", "toc_visible": true }, "kernelspec": { "display_name": "Python 3", "name": "python3" - }, - "language_info": { - "name": "python" } }, "nbformat": 4,