diff --git a/site/en/gemma/docs/paligemma/inference-with-keras.ipynb b/site/en/gemma/docs/paligemma/inference-with-keras.ipynb index 6500d9fce..2cf76f803 100644 --- a/site/en/gemma/docs/paligemma/inference-with-keras.ipynb +++ b/site/en/gemma/docs/paligemma/inference-with-keras.ipynb @@ -119,7 +119,7 @@ "Before using PaliGemma for the first time, you must request access to the model through Kaggle by completing the following steps:\n", "\n", "1. Log in to [Kaggle](https://www.kaggle.com), or create a new Kaggle account if you don't already have one.\n", - "1. Go to the [PaliGemma model card](https://www.kaggle.com/models/google/paligemma/) and click **Request Access**.\n", + "1. Go to the [PaliGemma model card](https://www.kaggle.com/models/google/paligemma-2/) and click **Request Access**.\n", "1. Complete the consent form and accept the terms and conditions." ] }, @@ -200,7 +200,7 @@ }, "outputs": [], "source": [ - "!pip install -U -q keras-nlp" + "!pip install -U -q keras-hub kagglehub" ] }, { @@ -260,7 +260,7 @@ }, "outputs": [], "source": [ - "paligemma = keras_hub.models.PaliGemmaCausalLM.from_preset(\"pali_gemma_3b_mix_224\")\n", + "paligemma = keras_hub.models.PaliGemmaCausalLM.from_preset(\"kaggle://keras/paligemma2/keras/pali_gemma_2_mix_3b_224\")\n", "paligemma.summary()" ] }, @@ -302,7 +302,7 @@ " image = PIL.Image.open(contents)\n", " image = crop_and_resize(image, target_size)\n", " image = np.array(image)\n", - " # Remove alpha channel if neccessary.\n", + " # Remove alpha channel if necessary.\n", " if image.shape[2] == 4:\n", " image = image[:, :, :3]\n", " return image\n", @@ -492,7 +492,7 @@ "source": [ "### Use `segment` prompt\n", "\n", - "The following example code uses the `segment` prompt syntax to locate the area of an image occupied by an object. 
It uses the Google `big_vision` library to interpret the model output and generate a mask for the segemented object.\n", + "The following example code uses the `segment` prompt syntax to locate the area of an image occupied by an object. It uses the Google `big_vision` library to interpret the model output and generate a mask for the segmented object.\n", "\n", "Before getting started, install the `big_vision` library and its dependencies, as shown in this code example:\n" ]