|
161 | 161 | "metadata": {},
|
162 | 162 | "outputs": [],
|
163 | 163 | "source": [
|
164 |
| - "%pip install -qqU diffusers transformers bitsandbytes accelerate ftfy datasets ipywidgets" |
| 164 | + "%pip install -qqU diffusers transformers bitsandbytes accelerate ftfy datasets" |
165 | 165 | ]
|
166 | 166 | },
|
167 | 167 | {
|
|
887 | 887 | "source": [
|
888 | 888 | "import math\n",
|
889 | 889 | "\n",
|
890 |
| - "import bitsandbytes as bnb\n", |
891 | 890 | "import torch.nn.functional as F\n",
|
892 | 891 | "from accelerate import Accelerator\n",
|
893 | 892 | "from accelerate.utils import set_seed\n",
|
|
910 | 909 | "\n",
|
911 | 910 | "    # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs\n",
|
912 | 911 | " if args.use_8bit_adam:\n",
|
| 912 | + " import bitsandbytes as bnb\n", |
913 | 913 | " optimizer_class = bnb.optim.AdamW8bit\n",
|
914 | 914 | " else:\n",
|
915 | 915 | " optimizer_class = torch.optim.AdamW\n",
|
|
1019 | 1019 | "\n",
|
1020 | 1020 | "\n    # Create the pipeline using the trained modules and save it.\n",
|
1021 | 1021 | " if accelerator.is_main_process:\n",
|
| 1022 | + " print(f\"Loading pipeline and saving to {args.output_dir}...\")\n", |
1022 | 1023 | " scheduler = PNDMScheduler(\n",
|
1023 | 1024 | " beta_start=0.00085,\n",
|
1024 | 1025 | " beta_end=0.012,\n",
|
|
1185 | 1186 | }
|
1186 | 1187 | ],
|
1187 | 1188 | "source": [
|
1188 |
| - "# Pick a funny prompt here and it will be saved as the default for widget on the Hub!\n", |
| 1189 | + "# Pick a funny prompt here and it will be used as the widget's default\n", |
| 1190 | + "# when we push to the Hub in the next section\n", |
1189 | 1191 | "prompt = f\"a photo of {name_of_your_concept} {type_of_thing} in the Acropolis\"\n",
|
| 1192 | + "\n", |
1190 | 1193 | "# Tune the guidance to control how closely the generations follow the prompt.\n",
|
1191 | 1194 | "# Values between 7-11 usually work best\n",
|
1192 | 1195 | "guidance_scale = 7\n",
|
|
0 commit comments