
Commit 0213179

Update README and example code for AnyText usage (#11028)
* [Documentation] Update README and example code with additional usage instructions for AnyText
* [Documentation] Update README for AnyTextPipeline and improve logging in code
* Remove wget command for font file from example docstring in anytext.py
1 parent a7d53a5

2 files changed: +23 -9 lines changed

examples/research_projects/anytext/README.md

Lines changed: 12 additions & 4 deletions
````diff
@@ -1,20 +1,27 @@
-# AnyTextPipeline Pipeline
+# AnyTextPipeline
 
 Project page: https://aigcdesigngroup.github.io/homepage_anytext
 
 "AnyText comprises a diffusion pipeline with two primary elements: an auxiliary latent module and a text embedding module. The former uses inputs like text glyph, position, and masked image to generate latent features for text generation or editing. The latter employs an OCR model for encoding stroke data as embeddings, which blend with image caption embeddings from the tokenizer to generate texts that seamlessly integrate with the background. We employed text-control diffusion loss and text perceptual loss for training to further enhance writing accuracy."
 
-Each text line that needs to be generated should be enclosed in double quotes. For any usage questions, please refer to the [paper](https://arxiv.org/abs/2311.03054).
+> **Note:** Each text line that needs to be generated should be enclosed in double quotes.
 
+For any usage questions, please refer to the [paper](https://arxiv.org/abs/2311.03054).
+
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/gist/tolgacangoz/b87ec9d2f265b448dd947c9d4a0da389/anytext.ipynb)
 
 ```py
+# This example requires the `anytext_controlnet.py` file:
+# !git clone --depth 1 https://github.com/huggingface/diffusers.git
+# %cd diffusers/examples/research_projects/anytext
+# Let's choose a font file shared by an HF staff:
+# !wget https://huggingface.co/spaces/ysharma/TranslateQuotesInImageForwards/resolve/main/arial-unicode-ms.ttf
+
 import torch
 from diffusers import DiffusionPipeline
 from anytext_controlnet import AnyTextControlNetModel
 from diffusers.utils import load_image
 
-# I chose a font file shared by an HF staff:
-# !wget https://huggingface.co/spaces/ysharma/TranslateQuotesInImageForwards/resolve/main/arial-unicode-ms.ttf
 
 anytext_controlnet = AnyTextControlNetModel.from_pretrained("tolgacangoz/anytext-controlnet", torch_dtype=torch.float16,
                                                             variant="fp16",)
@@ -26,6 +33,7 @@ pipe = DiffusionPipeline.from_pretrained("tolgacangoz/anytext", font_path="arial
 # generate image
 prompt = 'photo of caramel macchiato coffee on the table, top-down perspective, with "Any" "Text" written on it using cream'
 draw_pos = load_image("https://raw.githubusercontent.com/tyxsspa/AnyText/refs/heads/main/example_images/gen9.png")
+# There are two modes: "generate" and "edit". "edit" mode requires `ori_image` parameter for the image to be edited.
 image = pipe(prompt, num_inference_steps=20, mode="generate", draw_pos=draw_pos,
              ).images[0]
 image
````
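The new comment notes an "edit" mode alongside "generate". Below is a minimal sketch of that mode, assuming `pipe` is built exactly as in the README example above; the local `original.png` path is a placeholder for the image whose text is to be edited.

```py
from diffusers.utils import load_image

# Placeholder path: the source image whose text will be edited.
# Per the comment added in this commit, "edit" mode requires `ori_image`.
ori_image = load_image("original.png")
draw_pos = load_image("https://raw.githubusercontent.com/tyxsspa/AnyText/refs/heads/main/example_images/gen9.png")

prompt = 'photo of caramel macchiato coffee on the table, top-down perspective, with "Any" "Text" written on it using cream'
image = pipe(prompt, num_inference_steps=20, mode="edit",
             draw_pos=draw_pos, ori_image=ori_image).images[0]
```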

examples/research_projects/anytext/anytext.py

Lines changed: 11 additions & 5 deletions
````diff
@@ -146,14 +146,17 @@ def _is_whitespace(self, char):
 EXAMPLE_DOC_STRING = """
     Examples:
         ```py
+        >>> # This example requires the `anytext_controlnet.py` file:
+        >>> # !git clone --depth 1 https://github.com/huggingface/diffusers.git
+        >>> # %cd diffusers/examples/research_projects/anytext
+        >>> # Let's choose a font file shared by an HF staff:
+        >>> # !wget https://huggingface.co/spaces/ysharma/TranslateQuotesInImageForwards/resolve/main/arial-unicode-ms.ttf
+
         >>> import torch
         >>> from diffusers import DiffusionPipeline
         >>> from anytext_controlnet import AnyTextControlNetModel
         >>> from diffusers.utils import load_image
 
-        >>> # I chose a font file shared by an HF staff:
-        >>> !wget https://huggingface.co/spaces/ysharma/TranslateQuotesInImageForwards/resolve/main/arial-unicode-ms.ttf
-
         >>> anytext_controlnet = AnyTextControlNetModel.from_pretrained("tolgacangoz/anytext-controlnet", torch_dtype=torch.float16,
         ...                                                             variant="fp16",)
         >>> pipe = DiffusionPipeline.from_pretrained("tolgacangoz/anytext", font_path="arial-unicode-ms.ttf",
@@ -165,6 +168,7 @@ def _is_whitespace(self, char):
         >>> # generate image
         >>> prompt = 'photo of caramel macchiato coffee on the table, top-down perspective, with "Any" "Text" written on it using cream'
         >>> draw_pos = load_image("https://raw.githubusercontent.com/tyxsspa/AnyText/refs/heads/main/example_images/gen9.png")
+        >>> # There are two modes: "generate" and "edit". "edit" mode requires `ori_image` parameter for the image to be edited.
         >>> image = pipe(prompt, num_inference_steps=20, mode="generate", draw_pos=draw_pos,
         ...              ).images[0]
         >>> image
````
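For context, `EXAMPLE_DOC_STRING` constants like this are conventionally spliced into a pipeline's `__call__` docstring via diffusers' `replace_example_docstring` decorator. A sketch of that pattern, assuming (this diff doesn't show it) that `anytext.py` follows the usual convention:

```py
import torch
from diffusers.utils import replace_example_docstring

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> image = pipe("a prompt", mode="generate").images[0]
        ```
"""


class AnyTextPipeline:
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt, **kwargs):
        """
        Function invoked when calling the pipeline.

        Examples:
        """
        # The decorator replaces the "Examples:" line above with
        # EXAMPLE_DOC_STRING when the module is imported.
        ...
```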
```diff
@@ -257,11 +261,11 @@ def forward(
             idx = tokenized_text[i] == self.placeholder_token.to(device)
             if sum(idx) > 0:
                 if i >= len(self.text_embs_all):
-                    print("truncation for log images...")
+                    logger.warning("truncation for log images...")
                     break
                 text_emb = torch.cat(self.text_embs_all[i], dim=0)
                 if sum(idx) != len(text_emb):
-                    print("truncation for long caption...")
+                    logger.warning("truncation for long caption...")
                 text_emb = text_emb.to(embedded_text.device)
                 embedded_text[i][idx] = text_emb[: sum(idx)]
         return embedded_text
```
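The `print` → `logger.warning` switch presupposes a module-level `logger`. A sketch of the standard diffusers setup, assuming `anytext.py` defines it this way near the top of the file:

```py
from diffusers.utils import logging

# Standard per-module logger in diffusers; warnings then respect the
# library's configured log level and handlers instead of printing
# unconditionally to stdout.
logger = logging.get_logger(__name__)

logger.warning("truncation for long caption...")
```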
```diff
@@ -1058,6 +1062,8 @@ def forward(
                 raise ValueError(f"Can't read ori_image image from {ori_image}!")
             elif isinstance(ori_image, torch.Tensor):
                 ori_image = ori_image.cpu().numpy()
+            elif isinstance(ori_image, PIL.Image.Image):
+                ori_image = np.array(ori_image.convert("RGB"))
         else:
             if not isinstance(ori_image, np.ndarray):
                 raise ValueError(f"Unknown format of ori_image: {type(ori_image)}")
```
