Add "Use this model" snippets for top diffusers models #1642
Merged
Changes from 2 commits (4 commits in total)
@@ -434,8 +434,63 @@ pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
 pipe.load_textual_inversion("${model.id}")`,
 ];
 
+const diffusers_flux_fill = (model: ModelData) => [
+	`import torch
+from diffusers import FluxFillPipeline
+from diffusers.utils import load_image
+
+image = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup.png")
+mask = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup_mask.png")
+
+pipe = FluxFillPipeline.from_pretrained("${model.id}", torch_dtype=torch.bfloat16).to("cuda")
+image = pipe(
+    prompt="a white paper cup",
+    image=image,
+    mask_image=mask,
+    height=1632,
+    width=1232,
+    guidance_scale=30,
+    num_inference_steps=50,
+    max_sequence_length=512,
+    generator=torch.Generator("cpu").manual_seed(0)
+).images[0]
+image.save(f"flux-fill-dev.png")`,
+];
+
+const diffusers_inpainting = (model: ModelData) => [
+	`import torch
+from diffusers import AutoPipelineForInpainting
+from diffusers.utils import load_image
+
+pipe = AutoPipelineForInpainting.from_pretrained("${model.id}", torch_dtype=torch.float16, variant="fp16").to("cuda")
+
+img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
+mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
+
+image = load_image(img_url).resize((1024, 1024))
+mask_image = load_image(mask_url).resize((1024, 1024))
+
+prompt = "a tiger sitting on a park bench"
+generator = torch.Generator(device="cuda").manual_seed(0)
+
+image = pipe(
+    prompt=prompt,
+    image=image,
+    mask_image=mask_image,
+    guidance_scale=8.0,
+    num_inference_steps=20, # steps between 15 and 30 work well for us
+    strength=0.99, # make sure to use \`strength\` below 1.0
+    generator=generator,
+).images[0]`,
+];
+
 export const diffusers = (model: ModelData): string[] => {
-	if (model.tags.includes("controlnet")) {
+	if (
+		model.tags.includes("StableDiffusionInpaintPipeline") ||
+		model.tags.includes("StableDiffusionXLInpaintPipeline")
+	) {
+		return diffusers_inpainting(model);
+	} else if (model.tags.includes("controlnet")) {
 		return diffusers_controlnet(model);
 	} else if (model.tags.includes("lora")) {
 		if (model.pipeline_tag === "image-to-image") {

Review comment on the diffusers_inpainting branch: "Example that doesn't need extra libraries to install"
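For orientation, here is a minimal sketch, not part of the diff, of how the new FLUX fill builder could be exercised in a quick local check inside this module; the ModelData object below is simplified with a cast and the model id is only illustrative.

// Hypothetical local check, not part of the PR; the id below is illustrative.
const mockFluxFillModel = {
	id: "black-forest-labs/FLUX.1-Fill-dev",
	tags: ["diffusers", "FluxFillPipeline"],
} as unknown as ModelData;

// Each builder returns an array of ready-to-paste Python snippets,
// with "${model.id}" already interpolated into from_pretrained().
const [fluxFillSnippet] = diffusers_flux_fill(mockFluxFillModel);
console.log(fluxFillSnippet);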
@@ -449,6 +504,8 @@ export const diffusers = (model: ModelData): string[] => {
 		}
 	} else if (model.tags.includes("textual_inversion")) {
 		return diffusers_textual_inversion(model);
+	} else if (model.tags.includes("FluxFillPipeline")) {
+		return diffusers_flux_fill(model);
 	} else if (model.pipeline_tag === "image-to-video") {
 		return diffusers_image_to_video(model);
 	} else if (model.pipeline_tag === "image-to-image") {

Review comment on the FluxFillPipeline branch: "Example that doesn't need extra libraries to install"
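The effect of the routing change can be sketched the same way, again hypothetically and with placeholder ids: the inpainting check now runs before the controlnet check, and the FluxFillPipeline tag is handled after textual_inversion.

// Hypothetical routing check, not part of the PR; ids and tag lists are placeholders.
const fluxFillFinetune = {
	id: "someorg/flux-fill-finetune",
	tags: ["diffusers", "FluxFillPipeline"],
} as unknown as ModelData;
const inpaintingControlnet = {
	id: "someorg/sdxl-inpainting-controlnet",
	tags: ["diffusers", "controlnet", "StableDiffusionXLInpaintPipeline"],
} as unknown as ModelData;

diffusers(fluxFillFinetune);     // no earlier tag matches, so the new FluxFillPipeline branch returns diffusers_flux_fill(...)
diffusers(inpaintingControlnet); // the inpainting branch wins even though "controlnet" is also tagged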
@@ -642,6 +699,59 @@ pipeline = Pipeline(
 ])`,
 ];
 
+export const hunyuan3d_2 = (model: ModelData): string[] => [
+	`# In order to use this model, the Hunyuan3D-2 repo must be installed.
+# git clone https://github.com/Tencent-Hunyuan/Hunyuan3D-2.git
+# cd Hunyuan3D-2
+# pip install -r requirements.txt
+# pip install -e .
+# Install custom CUDA kernels for texture generation
+# python hy3dgen/texgen/custom_rasterizer/setup.py install
+# python hy3dgen/texgen/differentiable_renderer/setup.py install
+# cd ..
+
+# Note: This model requires a GPU with at least 16GB of VRAM.
+
+import torch
+from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline
+from hy3dgen.texgen import Hunyuan3DPaintPipeline
+from PIL import Image
+import requests
+from io import BytesIO
+
+# Ensure you're on a GPU runtime
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+# Load a sample image
+image_url = f"https://raw.githubusercontent.com/Tencent-Hunyuan/Hunyuan3D-2.1/refs/heads/main/assets/example_images/004.png"
+response = requests.get(image_url)
+image = Image.open(BytesIO(response.content)).convert("RGB")
+
+# 1. Generate the 3D shape from the image
+# Use torch.float16 for lower VRAM usage.
+shape_pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(
+    "${model.id}",
+    torch_dtype=torch.float16
+)
+shape_pipeline.to(device)
+mesh = shape_pipeline(image=image)[0]
+
+# 2. Generate the texture for the mesh
+texture_pipeline = Hunyuan3DPaintPipeline.from_pretrained(
+    "${model.id}",
+    torch_dtype=torch.float16
+)
+texture_pipeline.to(device)
+textured_mesh = texture_pipeline(mesh, image=image)
+
+# 3. Save the final textured mesh
+output_path = "textured_mesh.glb"
+textured_mesh.export(output_path)
+
+print(f"Textured mesh saved to {output_path}")
+`,
+];
+
 export const keras = (model: ModelData): string[] => [
 	`# Available backend options are: "jax", "torch", "tensorflow".
 import os
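A similarly hypothetical sketch for the new hunyuan3d_2 export, assuming a simplified ModelData object; the id and tag below are placeholders.

// Hypothetical usage, not part of the PR; id and tags are placeholders.
const hunyuanModel = {
	id: "tencent/Hunyuan3D-2",
	tags: ["image-to-3d"],
} as unknown as ModelData;

// The single returned snippet bundles the install steps (as comments) with the
// shape-generation and texturing code, and "${model.id}" is interpolated into
// both from_pretrained() calls.
const [hunyuanSnippet] = hunyuan3d_2(hunyuanModel);
console.log(hunyuanSnippet.includes('"tencent/Hunyuan3D-2"')); // true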