@@ -434,8 +434,63 @@ pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
 pipe.load_textual_inversion("${model.id}")`,
 ];
 
+const diffusers_flux_fill = (model: ModelData) => [
+	`import torch
+from diffusers import FluxFillPipeline
+from diffusers.utils import load_image
+
+image = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup.png")
+mask = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/cup_mask.png")
+
+pipe = FluxFillPipeline.from_pretrained("${model.id}", torch_dtype=torch.bfloat16).to("cuda")
+image = pipe(
+    prompt="a white paper cup",
+    image=image,
+    mask_image=mask,
+    height=1632,
+    width=1232,
+    guidance_scale=30,
+    num_inference_steps=50,
+    max_sequence_length=512,
+    generator=torch.Generator("cpu").manual_seed(0)
+).images[0]
+image.save(f"flux-fill-dev.png")`,
+];
+
+const diffusers_inpainting = (model: ModelData) => [
+	`import torch
+from diffusers import AutoPipelineForInpainting
+from diffusers.utils import load_image
+
+pipe = AutoPipelineForInpainting.from_pretrained("${model.id}", torch_dtype=torch.float16, variant="fp16").to("cuda")
+
+img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
+mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
+
+image = load_image(img_url).resize((1024, 1024))
+mask_image = load_image(mask_url).resize((1024, 1024))
+
+prompt = "a tiger sitting on a park bench"
+generator = torch.Generator(device="cuda").manual_seed(0)
+
+image = pipe(
+    prompt=prompt,
+    image=image,
+    mask_image=mask_image,
+    guidance_scale=8.0,
+    num_inference_steps=20,  # steps between 15 and 30 work well for us
+    strength=0.99,  # make sure to use \`strength\` below 1.0
+    generator=generator,
+).images[0]`,
+];
+
 export const diffusers = (model: ModelData): string[] => {
-	if (model.tags.includes("controlnet")) {
+	if (
+		model.tags.includes("StableDiffusionInpaintPipeline") ||
+		model.tags.includes("StableDiffusionXLInpaintPipeline")
+	) {
+		return diffusers_inpainting(model);
+	} else if (model.tags.includes("controlnet")) {
 		return diffusers_controlnet(model);
 	} else if (model.tags.includes("lora")) {
 		if (model.pipeline_tag === "image-to-image") {
@@ -449,6 +504,8 @@ export const diffusers = (model: ModelData): string[] => {
 		}
 	} else if (model.tags.includes("textual_inversion")) {
 		return diffusers_textual_inversion(model);
+	} else if (model.tags.includes("FluxFillPipeline")) {
+		return diffusers_flux_fill(model);
 	} else if (model.pipeline_tag === "image-to-video") {
 		return diffusers_image_to_video(model);
 	} else if (model.pipeline_tag === "image-to-image") {
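For reviewers who want to sanity-check the new dispatch order, here is a minimal sketch of how the updated diffusers helper could be exercised. It assumes ModelData and diffusers are importable from the package's model-data and model-libraries-snippets modules, and the model id is illustrative; none of this is part of the diff itself.

import type { ModelData } from "./model-data";
import { diffusers } from "./model-libraries-snippets";

// Hypothetical metadata for a FLUX Fill checkpoint; only the fields the
// dispatcher reads (id, tags, pipeline_tag) matter here.
const model = {
	id: "black-forest-labs/FLUX.1-Fill-dev",
	tags: ["diffusers", "FluxFillPipeline"],
	pipeline_tag: "image-to-image",
} as ModelData;

// Tags are checked before pipeline_tag: the inpainting branch misses (no
// StableDiffusion*InpaintPipeline tag), controlnet/lora/textual_inversion
// miss, then "FluxFillPipeline" matches, so the FLUX Fill snippet is
// returned before the generic image-to-image fallback can fire.
const [snippet] = diffusers(model);
console.log(snippet);

Checking pipeline-class tags ahead of pipeline_tag is what lets inpainting-capable checkpoints keep their dedicated snippets even though they also carry a generic task tag like image-to-image.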