Commit 54043c3

Update VAE Decode endpoints (#10939)
1 parent fc4229a


tests/remote/test_remote_decode.py

Lines changed: 103 additions & 103 deletions
@@ -344,7 +344,7 @@ class RemoteAutoencoderKLSDv1Tests(
         512,
         512,
     )
-    endpoint = "https://bz0b3zkoojf30bhx.us-east-1.aws.endpoints.huggingface.cloud/"
+    endpoint = "https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud/"
     dtype = torch.float16
     scaling_factor = 0.18215
     shift_factor = None
@@ -354,105 +354,105 @@ class RemoteAutoencoderKLSDv1Tests(
     return_pt_slice = torch.tensor([-0.2177, 0.0217, -0.2258, 0.0412, -0.1687, -0.1232, -0.2416, -0.2130, -0.0543])
 
 
-# class RemoteAutoencoderKLSDXLTests(
-#     RemoteAutoencoderKLMixin,
-#     unittest.TestCase,
-# ):
-#     shape = (
-#         1,
-#         4,
-#         128,
-#         128,
-#     )
-#     out_hw = (
-#         1024,
-#         1024,
-#     )
-#     endpoint = "https://fagf07t3bwf0615i.us-east-1.aws.endpoints.huggingface.cloud/"
-#     dtype = torch.float16
-#     scaling_factor = 0.13025
-#     shift_factor = None
-#     processor_cls = VaeImageProcessor
-#     output_pt_slice = torch.tensor([104, 52, 23, 114, 61, 35, 108, 87, 38], dtype=torch.uint8)
-#     partial_postprocess_return_pt_slice = torch.tensor([77, 86, 89, 49, 60, 75, 52, 65, 78], dtype=torch.uint8)
-#     return_pt_slice = torch.tensor([-0.3945, -0.3289, -0.2993, -0.6177, -0.5259, -0.4119, -0.5898, -0.4863, -0.3845])
-
-
-# class RemoteAutoencoderKLFluxTests(
-#     RemoteAutoencoderKLMixin,
-#     unittest.TestCase,
-# ):
-#     shape = (
-#         1,
-#         16,
-#         128,
-#         128,
-#     )
-#     out_hw = (
-#         1024,
-#         1024,
-#     )
-#     endpoint = "https://fnohtuwsskxgxsnn.us-east-1.aws.endpoints.huggingface.cloud/"
-#     dtype = torch.bfloat16
-#     scaling_factor = 0.3611
-#     shift_factor = 0.1159
-#     processor_cls = VaeImageProcessor
-#     output_pt_slice = torch.tensor([110, 72, 91, 62, 35, 52, 69, 55, 69], dtype=torch.uint8)
-#     partial_postprocess_return_pt_slice = torch.tensor(
-#         [202, 203, 203, 197, 195, 193, 189, 188, 178], dtype=torch.uint8
-#     )
-#     return_pt_slice = torch.tensor([0.5820, 0.5962, 0.5898, 0.5439, 0.5327, 0.5112, 0.4797, 0.4773, 0.3984])
-
-
-# class RemoteAutoencoderKLFluxPackedTests(
-#     RemoteAutoencoderKLMixin,
-#     unittest.TestCase,
-# ):
-#     shape = (
-#         1,
-#         4096,
-#         64,
-#     )
-#     out_hw = (
-#         1024,
-#         1024,
-#     )
-#     height = 1024
-#     width = 1024
-#     endpoint = "https://fnohtuwsskxgxsnn.us-east-1.aws.endpoints.huggingface.cloud/"
-#     dtype = torch.bfloat16
-#     scaling_factor = 0.3611
-#     shift_factor = 0.1159
-#     processor_cls = VaeImageProcessor
-#     # slices are different due to randn on different shape. we can pack the latent instead if we want the same
-#     output_pt_slice = torch.tensor([96, 116, 157, 45, 67, 104, 34, 56, 89], dtype=torch.uint8)
-#     partial_postprocess_return_pt_slice = torch.tensor(
-#         [168, 212, 202, 155, 191, 185, 150, 180, 168], dtype=torch.uint8
-#     )
-#     return_pt_slice = torch.tensor([0.3198, 0.6631, 0.5864, 0.2131, 0.4944, 0.4482, 0.1776, 0.4153, 0.3176])
-
-
-# class RemoteAutoencoderKLHunyuanVideoTests(
-#     RemoteAutoencoderKLHunyuanVideoMixin,
-#     unittest.TestCase,
-# ):
-#     shape = (
-#         1,
-#         16,
-#         3,
-#         40,
-#         64,
-#     )
-#     out_hw = (
-#         320,
-#         512,
-#     )
-#     endpoint = "https://lsx2injm3ts8wbvv.us-east-1.aws.endpoints.huggingface.cloud/"
-#     dtype = torch.float16
-#     scaling_factor = 0.476986
-#     processor_cls = VideoProcessor
-#     output_pt_slice = torch.tensor([112, 92, 85, 112, 93, 85, 112, 94, 85], dtype=torch.uint8)
-#     partial_postprocess_return_pt_slice = torch.tensor(
-#         [149, 161, 168, 136, 150, 156, 129, 143, 149], dtype=torch.uint8
-#     )
-#     return_pt_slice = torch.tensor([0.1656, 0.2661, 0.3157, 0.0693, 0.1755, 0.2252, 0.0127, 0.1221, 0.1708])
+class RemoteAutoencoderKLSDXLTests(
+    RemoteAutoencoderKLMixin,
+    unittest.TestCase,
+):
+    shape = (
+        1,
+        4,
+        128,
+        128,
+    )
+    out_hw = (
+        1024,
+        1024,
+    )
+    endpoint = "https://x2dmsqunjd6k9prw.us-east-1.aws.endpoints.huggingface.cloud/"
+    dtype = torch.float16
+    scaling_factor = 0.13025
+    shift_factor = None
+    processor_cls = VaeImageProcessor
+    output_pt_slice = torch.tensor([104, 52, 23, 114, 61, 35, 108, 87, 38], dtype=torch.uint8)
+    partial_postprocess_return_pt_slice = torch.tensor([77, 86, 89, 49, 60, 75, 52, 65, 78], dtype=torch.uint8)
+    return_pt_slice = torch.tensor([-0.3945, -0.3289, -0.2993, -0.6177, -0.5259, -0.4119, -0.5898, -0.4863, -0.3845])
+
+
+class RemoteAutoencoderKLFluxTests(
+    RemoteAutoencoderKLMixin,
+    unittest.TestCase,
+):
+    shape = (
+        1,
+        16,
+        128,
+        128,
+    )
+    out_hw = (
+        1024,
+        1024,
+    )
+    endpoint = "https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud/"
+    dtype = torch.bfloat16
+    scaling_factor = 0.3611
+    shift_factor = 0.1159
+    processor_cls = VaeImageProcessor
+    output_pt_slice = torch.tensor([110, 72, 91, 62, 35, 52, 69, 55, 69], dtype=torch.uint8)
+    partial_postprocess_return_pt_slice = torch.tensor(
+        [202, 203, 203, 197, 195, 193, 189, 188, 178], dtype=torch.uint8
+    )
+    return_pt_slice = torch.tensor([0.5820, 0.5962, 0.5898, 0.5439, 0.5327, 0.5112, 0.4797, 0.4773, 0.3984])
+
+
+class RemoteAutoencoderKLFluxPackedTests(
+    RemoteAutoencoderKLMixin,
+    unittest.TestCase,
+):
+    shape = (
+        1,
+        4096,
+        64,
+    )
+    out_hw = (
+        1024,
+        1024,
+    )
+    height = 1024
+    width = 1024
+    endpoint = "https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud/"
+    dtype = torch.bfloat16
+    scaling_factor = 0.3611
+    shift_factor = 0.1159
+    processor_cls = VaeImageProcessor
+    # slices are different due to randn on different shape. we can pack the latent instead if we want the same
+    output_pt_slice = torch.tensor([96, 116, 157, 45, 67, 104, 34, 56, 89], dtype=torch.uint8)
+    partial_postprocess_return_pt_slice = torch.tensor(
+        [168, 212, 202, 155, 191, 185, 150, 180, 168], dtype=torch.uint8
+    )
+    return_pt_slice = torch.tensor([0.3198, 0.6631, 0.5864, 0.2131, 0.4944, 0.4482, 0.1776, 0.4153, 0.3176])
+
+
+class RemoteAutoencoderKLHunyuanVideoTests(
+    RemoteAutoencoderKLHunyuanVideoMixin,
+    unittest.TestCase,
+):
+    shape = (
+        1,
+        16,
+        3,
+        40,
+        64,
+    )
+    out_hw = (
+        320,
+        512,
+    )
+    endpoint = "https://o7ywnmrahorts457.us-east-1.aws.endpoints.huggingface.cloud/"
+    dtype = torch.float16
+    scaling_factor = 0.476986
+    processor_cls = VideoProcessor
+    output_pt_slice = torch.tensor([112, 92, 85, 112, 93, 85, 112, 94, 85], dtype=torch.uint8)
+    partial_postprocess_return_pt_slice = torch.tensor(
+        [149, 161, 168, 136, 150, 156, 129, 143, 149], dtype=torch.uint8
+    )
+    return_pt_slice = torch.tensor([0.1656, 0.2661, 0.3157, 0.0693, 0.1755, 0.2252, 0.0127, 0.1221, 0.1708])
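
For context, these are the endpoints the RemoteAutoencoderKLMixin test classes exercise. Below is a minimal sketch of a call against the updated SD v1 endpoint, assuming the remote_decode helper from diffusers.utils.remote_utils (available in recent diffusers releases); the latent shape is illustrative, chosen to decode to the 512x512 out_hw shown above, and the exact helper signature may vary between versions:

import torch
from diffusers.utils.remote_utils import remote_decode

# Illustrative random latent; a 1x4x64x64 SD v1 latent decodes to 512x512.
latent = torch.randn((1, 4, 64, 64), dtype=torch.float16)

# scaling_factor matches the RemoteAutoencoderKLSDv1Tests attribute above.
# remote_decode sends the latent to the endpoint and, by default,
# returns the decoded result as a PIL image.
image = remote_decode(
    endpoint="https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud/",
    tensor=latent,
    scaling_factor=0.18215,
)
image.save("decoded.png")

A random latent (as in the tests) decodes to noise-like output; in practice the tensor would come from a pipeline run with output_type="latent".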
