@@ -15,7 +15,6 @@
 import unittest
 
 import numpy as np
-import pytest
 import torch
 from diffusers import (
     AutoPipelineForImage2Image,
@@ -25,7 +24,6 @@
 )
 from diffusers.utils import load_image
 from parameterized import parameterized
-from transformers.testing_utils import require_torch_gpu
 from utils_tests import MODEL_NAMES, SEED
 
 from optimum.intel.openvino import (
@@ -34,7 +32,7 @@
     OVPipelineForInpainting,
     OVPipelineForText2Image,
 )
-from optimum.utils.testing_utils import grid_parameters, require_diffusers
+from optimum.utils.testing_utils import require_diffusers
 
 
 def get_generator(framework, seed):
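Only the signature of `get_generator` is visible in the hunk. For context, a plausible body (an assumption, since the diff does not show it) returns a seeded RNG for the requested framework so pipeline outputs are reproducible:

```python
import numpy as np
import torch


def get_generator(framework, seed):
    # Assumed implementation, not shown in the diff: seed a
    # framework-appropriate random generator for reproducible outputs.
    if framework == "pt":
        return torch.Generator().manual_seed(seed)
    elif framework == "np":
        return np.random.RandomState(seed)
    raise ValueError(f"Unsupported framework: {framework}")
```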
@@ -110,16 +108,14 @@ def test_ort_pipeline_class_dispatch(self, model_arch: str):
     @require_diffusers
     def test_num_images_per_prompt(self, model_arch: str):
         pipeline = self.OVMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])
-        self.assertEqual(pipeline.vae_scale_factor, 2)
-        self.assertEqual(pipeline.vae_decoder.config["latent_channels"], 4)
-        self.assertEqual(pipeline.unet.config["in_channels"], 4)
 
-        height, width, batch_size = 64, 64, 1
-        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
-
-        for num_images in [1, 3]:
-            outputs = pipeline(**inputs, num_images_per_prompt=num_images).images
-            self.assertEqual(outputs.shape, (batch_size * num_images, height, width, 3))
+        for batch_size in [1, 3]:
+            for height in [64, 128]:
+                for width in [64, 128]:
+                    for num_images_per_prompt in [1, 3]:
+                        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
+                        outputs = pipeline(**inputs, num_images_per_prompt=num_images_per_prompt).images
+                        self.assertEqual(outputs.shape, (batch_size * num_images_per_prompt, height, width, 3))
 
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     @require_diffusers
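The refactor drops the hard-coded config assertions and the single-size check in favor of a full sweep over batch size, resolution, and `num_images_per_prompt`, asserting the channels-last numpy output shape `(batch_size * num_images_per_prompt, height, width, 3)` that diffusers pipelines produce. Since `grid_parameters` is no longer imported, the sweep is spelled out as nested loops; an equivalent, flatter formulation (a sketch, not the code the PR adds, with `pipeline` and `generate_inputs` standing in for the test fixtures) uses `itertools.product`:

```python
import itertools


def check_num_images_per_prompt(pipeline, generate_inputs):
    # Flattened version of the four nested loops above.
    grid = itertools.product([1, 3], [64, 128], [64, 128], [1, 3])
    for batch_size, height, width, num_images_per_prompt in grid:
        inputs = generate_inputs(height=height, width=width, batch_size=batch_size)
        outputs = pipeline(**inputs, num_images_per_prompt=num_images_per_prompt).images
        # Numpy image output is (N, H, W, C) with
        # N = batch_size * num_images_per_prompt.
        assert outputs.shape == (batch_size * num_images_per_prompt, height, width, 3)
```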
@@ -279,16 +275,14 @@ def test_ort_pipeline_class_dispatch(self, model_arch: str):
     @require_diffusers
     def test_num_images_per_prompt(self, model_arch: str):
         pipeline = self.OVMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])
-        self.assertEqual(pipeline.vae_scale_factor, 2)
-        self.assertEqual(pipeline.vae_decoder.config["latent_channels"], 4)
-        self.assertEqual(pipeline.unet.config["in_channels"], 4)
 
-        batch_size, height = 1, 32
-        for width in [64, 32]:
-            inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
-            for num_images in [1, 3]:
-                outputs = pipeline(**inputs, num_images_per_prompt=num_images).images
-                self.assertEqual(outputs.shape, (batch_size * num_images, height, width, 3))
+        for batch_size in [1, 3]:
+            for height in [64, 128]:
+                for width in [64, 128]:
+                    for num_images_per_prompt in [1, 3]:
+                        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
+                        outputs = pipeline(**inputs, num_images_per_prompt=num_images_per_prompt).images
+                        self.assertEqual(outputs.shape, (batch_size * num_images_per_prompt, height, width, 3))
 
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     @require_diffusers
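Each of these tests is multiplied across model architectures by `parameterized.expand`, visible in the context lines. A minimal, self-contained illustration of that mechanism (the architecture names here are placeholders, not the suite's actual `SUPPORTED_ARCHITECTURES`):

```python
import unittest

from parameterized import parameterized

SUPPORTED_ARCHITECTURES = ["stable-diffusion", "latent-consistency"]  # placeholder values


class ExampleTest(unittest.TestCase):
    # expand() generates one test method per entry, e.g.
    # test_num_images_per_prompt_0_stable_diffusion,
    # test_num_images_per_prompt_1_latent_consistency.
    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_num_images_per_prompt(self, model_arch: str):
        self.assertIn(model_arch, SUPPORTED_ARCHITECTURES)
```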
@@ -420,16 +414,14 @@ def test_ort_pipeline_class_dispatch(self, model_arch: str):
     @require_diffusers
     def test_num_images_per_prompt(self, model_arch: str):
         pipeline = self.OVMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])
-        self.assertEqual(pipeline.vae_scale_factor, 2)
-        self.assertEqual(pipeline.vae_decoder.config["latent_channels"], 4)
-        self.assertEqual(pipeline.unet.config["in_channels"], 4)
-
-        batch_size, height = 1, 32
-        for width in [64, 32]:
-            inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
-            for num_images in [1, 3]:
-                outputs = pipeline(**inputs, num_images_per_prompt=num_images).images
-                self.assertEqual(outputs.shape, (batch_size * num_images, height, width, 3))
+
+        for batch_size in [1, 3]:
+            for height in [64, 128]:
+                for width in [64, 128]:
+                    for num_images_per_prompt in [1, 3]:
+                        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
+                        outputs = pipeline(**inputs, num_images_per_prompt=num_images_per_prompt).images
+                        self.assertEqual(outputs.shape, (batch_size * num_images_per_prompt, height, width, 3))
 
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     @require_diffusers