[Tests] Speed up slow tests #1040

Merged · 3 commits · Oct 28, 2022
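Every change below follows the same pattern: the slow/integration tests now pass `device_map="auto"` to `from_pretrained`, so that `accelerate` can place the weights on the target device while they are loaded instead of fully initializing the model on CPU first. A minimal sketch of the pattern, reusing a checkpoint and prompt that appear in the tests below (the snippet is only illustrative and assumes `accelerate` is installed):

```python
import torch
from diffusers import StableDiffusionPipeline

# With device_map="auto", accelerate dispatches the weights to the available
# device(s) as they are loaded, skipping the slow full CPU-side initialization.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="fp16",
    torch_dtype=torch.float16,
    device_map="auto",
)
pipe = pipe.to("cuda")  # the tests keep their explicit .to(torch_device) call
pipe.set_progress_bar_config(disable=None)

image = pipe("a photograph of an astronaut riding a horse").images[0]
```

The same keyword is threaded through the `UNet2DModel`, `CLIPModel`, and `DiffusionPipeline` loads in the diff.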
6 changes: 4 additions & 2 deletions tests/pipelines/dance_diffusion/test_dance_diffusion.py
@@ -86,7 +86,7 @@ def tearDown(self):
def test_dance_diffusion(self):
device = torch_device

- pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
+ pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", device_map="auto")
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)

@@ -103,7 +103,9 @@ def test_dance_diffusion(self):
def test_dance_diffusion_fp16(self):
device = torch_device

- pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
+ pipe = DanceDiffusionPipeline.from_pretrained(
+ "harmonai/maestro-150k", torch_dtype=torch.float16, device_map="auto"
+ )
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)

4 changes: 2 additions & 2 deletions tests/pipelines/ddim/test_ddim.py
@@ -78,7 +78,7 @@ class DDIMPipelineIntegrationTests(unittest.TestCase):
def test_inference_ema_bedroom(self):
model_id = "google/ddpm-ema-bedroom-256"

- unet = UNet2DModel.from_pretrained(model_id)
+ unet = UNet2DModel.from_pretrained(model_id, device_map="auto")
scheduler = DDIMScheduler.from_config(model_id)

ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
@@ -97,7 +97,7 @@ def test_inference_ema_bedroom(self):
def test_inference_cifar10(self):
model_id = "google/ddpm-cifar10-32"

- unet = UNet2DModel.from_pretrained(model_id)
+ unet = UNet2DModel.from_pretrained(model_id, device_map="auto")
scheduler = DDIMScheduler()

ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
2 changes: 1 addition & 1 deletion tests/pipelines/ddpm/test_ddpm.py
@@ -38,7 +38,7 @@ class DDPMPipelineIntegrationTests(unittest.TestCase):
def test_inference_cifar10(self):
model_id = "google/ddpm-cifar10-32"

- unet = UNet2DModel.from_pretrained(model_id)
+ unet = UNet2DModel.from_pretrained(model_id, device_map="auto")
scheduler = DDPMScheduler.from_config(model_id)

ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
2 changes: 1 addition & 1 deletion tests/pipelines/karras_ve/test_karras_ve.py
@@ -70,7 +70,7 @@ def test_inference(self):
class KarrasVePipelineIntegrationTests(unittest.TestCase):
def test_inference(self):
model_id = "google/ncsnpp-celebahq-256"
- model = UNet2DModel.from_pretrained(model_id)
+ model = UNet2DModel.from_pretrained(model_id, device_map="auto")
scheduler = KarrasVeScheduler()

pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
4 changes: 2 additions & 2 deletions tests/pipelines/latent_diffusion/test_latent_diffusion.py
@@ -121,7 +121,7 @@ def test_inference_text2img(self):
@require_torch
class LDMTextToImagePipelineIntegrationTests(unittest.TestCase):
def test_inference_text2img(self):
- ldm = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
+ ldm = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256", device_map="auto")
ldm.to(torch_device)
ldm.set_progress_bar_config(disable=None)

@@ -138,7 +138,7 @@ def test_inference_text2img(self):
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

def test_inference_text2img_fast(self):
- ldm = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
+ ldm = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256", device_map="auto")
ldm.to(torch_device)
ldm.set_progress_bar_config(disable=None)

2 changes: 1 addition & 1 deletion tests/pipelines/pndm/test_pndm.py
@@ -71,7 +71,7 @@ class PNDMPipelineIntegrationTests(unittest.TestCase):
def test_inference_cifar10(self):
model_id = "google/ddpm-cifar10-32"

- unet = UNet2DModel.from_pretrained(model_id)
+ unet = UNet2DModel.from_pretrained(model_id, device_map="auto")
scheduler = PNDMScheduler()

pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
2 changes: 1 addition & 1 deletion tests/pipelines/score_sde_ve/test_score_sde_ve.py
@@ -72,7 +72,7 @@ def test_inference(self):
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
def test_inference(self):
model_id = "google/ncsnpp-church-256"
- model = UNet2DModel.from_pretrained(model_id)
+ model = UNet2DModel.from_pretrained(model_id, device_map="auto")

scheduler = ScoreSdeVeScheduler.from_config(model_id)

25 changes: 15 additions & 10 deletions tests/pipelines/stable_diffusion/test_stable_diffusion.py
@@ -528,7 +528,7 @@ def tearDown(self):

def test_stable_diffusion(self):
# make sure here that pndm scheduler skips prk
- sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
+ sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1", device_map="auto")
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)

@@ -548,7 +548,7 @@ def test_stable_diffusion(self):
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

def test_stable_diffusion_fast_ddim(self):
- sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
+ sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1", device_map="auto")
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)

@@ -576,7 +576,7 @@ def test_stable_diffusion_fast_ddim(self):

def test_lms_stable_diffusion_pipeline(self):
model_id = "CompVis/stable-diffusion-v1-1"
- pipe = StableDiffusionPipeline.from_pretrained(model_id).to(torch_device)
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, device_map="auto").to(torch_device)
pipe.set_progress_bar_config(disable=None)
scheduler = LMSDiscreteScheduler.from_config(model_id, subfolder="scheduler")
pipe.scheduler = scheduler
@@ -595,9 +595,10 @@ def test_lms_stable_diffusion_pipeline(self):
def test_stable_diffusion_memory_chunking(self):
torch.cuda.reset_peak_memory_stats()
model_id = "CompVis/stable-diffusion-v1-4"
- pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16).to(
- torch_device
+ pipe = StableDiffusionPipeline.from_pretrained(
+ model_id, revision="fp16", torch_dtype=torch.float16, device_map="auto"
)
+ pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)

prompt = "a photograph of an astronaut riding a horse"
@@ -633,9 +634,10 @@ def test_stable_diffusion_memory_chunking(self):
def test_stable_diffusion_text2img_pipeline_fp16(self):
torch.cuda.reset_peak_memory_stats()
model_id = "CompVis/stable-diffusion-v1-4"
- pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16).to(
- torch_device
+ pipe = StableDiffusionPipeline.from_pretrained(
+ model_id, revision="fp16", device_map="auto", torch_dtype=torch.float16
)
+ pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)

prompt = "a photograph of an astronaut riding a horse"
@@ -670,6 +672,7 @@ def test_stable_diffusion_text2img_pipeline(self):
pipe = StableDiffusionPipeline.from_pretrained(
model_id,
safety_checker=None,
+ device_map="auto",
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -711,7 +714,7 @@ def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
test_callback_fn.has_been_called = False

pipe = StableDiffusionPipeline.from_pretrained(
- "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
+ "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, device_map="auto"
)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -737,7 +740,7 @@ def test_stable_diffusion_accelerate_auto_device(self):

start_time = time.time()
pipeline_normal_load = StableDiffusionPipeline.from_pretrained(
- pipeline_id, revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+ pipeline_id, revision="fp16", torch_dtype=torch.float16, device_map="auto"
)
pipeline_normal_load.to(torch_device)
normal_load_time = time.time() - start_time
@@ -758,7 +761,9 @@ def test_stable_diffusion_pipeline_with_unet_on_gpu_only(self):
pipeline_id = "CompVis/stable-diffusion-v1-4"
prompt = "Andromeda galaxy in a bottle"

- pipeline = StableDiffusionPipeline.from_pretrained(pipeline_id, revision="fp16", torch_dtype=torch.float16)
+ pipeline = StableDiffusionPipeline.from_pretrained(
+ pipeline_id, revision="fp16", torch_dtype=torch.float16, device_map="auto"
+ )
pipeline.enable_attention_slicing(1)
pipeline.enable_sequential_cpu_offload()

tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
@@ -488,6 +488,7 @@ def test_stable_diffusion_img2img_pipeline(self):
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
model_id,
safety_checker=None,
+ device_map="auto",
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -529,6 +530,7 @@ def test_stable_diffusion_img2img_pipeline_k_lms(self):
model_id,
scheduler=lms,
safety_checker=None,
+ device_map="auto",
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -580,7 +582,7 @@ def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
init_image = init_image.resize((768, 512))

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
- "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
+ "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, device_map="auto"
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
@@ -288,6 +288,7 @@ def test_stable_diffusion_inpaint_pipeline(self):
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model_id,
safety_checker=None,
+ device_map="auto",
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -329,6 +330,7 @@ def test_stable_diffusion_inpaint_pipeline_fp16(self):
revision="fp16",
torch_dtype=torch.float16,
safety_checker=None,
+ device_map="auto",
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -366,7 +368,9 @@ def test_stable_diffusion_inpaint_pipeline_pndm(self):

pndm = PNDMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True)
model_id = "runwayml/stable-diffusion-inpainting"
- pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None, scheduler=pndm)
+ pipe = StableDiffusionInpaintPipeline.from_pretrained(
+ model_id, safety_checker=None, scheduler=pndm, device_map="auto"
+ )
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py
@@ -368,6 +368,7 @@ def test_stable_diffusion_inpaint_legacy_pipeline(self):
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model_id,
safety_checker=None,
+ device_map="auto",
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -413,6 +414,7 @@ def test_stable_diffusion_inpaint_legacy_pipeline_k_lms(self):
model_id,
scheduler=lms,
safety_checker=None,
+ device_map="auto",
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
@@ -469,7 +471,7 @@ def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
)

pipe = StableDiffusionInpaintPipeline.from_pretrained(
- "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
+ "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, device_map="auto"
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
33 changes: 20 additions & 13 deletions tests/test_pipelines.py
@@ -108,8 +108,8 @@ def test_local_custom_pipeline(self):
def test_load_pipeline_from_git(self):
clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

- feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id)
- clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)
+ feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id, device_map="auto")
+ clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16, device_map="auto")

pipeline = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
@@ -118,6 +118,7 @@ def test_load_pipeline_from_git(self):
feature_extractor=feature_extractor,
torch_dtype=torch.float16,
revision="fp16",
+ device_map="auto",
)
pipeline.enable_attention_slicing()
pipeline = pipeline.to(torch_device)
@@ -312,7 +313,9 @@ def tearDown(self):
def test_smart_download(self):
model_id = "hf-internal-testing/unet-pipeline-dummy"
with tempfile.TemporaryDirectory() as tmpdirname:
- _ = DiffusionPipeline.from_pretrained(model_id, cache_dir=tmpdirname, force_download=True)
+ _ = DiffusionPipeline.from_pretrained(
+ model_id, cache_dir=tmpdirname, force_download=True, device_map="auto"
+ )
local_repo_name = "--".join(["models"] + model_id.split("/"))
snapshot_dir = os.path.join(tmpdirname, local_repo_name, "snapshots")
snapshot_dir = os.path.join(snapshot_dir, os.listdir(snapshot_dir)[0])
@@ -335,7 +338,9 @@ def test_warning_unused_kwargs(self):
logger = logging.get_logger("diffusers.pipeline_utils")
with tempfile.TemporaryDirectory() as tmpdirname:
with CaptureLogger(logger) as cap_logger:
- DiffusionPipeline.from_pretrained(model_id, not_used=True, cache_dir=tmpdirname, force_download=True)
+ DiffusionPipeline.from_pretrained(
+ model_id, not_used=True, cache_dir=tmpdirname, force_download=True, device_map="auto"
+ )

assert cap_logger.out == "Keyword arguments {'not_used': True} not recognized.\n"

@@ -358,7 +363,7 @@ def test_from_pretrained_save_pretrained(self):

with tempfile.TemporaryDirectory() as tmpdirname:
ddpm.save_pretrained(tmpdirname)
- new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
+ new_ddpm = DDPMPipeline.from_pretrained(tmpdirname, device_map="auto")
new_ddpm.to(torch_device)

generator = torch.manual_seed(0)
@@ -374,10 +379,10 @@ def test_from_pretrained_hub(self):

scheduler = DDPMScheduler(num_train_timesteps=10)

- ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler)
+ ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler, device_map="auto")
ddpm.to(torch_device)
ddpm.set_progress_bar_config(disable=None)
- ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
+ ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, device_map="auto")
ddpm_from_hub.to(torch_device)
ddpm_from_hub.set_progress_bar_config(disable=None)

@@ -395,12 +400,14 @@ def test_from_pretrained_hub_pass_model(self):
scheduler = DDPMScheduler(num_train_timesteps=10)

# pass unet into DiffusionPipeline
- unet = UNet2DModel.from_pretrained(model_path)
- ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(model_path, unet=unet, scheduler=scheduler)
+ unet = UNet2DModel.from_pretrained(model_path, device_map="auto")
+ ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(
+ model_path, unet=unet, scheduler=scheduler, device_map="auto"
+ )
ddpm_from_hub_custom_model.to(torch_device)
ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)

- ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
+ ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, device_map="auto")
ddpm_from_hub.to(torch_device)
ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)

@@ -415,7 +422,7 @@ def test_from_pretrained_hub_pass_model(self):
def test_output_format(self):
model_path = "google/ddpm-cifar10-32"

- pipe = DDIMPipeline.from_pretrained(model_path)
+ pipe = DDIMPipeline.from_pretrained(model_path, device_map="auto")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)

@@ -437,7 +444,7 @@ def test_output_format(self):
def test_ddpm_ddim_equality(self):
model_id = "google/ddpm-cifar10-32"

- unet = UNet2DModel.from_pretrained(model_id)
+ unet = UNet2DModel.from_pretrained(model_id, device_map="auto")
ddpm_scheduler = DDPMScheduler()
ddim_scheduler = DDIMScheduler()

@@ -461,7 +468,7 @@ def test_ddpm_ddim_equality(self):
def test_ddpm_ddim_equality_batched(self):
model_id = "google/ddpm-cifar10-32"

- unet = UNet2DModel.from_pretrained(model_id)
+ unet = UNet2DModel.from_pretrained(model_id, device_map="auto")
ddpm_scheduler = DDPMScheduler()
ddim_scheduler = DDIMScheduler()
