|
from typing import List, Optional, Union

import torch
from diffusers import PixArtAlphaPipeline
from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha import retrieve_timesteps


def freeze_params(params):
    """Disable gradients for every parameter in `params`."""
    for param in params:
        param.requires_grad = False


class RewardPixartPipeline(PixArtAlphaPipeline):
    def __init__(
        self, tokenizer, text_encoder, transformer, scheduler, vae, memsave=False
    ):
        # PixArtAlphaPipeline.__init__ takes the components positionally as
        # (tokenizer, text_encoder, vae, transformer, scheduler).
        super().__init__(
            tokenizer,
            text_encoder,
            vae,
            transformer,
            scheduler,
        )

        if memsave:
            # Optionally replace supported layers with memory-saving variants.
            import memsave_torch.nn

            self.vae = memsave_torch.nn.convert_to_memory_saving(self.vae)
            self.text_encoder = memsave_torch.nn.convert_to_memory_saving(
                self.text_encoder
            )
        self.text_encoder.gradient_checkpointing_enable()
        self.vae.enable_gradient_checkpointing()
        # The text encoder and VAE stay frozen; only the transformer is trainable.
        self.text_encoder.eval()
        self.vae.eval()
        freeze_params(self.vae.parameters())
        freeze_params(self.text_encoder.parameters())
|
    def apply(
        self,
        latents: torch.Tensor = None,
        prompt: Union[str, List[str]] = None,
        negative_prompt: str = "",
        num_inference_steps: int = 20,
        timesteps: List[int] = [400],
        sigmas: List[float] = None,
        guidance_scale: float = 1.0,
        num_images_per_prompt: Optional[int] = 1,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        prompt_attention_mask: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_attention_mask: Optional[torch.FloatTensor] = None,
        callback_steps: int = 1,
        clean_caption: bool = False,
        use_resolution_binning: bool = True,
        max_sequence_length: int = 120,
        **kwargs,
    ):
        """Differentiable generation pass.

        Unlike `__call__`, this method is not wrapped in `torch.no_grad()` and
        returns the decoded image as a tensor in [0, 1], so the output can be fed
        directly into a differentiable reward objective.
        """
        # 1. Resolve the target resolution, optionally snapping it to the nearest
        #    aspect-ratio bin supported by the transformer.
        height = height or self.transformer.config.sample_size * self.vae_scale_factor
        width = width or self.transformer.config.sample_size * self.vae_scale_factor
        if use_resolution_binning:
            if self.transformer.config.sample_size == 128:
                aspect_ratio_bin = ASPECT_RATIO_1024_BIN
            elif self.transformer.config.sample_size == 64:
                aspect_ratio_bin = ASPECT_RATIO_512_BIN
            elif self.transformer.config.sample_size == 32:
                aspect_ratio_bin = ASPECT_RATIO_256_BIN
            else:
                raise ValueError("Invalid sample size")
            orig_height, orig_width = height, width
            height, width = self.image_processor.classify_height_width_bin(
                height, width, ratios=aspect_ratio_bin
            )

        # 2. Validate inputs.
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            callback_steps,
            prompt_embeds,
            negative_prompt_embeds,
            prompt_attention_mask,
            negative_prompt_attention_mask,
        )

        # 3. Determine the batch size and execution device.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # Classifier-free guidance is only applied for guidance_scale > 1.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 4. Encode the prompt (and the negative prompt when guidance is used).
        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt,
            do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            clean_caption=clean_caption,
            max_sequence_length=max_sequence_length,
        )
        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat(
                [negative_prompt_attention_mask, prompt_attention_mask], dim=0
            )

        # 5. Prepare timesteps. Passing an explicit `timesteps` list (default: [400])
        #    overrides `num_inference_steps`, which becomes len(timesteps).
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )

        # 6. Prepare the initial latents.
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 7. Extra kwargs for the scheduler step (e.g. eta for DDIM-style schedulers).
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8. Micro-conditioning on resolution / aspect ratio (only for the 1024px
        #    model, i.e. sample_size == 128).
        added_cond_kwargs = {"resolution": None, "aspect_ratio": None}
        if self.transformer.config.sample_size == 128:
            resolution = torch.tensor([height, width]).repeat(
                batch_size * num_images_per_prompt, 1
            )
            aspect_ratio = torch.tensor([float(height / width)]).repeat(
                batch_size * num_images_per_prompt, 1
            )
            resolution = resolution.to(dtype=prompt_embeds.dtype, device=device)
            aspect_ratio = aspect_ratio.to(dtype=prompt_embeds.dtype, device=device)

            if do_classifier_free_guidance:
                resolution = torch.cat([resolution, resolution], dim=0)
                aspect_ratio = torch.cat([aspect_ratio, aspect_ratio], dim=0)

            added_cond_kwargs = {"resolution": resolution, "aspect_ratio": aspect_ratio}

        # 9. Denoising loop.
        num_warmup_steps = max(
            len(timesteps) - num_inference_steps * self.scheduler.order, 0
        )

        for i, t in enumerate(timesteps):
            latent_model_input = (
                torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            )
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # Broadcast the current timestep to the batch dimension, respecting the
            # dtype constraints of the device (MPS does not support float64/int64).
            current_timestep = t
            if not torch.is_tensor(current_timestep):
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(current_timestep, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                current_timestep = torch.tensor(
                    [current_timestep], dtype=dtype, device=latent_model_input.device
                )
            elif len(current_timestep.shape) == 0:
                current_timestep = current_timestep[None].to(latent_model_input.device)

            current_timestep = current_timestep.expand(latent_model_input.shape[0])

            # Predict the noise residual.
            noise_pred = self.transformer(
                latent_model_input,
                encoder_hidden_states=prompt_embeds,
                encoder_attention_mask=prompt_attention_mask,
                timestep=current_timestep,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            # Classifier-free guidance.
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (
                    noise_pred_text - noise_pred_uncond
                )

            # If the model also predicts variance (learned sigma), keep only the
            # noise channels.
            if self.transformer.config.out_channels // 2 == latent_channels:
                noise_pred = noise_pred.chunk(2, dim=1)[0]
            else:
                noise_pred = noise_pred

            # This pipeline targets single-step generation: with one timestep, the
            # scheduler's predicted original sample (x0) is used directly as the
            # final latents.
            if num_inference_steps == 1:
                latents = self.scheduler.step(
                    noise_pred, t, latents, **extra_step_kwargs
                ).pred_original_sample

        # 10. Decode the latents and post-process the image.
        image = self.vae.decode(
            latents / self.vae.config.scaling_factor, return_dict=False
        )[0]
        if use_resolution_binning:
            image = self.image_processor.resize_and_crop_tensor(
                image, orig_width, orig_height
            )

        image = (image / 2 + 0.5).clamp(0, 1)

        self.maybe_free_model_hooks()
        return image
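

# Illustrative note (not part of the original code): because `apply` runs without
# `torch.no_grad()` and returns a tensor, its output can be fed straight into a
# differentiable reward. A minimal sketch, assuming a hypothetical `reward_model`
# that maps an image batch in [0, 1] to one scalar score per image:
#
#     image = pipe.apply(prompt=prompts)      # (B, 3, H, W), gradients intact
#     loss = -reward_model(image).mean()
#     loss.backward()
#     optimizer.step()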
|
|
|
|
|
# Aspect-ratio bins: keys are height/width ratios, values the corresponding binned
# [height, width]. `apply` selects among the 256/512/1024 bins based on the
# transformer's sample size.
ASPECT_RATIO_2048_BIN = {
    "0.25": [1024.0, 4096.0],
    "0.26": [1024.0, 3968.0],
    "0.27": [1024.0, 3840.0],
    "0.28": [1024.0, 3712.0],
    "0.32": [1152.0, 3584.0],
    "0.33": [1152.0, 3456.0],
    "0.35": [1152.0, 3328.0],
    "0.4": [1280.0, 3200.0],
    "0.42": [1280.0, 3072.0],
    "0.48": [1408.0, 2944.0],
    "0.5": [1408.0, 2816.0],
    "0.52": [1408.0, 2688.0],
    "0.57": [1536.0, 2688.0],
    "0.6": [1536.0, 2560.0],
    "0.68": [1664.0, 2432.0],
    "0.72": [1664.0, 2304.0],
    "0.78": [1792.0, 2304.0],
    "0.82": [1792.0, 2176.0],
    "0.88": [1920.0, 2176.0],
    "0.94": [1920.0, 2048.0],
    "1.0": [2048.0, 2048.0],
    "1.07": [2048.0, 1920.0],
    "1.13": [2176.0, 1920.0],
    "1.21": [2176.0, 1792.0],
    "1.29": [2304.0, 1792.0],
    "1.38": [2304.0, 1664.0],
    "1.46": [2432.0, 1664.0],
    "1.67": [2560.0, 1536.0],
    "1.75": [2688.0, 1536.0],
    "2.0": [2816.0, 1408.0],
    "2.09": [2944.0, 1408.0],
    "2.4": [3072.0, 1280.0],
    "2.5": [3200.0, 1280.0],
    "2.89": [3328.0, 1152.0],
    "3.0": [3456.0, 1152.0],
    "3.11": [3584.0, 1152.0],
    "3.62": [3712.0, 1024.0],
    "3.75": [3840.0, 1024.0],
    "3.88": [3968.0, 1024.0],
    "4.0": [4096.0, 1024.0],
}

ASPECT_RATIO_256_BIN = {
    "0.25": [128.0, 512.0],
    "0.28": [128.0, 464.0],
    "0.32": [144.0, 448.0],
    "0.33": [144.0, 432.0],
    "0.35": [144.0, 416.0],
    "0.4": [160.0, 400.0],
    "0.42": [160.0, 384.0],
    "0.48": [176.0, 368.0],
    "0.5": [176.0, 352.0],
    "0.52": [176.0, 336.0],
    "0.57": [192.0, 336.0],
    "0.6": [192.0, 320.0],
    "0.68": [208.0, 304.0],
    "0.72": [208.0, 288.0],
    "0.78": [224.0, 288.0],
    "0.82": [224.0, 272.0],
    "0.88": [240.0, 272.0],
    "0.94": [240.0, 256.0],
    "1.0": [256.0, 256.0],
    "1.07": [256.0, 240.0],
    "1.13": [272.0, 240.0],
    "1.21": [272.0, 224.0],
    "1.29": [288.0, 224.0],
    "1.38": [288.0, 208.0],
    "1.46": [304.0, 208.0],
    "1.67": [320.0, 192.0],
    "1.75": [336.0, 192.0],
    "2.0": [352.0, 176.0],
    "2.09": [368.0, 176.0],
    "2.4": [384.0, 160.0],
    "2.5": [400.0, 160.0],
    "3.0": [432.0, 144.0],
    "4.0": [512.0, 128.0],
}

ASPECT_RATIO_1024_BIN = {
    "0.25": [512.0, 2048.0],
    "0.28": [512.0, 1856.0],
    "0.32": [576.0, 1792.0],
    "0.33": [576.0, 1728.0],
    "0.35": [576.0, 1664.0],
    "0.4": [640.0, 1600.0],
    "0.42": [640.0, 1536.0],
    "0.48": [704.0, 1472.0],
    "0.5": [704.0, 1408.0],
    "0.52": [704.0, 1344.0],
    "0.57": [768.0, 1344.0],
    "0.6": [768.0, 1280.0],
    "0.68": [832.0, 1216.0],
    "0.72": [832.0, 1152.0],
    "0.78": [896.0, 1152.0],
    "0.82": [896.0, 1088.0],
    "0.88": [960.0, 1088.0],
    "0.94": [960.0, 1024.0],
    "1.0": [1024.0, 1024.0],
    "1.07": [1024.0, 960.0],
    "1.13": [1088.0, 960.0],
    "1.21": [1088.0, 896.0],
    "1.29": [1152.0, 896.0],
    "1.38": [1152.0, 832.0],
    "1.46": [1216.0, 832.0],
    "1.67": [1280.0, 768.0],
    "1.75": [1344.0, 768.0],
    "2.0": [1408.0, 704.0],
    "2.09": [1472.0, 704.0],
    "2.4": [1536.0, 640.0],
    "2.5": [1600.0, 640.0],
    "3.0": [1728.0, 576.0],
    "4.0": [2048.0, 512.0],
}

ASPECT_RATIO_512_BIN = {
    "0.25": [256.0, 1024.0],
    "0.28": [256.0, 928.0],
    "0.32": [288.0, 896.0],
    "0.33": [288.0, 864.0],
    "0.35": [288.0, 832.0],
    "0.4": [320.0, 800.0],
    "0.42": [320.0, 768.0],
    "0.48": [352.0, 736.0],
    "0.5": [352.0, 704.0],
    "0.52": [352.0, 672.0],
    "0.57": [384.0, 672.0],
    "0.6": [384.0, 640.0],
    "0.68": [416.0, 608.0],
    "0.72": [416.0, 576.0],
    "0.78": [448.0, 576.0],
    "0.82": [448.0, 544.0],
    "0.88": [480.0, 544.0],
    "0.94": [480.0, 512.0],
    "1.0": [512.0, 512.0],
    "1.07": [512.0, 480.0],
    "1.13": [544.0, 480.0],
    "1.21": [544.0, 448.0],
    "1.29": [576.0, 448.0],
    "1.38": [576.0, 416.0],
    "1.46": [608.0, 416.0],
    "1.67": [640.0, 384.0],
    "1.75": [672.0, 384.0],
    "2.0": [704.0, 352.0],
    "2.09": [736.0, 352.0],
    "2.4": [768.0, 320.0],
    "2.5": [800.0, 320.0],
    "3.0": [864.0, 288.0],
    "4.0": [1024.0, 256.0],
}
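

# Illustrative usage sketch (not from the original file). It assumes the
# "PixArt-alpha/PixArt-XL-2-512x512" checkpoint layout and swaps in a DDPM
# scheduler, because `apply` passes an explicit `timesteps` list to
# `retrieve_timesteps` and reads `pred_original_sample` from the scheduler step
# output; any scheduler with both properties would do. Ids, dtypes, and devices
# are assumptions, not the original authors' configuration.
if __name__ == "__main__":
    from diffusers import DDPMScheduler

    base = PixArtAlphaPipeline.from_pretrained(
        "PixArt-alpha/PixArt-XL-2-512x512", torch_dtype=torch.float16
    )
    pipe = RewardPixartPipeline(
        tokenizer=base.tokenizer,
        text_encoder=base.text_encoder,
        transformer=base.transformer,
        scheduler=DDPMScheduler.from_config(base.scheduler.config),
        vae=base.vae,
    ).to("cuda")

    # Single denoising step at t=400 (the default), returning a (B, 3, H, W)
    # image tensor in [0, 1] with gradients flowing through the transformer.
    image = pipe.apply(prompt="a corgi wearing sunglasses", guidance_scale=4.5)
    print(image.shape)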
|
|