KingNish committed on
Commit
ed2d2b6
1 Parent(s): e471a41

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -11
app.py CHANGED
@@ -13,13 +13,12 @@ from huggingface_hub import InferenceClient
13
 
14
 
15
  help_text = """
16
- To optimize image editing results:
17
  - Adjust the **Image CFG weight** if the image isn't changing enough or is changing too much. Lower it to allow bigger changes, or raise it to preserve original details.
18
  - Modify the **Text CFG weight** to influence how closely the edit follows text instructions. Increase it to adhere more to the text, or decrease it for subtler changes.
19
  - Experiment with different **random seeds** and **CFG values** for varied outcomes.
20
  - **Rephrase your instructions** for potentially better results.
21
  - **Increase the number of steps** for enhanced edits.
22
- - For better facial details, especially if they're small, **crop the image** to enlarge the face's presence.
23
  """
24
 
25
  def set_timesteps_patched(self, num_inference_steps: int, device = None):
@@ -36,7 +35,7 @@ def set_timesteps_patched(self, num_inference_steps: int, device = None):
36
  self._begin_index = None
37
  self.sigmas = self.sigmas.to("cpu")
38
 
39
-
40
  edit_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl_edit.safetensors")
41
  normal_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl.safetensors")
42
 
@@ -57,32 +56,36 @@ if not torch.cuda.is_available():
57
 
58
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
59
 
 
 
60
  if torch.cuda.is_available():
61
  pipe = StableDiffusionXLPipeline.from_pretrained(
62
- "sd-community/sdxl-flash",
63
  torch_dtype=torch.float16,
64
  use_safetensors=True,
65
- add_watermarker=False
66
  )
67
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
 
68
 
69
  def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
70
  if randomize_seed:
71
  seed = random.randint(0, 999999)
72
  return seed
73
 
 
74
  @spaces.GPU(duration=30, queue=False)
75
  def king(type = "Image Generation",
76
  input_image = None,
77
  instruction: str = "Eiffel tower",
78
  steps: int = 8,
79
  randomize_seed: bool = False,
80
- seed: int = 24,
81
  text_cfg_scale: float = 7.3,
82
  image_cfg_scale: float = 1.7,
83
  width: int = 1024,
84
  height: int = 1024,
85
- guidance_scale: float = 3,
86
  use_resolution_binning: bool = True,
87
  progress=gr.Progress(track_tqdm=True),
88
  ):
@@ -92,7 +95,7 @@ def king(type = "Image Generation",
92
  image_cfg_scale = image_cfg_scale
93
  input_image = input_image
94
 
95
- steps=steps*3
96
  generator = torch.manual_seed(seed)
97
  output_image = pipe_edit(
98
  instruction, image=input_image,
@@ -118,8 +121,7 @@ def king(type = "Image Generation",
118
  output_image = pipe(**options).images[0]
119
  return seed, output_image
120
 
121
-
122
-
123
  def response(instruction, input_image=None):
124
  if input_image is None:
125
  output="Image Generation"
@@ -218,7 +220,7 @@ with gr.Blocks(css=css) as demo:
218
  inputs=[type,input_image, instruction],
219
  fn=king,
220
  outputs=[input_image],
221
- cache_examples=False,
222
  )
223
 
224
  gr.Markdown(help_text)
 
13
 
14
 
15
  help_text = """
16
+ To optimize image results:
17
  - Adjust the **Image CFG weight** if the image isn't changing enough or is changing too much. Lower it to allow bigger changes, or raise it to preserve original details.
18
  - Modify the **Text CFG weight** to influence how closely the edit follows text instructions. Increase it to adhere more to the text, or decrease it for subtler changes.
19
  - Experiment with different **random seeds** and **CFG values** for varied outcomes.
20
  - **Rephrase your instructions** for potentially better results.
21
  - **Increase the number of steps** for enhanced edits.
 
22
  """
23
 
24
  def set_timesteps_patched(self, num_inference_steps: int, device = None):
 
35
  self._begin_index = None
36
  self.sigmas = self.sigmas.to("cpu")
37
 
38
+ # Image Editor
39
  edit_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl_edit.safetensors")
40
  normal_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl.safetensors")
41
 
 
56
 
57
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
58
 
59
+
60
+ # Image Generator
61
  if torch.cuda.is_available():
62
  pipe = StableDiffusionXLPipeline.from_pretrained(
63
+ "fluently/Fluently-XL-v4",
64
  torch_dtype=torch.float16,
65
  use_safetensors=True,
 
66
  )
67
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
68
+ pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
69
+ pipe.set_adapters("dalle")
70
 
71
  def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
72
  if randomize_seed:
73
  seed = random.randint(0, 999999)
74
  return seed
75
 
76
+ # Generator
77
  @spaces.GPU(duration=30, queue=False)
78
  def king(type = "Image Generation",
79
  input_image = None,
80
  instruction: str = "Eiffel tower",
81
  steps: int = 8,
82
  randomize_seed: bool = False,
83
+ seed: int = 25,
84
  text_cfg_scale: float = 7.3,
85
  image_cfg_scale: float = 1.7,
86
  width: int = 1024,
87
  height: int = 1024,
88
+ guidance_scale: float = 6.2,
89
  use_resolution_binning: bool = True,
90
  progress=gr.Progress(track_tqdm=True),
91
  ):
 
95
  image_cfg_scale = image_cfg_scale
96
  input_image = input_image
97
 
98
+ steps=steps
99
  generator = torch.manual_seed(seed)
100
  output_image = pipe_edit(
101
  instruction, image=input_image,
 
121
  output_image = pipe(**options).images[0]
122
  return seed, output_image
123
 
124
+ # Prompt classifier
 
125
  def response(instruction, input_image=None):
126
  if input_image is None:
127
  output="Image Generation"
 
220
  inputs=[type,input_image, instruction],
221
  fn=king,
222
  outputs=[input_image],
223
+ cache_examples=True,
224
  )
225
 
226
  gr.Markdown(help_text)