KingNish committed
Commit a6abdc9
1 Parent(s): 0a795c5

Update app.py

Files changed (1)
  1. app.py +51 -33
app.py CHANGED
@@ -18,6 +18,11 @@ pipe.to("cuda")
 refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
 refiner.to("cuda")
 
+pipe_fast = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16, vae=vae)
+pipe_fast.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
+pipe_fast.set_adapters("lora")
+pipe_fast.to("cuda")
+
 help_text = """
 To optimize image results:
 - Adjust the **Image CFG weight** if the image isn't changing enough or is changing too much. Lower it to allow bigger changes, or raise it to preserve original details.
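Note on the new block: this hunk adds a Lightning-distilled SDXL checkpoint with a LoRA adapter alongside the existing pipelines. Below is a minimal, self-contained sketch of the setup it plugs into; the imports and the VAE repo are assumptions (the `vae` object is created earlier in app.py, outside this hunk), and only the four `pipe_fast` lines are what the commit actually adds.

```python
import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline

# Assumption: an fp16-safe SDXL VAE; the exact repo app.py uses is not visible in this hunk.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

# The lines the commit adds: a Lightning checkpoint with a LoRA adapter enabled on top.
pipe_fast = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16, vae=vae
)
pipe_fast.load_lora_weights(
    "KingNish/Better-Image-XL-Lora",
    weight_name="example-03.safetensors",
    adapter_name="lora",
)
pipe_fast.set_adapters("lora")
pipe_fast.to("cuda")
```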
@@ -55,17 +60,18 @@ def king(type ,
         instruction: str ,
         steps: int = 25,
         randomize_seed: bool = False,
-        seed: int = 25,
+        seed: int = 2404,
         width: int = 1024,
         height: int = 1024,
         guidance_scale: float = 7,
+        fast=True,
         use_resolution_binning: bool = True,
         progress=gr.Progress(track_tqdm=True),
     ):
     if type=="Image Editing" :
         raw_image = Image.open(input_image).convert('RGB')
         if randomize_seed:
-            seed = random.randint(0, 99999)
+            seed = random.randint(0, 999999)
         generator = torch.manual_seed(seed)
         output_image = pipe_edit(
             instruction, image=raw_image,
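Both branches of `king()` handle the seed the same way, and the randomization range widens from 0-99999 to 0-999999. A minimal sketch of that pattern (the helper name is hypothetical, not part of app.py):

```python
import random
import torch

def resolve_seed(seed: int, randomize_seed: bool) -> tuple[int, torch.Generator]:
    # Draw a fresh seed from the widened 0-999999 range when randomization is on,
    # then seed a Generator so the run is reproducible from the returned value.
    if randomize_seed:
        seed = random.randint(0, 999999)
    return seed, torch.Generator().manual_seed(seed)
```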
@@ -82,24 +88,23 @@ def king(type ,
         return seed, refine
     else :
         if randomize_seed:
-            seed = random.randint(0, 99999)
+            seed = random.randint(0, 999999)
         generator = torch.Generator().manual_seed(seed)
-        image = pipe(
-            prompt = instruction,
+        if fast:
+            pipes=pipe_fast
+        else:
+            pipes=pipe
+        image = pipes( prompt = instruction,
             guidance_scale = guidance_scale,
             num_inference_steps = steps,
-            width = (width),
-            height = (height),
-            generator = generator,
-            output_type="latent",
+            width = width, height = height,
+            generator = generator, output_type="latent",
         ).images
 
-        refine = refiner(
-            prompt=instruction,
+        refine = refiner( prompt=instruction,
             guidance_scale=guidance_scale,
             num_inference_steps=steps,
-            image=image,
-            generator=generator,
+            image=image, generator=generator,
         ).images[0]
         return seed, refine
 
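After this hunk the generation branch reduces to a two-stage call: the selected pipeline (Lightning when `fast` is set, otherwise the base SDXL pipeline) returns latents, and the refiner decodes them. A sketch using the same keyword arguments; `generate_with_refiner` is a hypothetical wrapper, not a function in app.py:

```python
import torch

def generate_with_refiner(pipes, refiner, instruction: str, steps: int,
                          guidance_scale: float, width: int, height: int,
                          generator: torch.Generator):
    # Stage 1: the base (or Lightning) pipeline keeps its output in latent space.
    latents = pipes(
        prompt=instruction,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
        width=width, height=height,
        generator=generator,
        output_type="latent",
    ).images
    # Stage 2: the SDXL refiner accepts those latents as its `image` input
    # and returns the final decoded image.
    return refiner(
        prompt=instruction,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
        image=latents,
        generator=generator,
    ).images[0]
```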
@@ -165,37 +170,49 @@ examples=[
     ],
 ]
 
-with gr.Blocks(css=css) as demo:
+with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     gr.Markdown("# Image Generator Pro")
     with gr.Row():
-        instruction = gr.Textbox(lines=1, label="Instruction", interactive=True)
-    with gr.Row():
-        with gr.Column(scale=1):
-            type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True)
-            enhance_prompt = gr.Checkbox(label="Enhance prompt", value = True)
+        with gr.Column(scale=2):
+            instruction = gr.Textbox(lines=1, label="Instruction", interactive=True)
         with gr.Column(scale=1):
             generate_button = gr.Button("Generate")
+    with gr.Row():
+        type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True)
+        enhance_prompt = gr.Checkbox(label="Enhance prompt", value = True)
+        fast = gr.Checkbox(label="FAST Generation")
 
     with gr.Row():
         input_image = gr.Image(label="Image", type='filepath', interactive=True)
 
     with gr.Row():
-        guidance_scale = gr.Number(value=6.0, step=0.1, label="Image Generation Guidance Scale", interactive=True)
+        guidance_scale = gr.Number(value=6.0, step=0.1, label="Guidance Scale", interactive=True)
         steps = gr.Number(value=25, step=1, label="Steps", interactive=True)
 
-    with gr.Row():
-        width = gr.Slider( label="Width", minimum=256, maximum=2048, step=64, value=1024)
-        height = gr.Slider( label="Height", minimum=256, maximum=2048, step=64, value=1024)
-
-    with gr.Row():
-        randomize_seed = gr.Radio(
-            ["Fix Seed", "Randomize Seed"],
-            value="Randomize Seed",
-            type="index",
-            show_label=False,
-            interactive=True,
-        )
-        seed = gr.Number(value=1371, step=1, label="Seed", interactive=True)
+    with gr.Accordion("Advanced options", open=False):
+        with gr.Row():
+            with gr.Column(scale=1):
+                use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True, visible=True)
+            with gr.Column(scale=2):
+                negative_prompt = gr.Text(
+                    label="Negative prompt",
+                    max_lines=1,
+                    placeholder="Enter a negative prompt",
+                    value="(deformed iris, deformed pupils, semi-realistic, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, extra fingers, poorly drawn hands, poorly drawn face, deformed, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, (deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, ugly, disgusting,",
+                    visible=True,
+                )
+        with gr.Row():
+            width = gr.Slider( label="Width", minimum=256, maximum=2048, step=64, value=1024)
+            height = gr.Slider( label="Height", minimum=256, maximum=2048, step=64, value=1024)
+        with gr.Row():
+            randomize_seed = gr.Radio(
+                ["Fix Seed", "Randomize Seed"],
+                value="Randomize Seed",
+                type="index",
+                show_label=False,
+                interactive=True,
+            )
+            seed = gr.Number(value=1371, step=1, label="Seed", interactive=True)
 
 
 
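One detail worth flagging in the seed controls: `gr.Radio(..., type="index")` passes the selected option's position to the callback, so "Fix Seed" arrives as 0 (falsy) and "Randomize Seed" as 1 (truthy), which is what `if randomize_seed:` in `king()` relies on. A standalone illustration (not part of app.py):

```python
import gradio as gr

with gr.Blocks() as radio_demo:
    # With type="index", the callback receives 0 for "Fix Seed" and 1 for "Randomize Seed".
    choice = gr.Radio(["Fix Seed", "Randomize Seed"], value="Randomize Seed",
                      type="index", show_label=False)
    out = gr.Number(label="index passed to the callback")
    choice.change(lambda idx: idx, inputs=choice, outputs=out)
```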
@@ -227,6 +244,7 @@ with gr.Blocks(css=css) as demo:
             width,
             height,
             guidance_scale,
+            fast,
         ],
         outputs=[seed, input_image],
     )
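For context, the `fast` checkbox is threaded into the existing `generate_button.click` wiring. The components before `width` sit outside the diff context, so the argument order below is an assumption for illustration, not verbatim app.py:

```python
# Hypothetical shape of the event wiring this hunk edits.
generate_button.click(
    fn=king,
    inputs=[type, input_image, instruction, steps, randomize_seed, seed,
            width, height, guidance_scale, fast],
    outputs=[seed, input_image],
)
```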
 