gokaygokay committed
Commit e33e382
Parent(s): 6baafd5

Update app.py

Files changed (1): app.py (+26 -5)
app.py CHANGED
@@ -288,16 +288,17 @@ class PromptGenerator:
         components.append(f"by {self.get_choice(kwargs.get('artist', ''), ARTIST)}")
         components.append("BREAK_CLIPL")

-        if input_image is not None:
-            caption = florence_caption(input_image)
-            components.append(f" {caption}")
-
         prompt = " ".join(components)
         prompt = re.sub(" +", " ", prompt)
         replaced = prompt.replace("of as", "of")
         replaced = self.clean_consecutive_commas(replaced)

         return self.process_string(replaced, seed)
+
+    def add_caption_to_prompt(self, prompt, caption):
+        if caption:
+            return f"{prompt} {caption}"
+        return prompt

 class HuggingFaceInferenceNode:
     def __init__(self):
@@ -406,8 +407,11 @@ def create_interface():
                 background = gr.Dropdown(["disabled", "random"] + BACKGROUND, label="Background", value="random")
             with gr.Column():
                 input_image = gr.Image(label="Input Image (optional)")
+                caption_output = gr.Textbox(label="Generated Caption", lines=3)
+                create_caption_button = gr.Button("Create Caption")
                 generate_button = gr.Button("Generate Prompt")
                 output = gr.Textbox(label="Generated Prompt / Input Text", lines=5)
+                add_caption_button = gr.Button("Add Caption to Prompt")
                 t5xxl_output = gr.Textbox(label="T5XXL Output", visible=True)
                 clip_l_output = gr.Textbox(label="CLIP L Output", visible=True)
                 clip_g_output = gr.Textbox(label="CLIP G Output", visible=True)
@@ -424,14 +428,31 @@ def create_interface():
             generate_text_button = gr.Button("Generate Text")
             text_output = gr.Textbox(label="Generated Text", lines=10)

+        def create_caption(image):
+            if image is not None:
+                return florence_caption(image)
+            return ""
+
+        create_caption_button.click(
+            create_caption,
+            inputs=[input_image],
+            outputs=[caption_output]
+        )
+
         generate_button.click(
             prompt_generator.generate_prompt,
             inputs=[seed, custom, subject, artform, photo_type, body_types, default_tags, roles, hairstyles,
                     additional_details, photography_styles, device, photographer, artist, digital_artform,
-                    place, lighting, clothing, composition, pose, background, input_image],
+                    place, lighting, clothing, composition, pose, background],
             outputs=[output, gr.Number(visible=False), t5xxl_output, clip_l_output, clip_g_output]
         )

+        add_caption_button.click(
+            prompt_generator.add_caption_to_prompt,
+            inputs=[output, caption_output],
+            outputs=[output]
+        )
+
         generate_text_button.click(
             huggingface_node.generate,
             inputs=[model, output, happy_talk, compress, compression_level, poster, custom_base_prompt],
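For reference, the helper introduced by this commit simply appends a non-empty caption to an existing prompt and leaves the prompt untouched otherwise. A standalone sketch of that behaviour (the function body is copied from the diff; the example strings are made up):

# Standalone copy of the new PromptGenerator.add_caption_to_prompt logic,
# shown outside the class purely for illustration.
def add_caption_to_prompt(prompt, caption):
    if caption:
        return f"{prompt} {caption}"
    return prompt

print(add_caption_to_prompt("portrait photo of a sailor", "A man in a yellow raincoat."))
# -> portrait photo of a sailor A man in a yellow raincoat.
print(add_caption_to_prompt("portrait photo of a sailor", ""))
# -> portrait photo of a sailor   (an empty caption is a no-op)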
 
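The UI change replaces the implicit captioning inside generate_prompt with an explicit two-button flow: "Create Caption" writes the Florence caption into its own textbox, and "Add Caption to Prompt" merges that textbox into the generated prompt. Below is a minimal, self-contained Gradio sketch of that wiring; the placeholder captioner stands in for florence_caption (it is not the real model), while the component names and handlers mirror the diff.

import gradio as gr

def placeholder_caption(image):
    # Stand-in for florence_caption(image) from app.py.
    return "A placeholder caption describing the uploaded image."

def create_caption(image):
    # Mirrors the helper added in this commit: caption only when an image is present.
    if image is not None:
        return placeholder_caption(image)
    return ""

def add_caption_to_prompt(prompt, caption):
    # Mirrors PromptGenerator.add_caption_to_prompt from the diff.
    if caption:
        return f"{prompt} {caption}"
    return prompt

with gr.Blocks() as demo:
    input_image = gr.Image(label="Input Image (optional)")
    caption_output = gr.Textbox(label="Generated Caption", lines=3)
    create_caption_button = gr.Button("Create Caption")
    output = gr.Textbox(label="Generated Prompt / Input Text", lines=5)
    add_caption_button = gr.Button("Add Caption to Prompt")

    # Step 1: caption the uploaded image into its own textbox.
    create_caption_button.click(create_caption, inputs=[input_image], outputs=[caption_output])
    # Step 2: append that caption to whatever prompt is currently in `output`.
    add_caption_button.click(add_caption_to_prompt, inputs=[output, caption_output], outputs=[output])

if __name__ == "__main__":
    demo.launch()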