BennoKrojer committed
Commit 5d8cce0
1 Parent(s): 4a973c3

Upload app.py

Files changed (1)
  1. app.py +7 -40
app.py CHANGED
@@ -1,35 +1,14 @@
 from __future__ import annotations
 import math
-import random
 import gradio as gr
 import torch
 from PIL import Image, ImageOps
 from diffusers import StableDiffusionInstructPix2PixPipeline
 
-example_instructions = [
-    "move the lemon to the right of the table"
-]
-
 def main():
     pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("McGill-NLP/AURORA", safety_checker=None).to("cuda")
     example_image = Image.open("example.jpg").convert("RGB")
 
-    def load_example(
-        steps: int,
-        seed: int,
-        text_cfg_scale: float,
-        image_cfg_scale: float,
-    ):
-        example_instruction = random.choice(example_instructions)
-        return [example_image, example_instruction] + generate(
-            example_image,
-            example_instruction,
-            steps,
-            seed,
-            text_cfg_scale,
-            image_cfg_scale,
-        )
-
     def generate(
         input_image: Image.Image,
         instruction: str,
@@ -57,28 +36,26 @@ def main():
         return [seed, text_cfg_scale, image_cfg_scale, edited_image]
 
     def reset():
-        return [50, 42, 7.5, 1.5, None]
+        return ["", 50, 42, 7.5, 1.5, None, None]
 
     with gr.Blocks() as demo:
         gr.HTML("""<h1 style="font-weight: 900; margin-bottom: 10px;">
   AURORA: Learning Action and Reasoning-Centric Image Editing from Videos and Simulations
 </h1>
 <p>
-AURORA (Action Reasoning Object Attribute) enables training an instruction-guided image editing model that can perform action and reasoning-centric edits, in addition to "simpler" established object, attribute or global edits. <b> To illustrate this, please click "Load example" </b>.
+AURORA (Action Reasoning Object Attribute) enables training an instruction-guided image editing model that can perform action and reasoning-centric edits, in addition to "simpler" established object, attribute or global edits.
 </p>""")
 
         with gr.Row():
             with gr.Column(scale=3):
-                instruction = gr.Textbox(lines=1, label="Edit instruction", interactive=True)
+                instruction = gr.Textbox(value="move the lemon to the right of the table", lines=1, label="Edit instruction", interactive=True)
             with gr.Column(scale=1, min_width=100):
                 generate_button = gr.Button("Generate", variant="primary")
             with gr.Column(scale=1, min_width=100):
                 reset_button = gr.Button("Reset", variant="stop")
-            with gr.Column(scale=1, min_width=100):
-                load_button = gr.Button("Load example")
 
         with gr.Row():
-            input_image = gr.Image(label="Input image", type="pil", interactive=True)
+            input_image = gr.Image(value=example_image, label="Input image", type="pil", interactive=True)
             edited_image = gr.Image(label=f"Edited image", type="pil", interactive=False)
 
         with gr.Row():
@@ -86,17 +63,7 @@ def main():
             seed = gr.Number(value=42, precision=0, label="Seed", interactive=True)
             text_cfg_scale = gr.Number(value=7.5, label=f"Text CFG", interactive=True)
             image_cfg_scale = gr.Number(value=1.5, label=f"Image CFG", interactive=True)
-
-        load_button.click(
-            fn=load_example,
-            inputs=[
-                steps,
-                seed,
-                text_cfg_scale,
-                image_cfg_scale,
-            ],
-            outputs=[input_image, instruction, seed, text_cfg_scale, image_cfg_scale, edited_image],
-        )
+
         generate_button.click(
            fn=generate,
            inputs=[
@@ -112,7 +79,7 @@ def main():
         reset_button.click(
             fn=reset,
             inputs=[],
-            outputs=[steps, seed, text_cfg_scale, image_cfg_scale, edited_image],
+            outputs=[instruction, steps, seed, text_cfg_scale, image_cfg_scale, edited_image, input_image],
         )
 
     demo.queue()
@@ -120,4 +87,4 @@ def main():
     # demo.launch(share=True)
 
 if __name__ == "__main__":
-    main()
+    main()
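In net, this commit removes the random "Load example" machinery and bakes the example directly into the UI: the sample instruction becomes the Textbox default, the sample image becomes the Image default, and reset() is extended to restore both. Note that Gradio maps a callback's returned list positionally onto the components in outputs=, so reset() must return exactly one value per listed component, in the same order. Below is a minimal, self-contained sketch of that pattern (component set trimmed down; no model or GPU required):

import gradio as gr

def reset():
    # One value per component in `outputs=` below, in the same order:
    # instruction text, steps, seed, then None to clear both images.
    return ["move the lemon to the right of the table", 50, 42, None, None]

with gr.Blocks() as demo:
    instruction = gr.Textbox(value="move the lemon to the right of the table",
                             label="Edit instruction")
    steps = gr.Number(value=50, precision=0, label="Steps")
    seed = gr.Number(value=42, precision=0, label="Seed")
    input_image = gr.Image(type="pil", label="Input image")
    edited_image = gr.Image(type="pil", interactive=False, label="Edited image")
    reset_button = gr.Button("Reset", variant="stop")
    reset_button.click(fn=reset, inputs=[],
                       outputs=[instruction, steps, seed,
                                edited_image, input_image])

demo.launch()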
 
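The body of generate() falls outside the hunks shown above. With diffusers' StableDiffusionInstructPix2PixPipeline it plausibly reduces to a single pipeline call; the sketch below is an assumption modeled on the stock InstructPix2Pix Gradio demo, not code from this commit (the resize policy in particular is illustrative). It needs a CUDA GPU plus the torch, diffusers, and Pillow packages.

import torch
from PIL import Image, ImageOps
from diffusers import StableDiffusionInstructPix2PixPipeline

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "McGill-NLP/AURORA", safety_checker=None).to("cuda")

def edit(input_image: Image.Image, instruction: str, steps: int = 50,
         seed: int = 42, text_cfg_scale: float = 7.5,
         image_cfg_scale: float = 1.5) -> Image.Image:
    # Scale the long side to ~512 px and snap both sides to multiples of 64,
    # the resolution regime InstructPix2Pix-style models are trained at
    # (this resize policy is an assumption, not taken from the commit).
    width, height = input_image.size
    factor = 512 / max(width, height)
    size = (int(width * factor) // 64 * 64, int(height * factor) // 64 * 64)
    input_image = ImageOps.fit(input_image, size, method=Image.LANCZOS)

    generator = torch.manual_seed(seed)  # fixed seed -> reproducible edits
    return pipe(
        instruction,
        image=input_image,
        num_inference_steps=steps,
        guidance_scale=text_cfg_scale,         # adherence to the instruction
        image_guidance_scale=image_cfg_scale,  # adherence to the input image
        generator=generator,
    ).images[0]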