linoyts (HF staff) committed
Commit e2371c5
1 Parent: 840f662

Update app.py

Files changed (1)
  1. app.py  +14  -24
app.py CHANGED
@@ -7,6 +7,10 @@ import time
 import numpy as np
 import cv2
 from PIL import Image
+from diffusers.utils import load_image
+from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipeline
+from diffusers.models.controlnet_flux import FluxControlNetModel
+
 
 def process_controlnet_img(image):
     controlnet_img = np.array(image)
@@ -20,27 +24,12 @@ pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell",
 #pipe.enable_model_cpu_offload()
 t5_slider = T5SliderFlux(pipe, device=torch.device("cuda"))
 
-# pipe_adapter = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash").to("cuda", torch.float16)
-# pipe_adapter.scheduler = EulerDiscreteScheduler.from_config(pipe_adapter.scheduler.config)
-# #pipe_adapter.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
-# # scale = 0.8
-# # pipe_adapter.set_ip_adapter_scale(scale)
-# clip_slider_ip = CLIPSliderXL(sd_pipe=pipe_adapter, device=torch.device("cuda"))
-
-# controlnet = ControlNetModel.from_pretrained(
-# "xinsir/controlnet-canny-sdxl-1.0", # insert here your choice of controlnet
-# torch_dtype=torch.float16
-# )
-# vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-# pipe_controlnet = StableDiffusionXLControlNetPipeline.from_pretrained(
-# "sd-community/sdxl-flash",
-# controlnet=controlnet,
-# vae=vae,
-# torch_dtype=torch.float16,
-# )
-# t5_slider_controlnet = T5SliderFlux(sd_pipe=pipe_controlnet,device=torch.device("cuda"))
-
-# clip_slider_inv = CLIPSliderXL_inv(sd_pipe=pipe_inv,device=torch.device("cuda"))
+base_model = 'black-forest-labs/FLUX.1-schnell'
+controlnet_model = 'InstantX/FLUX.1-dev-Controlnet-Canny-alpha'
+controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
+pipe_controlnet = FluxControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16)
+t5_slider_controlnet = T5SliderFlux(sd_pipe=pipe_controlnet,device=torch.device("cuda"))
+
 
 @spaces.GPU(duration=120)
 def generate(slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale,
@@ -72,7 +61,7 @@ def generate(slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale,
 
     if img2img_type=="controlnet canny" and img is not None:
         control_img = process_controlnet_img(img)
-        image = t5_slider.generate(prompt, guidance_scale=guidance_scale, image=control_img, controlnet_conditioning_scale =controlnet_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
+        image = t5_slider_controlnet.generate(prompt, guidance_scale=guidance_scale, image=control_img, controlnet_conditioning_scale =controlnet_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
     elif img2img_type=="ip adapter" and img is not None:
         image = t5_slider.generate(prompt, guidance_scale=guidance_scale, ip_adapter_image=img, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
     else: # text to image
@@ -98,7 +87,7 @@ def update_scales(x,y,prompt,seed, steps, guidance_scale,
     avg_diff_2nd = avg_diff_y.cuda()
     if img2img_type=="controlnet canny" and img is not None:
         control_img = process_controlnet_img(img)
-        image = t5_slider.generate(prompt, guidance_scale=guidance_scale, image=control_img, controlnet_conditioning_scale =controlnet_scale, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
+        image = t5_slider_controlnet.generate(prompt, guidance_scale=guidance_scale, image=control_img, controlnet_conditioning_scale =controlnet_scale, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
     elif img2img_type=="ip adapter" and img is not None:
         image = t5_slider.generate(prompt, guidance_scale=guidance_scale, ip_adapter_image=img, scale=x, scale_2nd=y, seed=seed, num_inference_steps=steps, avg_diff=avg_diff,avg_diff_2nd=avg_diff_2nd)
     else:
@@ -197,7 +186,7 @@ with gr.Blocks(css=css) as demo:
                 image = gr.ImageEditor(type="pil", image_mode="L", crop_size=(512, 512))
                 slider_x_a = gr.Dropdown(label="Slider X concept range", allow_custom_value=True, multiselect=True, max_choices=2)
                 slider_y_a = gr.Dropdown(label="Slider X concept range", allow_custom_value=True, multiselect=True, max_choices=2)
-                img2img_type = gr.Radio(["controlnet canny", "ip adapter"], label="", info="")
+                img2img_type = gr.Radio(["controlnet canny", "ip adapter"], label="", info="", visible=False, value="controlnet canny")
                 prompt_a = gr.Textbox(label="Prompt")
                 submit_a = gr.Button("Submit")
             with gr.Column():
@@ -231,6 +220,7 @@ with gr.Blocks(css=css) as demo:
                         maximum=5.0,
                         step=0.1,
                         value=0.8,
+                        visible=False
                     )
                     seed_a = gr.Slider(minimum=0, maximum=np.iinfo(np.int32).max, label="Seed", interactive=True, randomize=True)
 
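
Taken together, the additions load the InstantX Canny ControlNet into a FluxControlNetPipeline and send the hidden "controlnet canny" branch through t5_slider_controlnet. For reference, below is a minimal standalone sketch of that path using the plain diffusers API with the same import locations as the diff; the Canny preprocessing is only illustrative (the body of process_controlnet_img is not shown here), and the input path, prompt, and parameter values are placeholders.

import torch
import numpy as np
import cv2
from PIL import Image
from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipeline
from diffusers.models.controlnet_flux import FluxControlNetModel

def canny_control_image(image, low=100, high=200):
    # Turn the input into a 3-channel Canny edge map, the usual conditioning
    # format for Canny ControlNets (illustrative; may differ from process_controlnet_img).
    edges = cv2.Canny(np.array(image.convert("RGB")), low, high)
    return Image.fromarray(np.stack([edges] * 3, axis=-1))

# Same model IDs as the commit: FLUX.1-schnell base with the FLUX.1-dev Canny ControlNet.
controlnet = FluxControlNetModel.from_pretrained(
    "InstantX/FLUX.1-dev-Controlnet-Canny-alpha", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", controlnet=controlnet, torch_dtype=torch.bfloat16
).to("cuda")

control_image = canny_control_image(Image.open("input.jpg"))  # placeholder path
image = pipe(
    "a photo of a vintage car",             # placeholder prompt
    control_image=control_image,
    controlnet_conditioning_scale=0.8,      # placeholder value
    num_inference_steps=4,
    guidance_scale=3.5,
).images[0]
image.save("output.png")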