John6666 committed on
Commit
f55d446
1 Parent(s): 26a5ac0

Upload 10 files

Files changed (7)
  1. README.md +1 -1
  2. app.py +429 -172
  3. custom.png +0 -0
  4. loras.json +6 -0
  5. mod.py +8 -21
  6. prompts.csv +242 -0
  7. requirements.txt +3 -3
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏆😻
  colorFrom: red
  colorTo: pink
  sdk: gradio
- sdk_version: 4.41.0
+ sdk_version: 4.44.1
  app_file: app.py
  pinned: true
  license: mit
app.py CHANGED
@@ -7,13 +7,17 @@ from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_imag
7
  from diffusers.utils import load_image
8
  from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel, FluxControlNetImg2ImgPipeline
9
  from huggingface_hub import HfFileSystem, ModelCard
 
 
10
  import random
11
  import time
 
 
12
 
13
  from env import models, num_loras, num_cns
14
  from mod import (clear_cache, get_repo_safetensors, is_repo_name, is_repo_exists, get_model_trigger,
15
  description_ui, compose_lora_json, is_valid_lora, fuse_loras, save_image, preprocess_i2i_image,
16
- get_trigger_word, enhance_prompt, deselect_lora, set_control_union_image,
17
  get_control_union_mode, set_control_union_mode, get_control_params, translate_to_en)
18
  from flux import (search_civitai_lora, select_civitai_lora, search_civitai_lora_json,
19
  download_my_lora, get_all_lora_tupled_list, apply_lora_prompt,
@@ -21,6 +25,10 @@ from flux import (search_civitai_lora, select_civitai_lora, search_civitai_lora_
21
  from tagger.tagger import predict_tags_wd, compose_prompt_to_copy
22
  from tagger.fl2flux import predict_tags_fl2_flux
23
24
  # Load LoRAs from JSON file
25
  with open('loras.json', 'r') as f:
26
  loras = json.load(f)
@@ -99,7 +107,7 @@ class calculateDuration:
99
  def __enter__(self):
100
  self.start_time = time.time()
101
  return self
102
-
103
  def __exit__(self, exc_type, exc_value, traceback):
104
  self.end_time = time.time()
105
  self.elapsed_time = self.end_time - self.start_time
@@ -108,32 +116,217 @@ class calculateDuration:
108
  else:
109
  print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
110
 
111
- def update_selection(evt: gr.SelectData, width, height):
112
- selected_lora = loras[evt.index]
113
- new_placeholder = f"Type a prompt for {selected_lora['title']}"
114
- lora_repo = selected_lora["repo"]
115
- updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
116
- if "aspect" in selected_lora:
117
- if selected_lora["aspect"] == "portrait":
118
- width = 768
119
- height = 1024
120
- elif selected_lora["aspect"] == "landscape":
121
- width = 1024
122
- height = 768
123
  else:
124
- width = 1024
125
- height = 1024
126
  return (
127
- gr.update(placeholder=new_placeholder),
128
- updated_text,
129
- evt.index,
130
- width,
131
- height,
132
  )
133
 
134
  @spaces.GPU(duration=70)
135
  @torch.inference_mode()
136
- def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress=gr.Progress(track_tqdm=True)):
137
  global pipe, taef1, good_vae, controlnet, controlnet_union
138
  try:
139
  good_vae.to("cuda")
@@ -155,7 +348,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
155
  width=width,
156
  height=height,
157
  generator=generator,
158
- joint_attention_kwargs={"scale": lora_scale},
159
  output_type="pil",
160
  good_vae=good_vae,
161
  ):
@@ -177,7 +370,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
177
  height=height,
178
  controlnet_conditioning_scale=scales,
179
  generator=generator,
180
- joint_attention_kwargs={"scale": lora_scale},
181
  ).images:
182
  yield img
183
  except Exception as e:
@@ -186,7 +379,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
186
 
187
  @spaces.GPU(duration=70)
188
  @torch.inference_mode()
189
- def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed, cn_on, progress=gr.Progress(track_tqdm=True)):
190
  global pipe_i2i, good_vae, controlnet, controlnet_union
191
  try:
192
  good_vae.to("cuda")
@@ -210,7 +403,7 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
210
  width=width,
211
  height=height,
212
  generator=generator,
213
- joint_attention_kwargs={"scale": lora_scale},
214
  output_type="pil",
215
  ).images[0]
216
  return final_image
@@ -234,7 +427,7 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
234
  height=height,
235
  controlnet_conditioning_scale=scales,
236
  generator=generator,
237
- joint_attention_kwargs={"scale": lora_scale},
238
  output_type="pil",
239
  ).images[0]
240
  return final_image
@@ -242,73 +435,99 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
242
  print(e)
243
  raise gr.Error(f"I2I Inference Error: {e}") from e
244
 
245
- def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height,
246
- lora_scale, lora_json, cn_on, translate_on, progress=gr.Progress(track_tqdm=True)):
247
- global pipe
248
- if selected_index is None and not is_valid_lora(lora_json):
 
249
  gr.Info("LoRA isn't selected.")
250
  # raise gr.Error("You must select a LoRA before proceeding.")
251
  progress(0, desc="Preparing Inference.")
252
253
  with calculateDuration("Unloading LoRA"):
254
  try:
255
- pipe.unfuse_lora()
256
  pipe.unload_lora_weights()
257
- pipe_i2i.unfuse_lora()
258
  pipe_i2i.unload_lora_weights()
259
  except Exception as e:
260
  print(e)
261
-
262
- clear_cache() #
263
 
264
- if translate_on: prompt = translate_to_en(prompt)
265
 
266
- prompt_mash = prompt + get_model_trigger(last_model)
267
- if is_valid_lora(lora_json):
268
- # Load External LoRA weights
 
 
269
  with calculateDuration("Loading External LoRA weights"):
270
- fuse_loras(pipe, lora_json)
271
- fuse_loras(pipe_i2i, lora_json)
272
  trigger_word = get_trigger_word(lora_json)
273
- prompt_mash = f"{prompt} {trigger_word}"
274
- if selected_index is not None:
275
- selected_lora = loras[selected_index]
276
- lora_path = selected_lora["repo"]
277
- trigger_word = selected_lora["trigger_word"]
278
- if(trigger_word):
279
- if "trigger_position" in selected_lora:
280
- if selected_lora["trigger_position"] == "prepend":
281
- prompt_mash = f"{trigger_word} {prompt_mash}"
282
- else:
283
- prompt_mash = f"{prompt_mash} {trigger_word}"
284
- else:
285
- prompt_mash = f"{trigger_word} {prompt_mash}"
286
- else:
287
- prompt_mash = prompt_mash
288
- # Load LoRA weights
289
- with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
290
- if(image_input is not None):
291
- if "weights" in selected_lora:
292
- pipe_i2i.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
293
  else:
294
- pipe_i2i.load_lora_weights(lora_path)
295
  else:
296
- if "weights" in selected_lora:
297
- pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
298
  else:
299
- pipe.load_lora_weights(lora_path)
300
-
301
  # Set random seed for reproducibility
302
  with calculateDuration("Randomizing seed"):
303
  if randomize_seed:
304
  seed = random.randint(0, MAX_SEED)
305
 
 
306
  progress(0, desc="Running Inference.")
307
  if(image_input is not None):
308
- final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed, cn_on, progress)
309
  yield save_image(final_image, None, last_model, prompt_mash, height, width, steps, cfg_scale, seed), seed, gr.update(visible=False)
310
  else:
311
- image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress)
312
  # Consume the generator to get the final image
313
  final_image = None
314
  step_counter = 0
@@ -319,90 +538,65 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
319
  yield image, seed, gr.update(value=progress_bar, visible=True)
320
  yield save_image(final_image, None, last_model, prompt_mash, height, width, steps, cfg_scale, seed), seed, gr.update(value=progress_bar, visible=False)
321
322
  def get_huggingface_safetensors(link):
323
- split_link = link.split("/")
324
- if(len(split_link) == 2):
325
- model_card = ModelCard.load(link)
326
- base_model = model_card.data.get("base_model")
327
- print(base_model)
328
- if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
329
- raise Exception("Not a FLUX LoRA!")
330
- image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
331
- trigger_word = model_card.data.get("instance_prompt", "")
332
- image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
333
- fs = HfFileSystem()
334
- try:
335
- list_of_files = fs.ls(link, detail=False)
336
- for file in list_of_files:
337
- if(file.endswith(".safetensors")):
338
- safetensors_name = file.split("/")[-1]
339
- if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
340
- image_elements = file.split("/")
341
- image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
342
- except Exception as e:
343
- print(e)
344
- gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
345
- raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
346
- return split_link[1], link, safetensors_name, trigger_word, image_url
347
 
348
  def check_custom_model(link):
349
- if(link.startswith("https://")):
350
- if(link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co")):
351
  link_split = link.split("huggingface.co/")
352
  return get_huggingface_safetensors(link_split[1])
353
- else:
354
- return get_huggingface_safetensors(link)
355
-
356
- def add_custom_lora(custom_lora):
357
- global loras
358
- if(custom_lora):
359
- try:
360
- title, repo, path, trigger_word, image = check_custom_model(custom_lora)
361
- print(f"Loaded custom LoRA: {repo}")
362
- card = f'''
363
- <div class="custom_lora_card">
364
- <span>Loaded custom LoRA:</span>
365
- <div class="card_internal">
366
- <img src="{image}" />
367
- <div>
368
- <h3>{title}</h3>
369
- <small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
370
- </div>
371
- </div>
372
- </div>
373
- '''
374
- existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
375
- if(not existing_item_index):
376
- new_item = {
377
- "image": image,
378
- "title": title,
379
- "repo": repo,
380
- "weights": path,
381
- "trigger_word": trigger_word
382
- }
383
- print(new_item)
384
- existing_item_index = len(loras)
385
- loras.append(new_item)
386
-
387
- return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
388
- except Exception as e:
389
- gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
390
- return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-FLUX LoRA"), gr.update(visible=True), gr.update(), "", None, ""
391
  else:
392
- return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
393
-
394
- def remove_custom_lora():
395
- return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
396
-
397
- run_lora.zerogpu = True
398
 
399
  css = '''
400
  #gen_btn{height: 100%}
401
  #title{text-align: center}
402
  #title h1{font-size: 3em; display:inline-flex; align-items:center}
403
- #title img{width: 100px; margin-right: 0.5em}
404
- #gallery .grid-wrap{height: 10vh}
405
  #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
 
406
  .card_internal{display: flex;height: 100px;margin-top: .5em}
407
  .card_internal img{margin-right: 1em}
408
  .styler{--form-gap-width: 0px !important}
@@ -410,15 +604,21 @@ css = '''
410
  #progress .generating{display:none}
411
  .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
412
  .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
413
  .info {text-align:center; !important}
414
  '''
415
  with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache=(60, 3600)) as app:
416
  with gr.Tab("FLUX LoRA the Explorer"):
417
  title = gr.HTML(
418
- """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA">FLUX LoRA the Explorer Mod</h1>""",
419
  elem_id="title",
420
  )
421
- selected_index = gr.State(None)
 
422
  with gr.Row():
423
  with gr.Column(scale=3):
424
  with gr.Group():
@@ -438,7 +638,30 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
438
  prompt_enhance = gr.Button(value="Enhance your prompt", variant="secondary")
439
  auto_trans = gr.Checkbox(label="Auto translate to English", value=False, elem_classes="info")
440
  with gr.Column(scale=1, elem_id="gen_column"):
441
- generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
442
  with gr.Row():
443
  with gr.Column():
444
  selected_info = gr.Markdown("")
@@ -446,15 +669,15 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
446
  [(item["image"], item["title"]) for item in loras],
447
  label="LoRA Gallery",
448
  allow_preview=False,
449
- columns=3,
450
  elem_id="gallery"
451
  )
452
  with gr.Group():
453
- custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path", placeholder="multimodalart/vintage-ads-flux")
454
- gr.Markdown("[Check the list of FLUX LoRas](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
455
- custom_lora_info = gr.HTML(visible=False)
456
- custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
457
- deselect_lora_button = gr.Button("Deselect LoRA", variant="secondary")
458
  with gr.Column():
459
  progress_bar = gr.Markdown(elem_id="progress",visible=False)
460
  result = gr.Image(label="Generated Image", format="png", show_share_button=False)
@@ -470,10 +693,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
470
  input_image_preprocess = gr.Checkbox(True, label="Preprocess Input image")
471
  with gr.Column():
472
  with gr.Row():
473
- lora_scale = gr.Slider(label="LoRA Scale", minimum=-3, maximum=3, step=0.01, value=0.95)
474
  width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
475
  height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
476
- with gr.Row():
477
  cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
478
  steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
479
  with gr.Row():
@@ -482,6 +703,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
482
  disable_model_cache = gr.Checkbox(False, label="Disable model caching")
483
  with gr.Accordion("External LoRA", open=True):
484
  with gr.Column():
 
485
  lora_repo_json = gr.JSON(value=[{}] * num_loras, visible=False)
486
  lora_repo = [None] * num_loras
487
  lora_weights = [None] * num_loras
@@ -506,9 +728,9 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
506
  lora_num[i] = gr.Number(i, visible=False)
507
  with gr.Accordion("From URL", open=True, visible=True):
508
  with gr.Row():
509
- lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Flux.1 D", "Flux.1 S"], value=["Flux.1 D", "Flux.1 S"])
510
- lora_search_civitai_sort = gr.Radio(label="Sort", choices=["Highest Rated", "Most Downloaded", "Newest"], value="Highest Rated")
511
- lora_search_civitai_period = gr.Radio(label="Period", choices=["AllTime", "Year", "Month", "Week", "Day"], value="AllTime")
512
  with gr.Row():
513
  lora_search_civitai_query = gr.Textbox(label="Query", placeholder="flux", lines=1)
514
  lora_search_civitai_tag = gr.Textbox(label="Tag", lines=1)
@@ -545,24 +767,32 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
545
 
546
  gallery.select(
547
  update_selection,
548
- inputs=[width, height],
549
- outputs=[prompt, selected_info, selected_index, width, height],
550
- queue=False,
551
- show_api=False,
552
- trigger_mode="once",
553
  )
554
- custom_lora.input(
555
  add_custom_lora,
556
- inputs=[custom_lora],
557
- outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt],
558
- queue=False,
559
- show_api=False,
560
  )
561
- custom_lora_button.click(
562
  remove_custom_lora,
563
- outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora],
564
- queue=False,
565
- show_api=False,
566
  )
567
  gr.on(
568
  triggers=[generate_button.click, prompt.submit],
@@ -574,15 +804,14 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
574
  trigger_mode="once",
575
  ).success(
576
  fn=run_lora,
577
- inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height,
578
- lora_scale, lora_repo_json, cn_on, auto_trans],
579
  outputs=[result, seed, progress_bar],
580
  queue=True,
581
  show_api=True,
582
  )
583
 
584
  input_image.upload(preprocess_i2i_image, [input_image, input_image_preprocess, height, width], [input_image], queue=False, show_api=False)
585
- deselect_lora_button.click(deselect_lora, None, [prompt, selected_info, selected_index, width, height], queue=False, show_api=False)
586
  gr.on(
587
  triggers=[model_name.change, cn_on.change],
588
  fn=get_t2i_model_info,
@@ -800,10 +1029,38 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
800
  ]
801
  )
802
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
803
  description_ui()
804
  gr.LoginButton()
805
  gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
806
 
807
-
808
  app.queue()
809
  app.launch()
 
7
  from diffusers.utils import load_image
8
  from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel, FluxControlNetImg2ImgPipeline
9
  from huggingface_hub import HfFileSystem, ModelCard
10
+ import os
11
+ import copy
12
  import random
13
  import time
14
+ import requests
15
+ import pandas as pd
16
 
17
  from env import models, num_loras, num_cns
18
  from mod import (clear_cache, get_repo_safetensors, is_repo_name, is_repo_exists, get_model_trigger,
19
  description_ui, compose_lora_json, is_valid_lora, fuse_loras, save_image, preprocess_i2i_image,
20
+ get_trigger_word, enhance_prompt, set_control_union_image,
21
  get_control_union_mode, set_control_union_mode, get_control_params, translate_to_en)
22
  from flux import (search_civitai_lora, select_civitai_lora, search_civitai_lora_json,
23
  download_my_lora, get_all_lora_tupled_list, apply_lora_prompt,
 
25
  from tagger.tagger import predict_tags_wd, compose_prompt_to_copy
26
  from tagger.fl2flux import predict_tags_fl2_flux
27
 
28
+ #Load prompts for randomization
29
+ df = pd.read_csv('prompts.csv', header=None)
30
+ prompt_values = df.values.flatten()
31
+
32
  # Load LoRAs from JSON file
33
  with open('loras.json', 'r') as f:
34
  loras = json.load(f)
 
107
  def __enter__(self):
108
  self.start_time = time.time()
109
  return self
110
+
111
  def __exit__(self, exc_type, exc_value, traceback):
112
  self.end_time = time.time()
113
  self.elapsed_time = self.end_time - self.start_time
 
116
  else:
117
  print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
118
 
119
+ def download_file(url, directory=None):
120
+ if directory is None:
121
+ directory = os.getcwd() # Use current working directory if not specified
122
+
123
+ # Get the filename from the URL
124
+ filename = url.split('/')[-1]
125
+
126
+ # Full path for the downloaded file
127
+ filepath = os.path.join(directory, filename)
128
+
129
+ # Download the file
130
+ response = requests.get(url)
131
+ response.raise_for_status() # Raise an exception for bad status codes
132
+
133
+ # Write the content to the file
134
+ with open(filepath, 'wb') as file:
135
+ file.write(response.content)
136
+
137
+ return filepath
138
+
139
+ def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
140
+ selected_index = evt.index
141
+ selected_indices = selected_indices or []
142
+ if selected_index in selected_indices:
143
+ selected_indices.remove(selected_index)
144
+ else:
145
+ if len(selected_indices) < 2:
146
+ selected_indices.append(selected_index)
147
  else:
148
+ gr.Warning("You can select up to 2 LoRAs, remove one to select a new one.")
149
+ return gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), width, height, gr.update(), gr.update()
150
+
151
+ selected_info_1 = "Select a LoRA 1"
152
+ selected_info_2 = "Select a LoRA 2"
153
+ lora_scale_1 = 1.15
154
+ lora_scale_2 = 1.15
155
+ lora_image_1 = None
156
+ lora_image_2 = None
157
+ if len(selected_indices) >= 1:
158
+ lora1 = loras_state[selected_indices[0]]
159
+ selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
160
+ lora_image_1 = lora1['image']
161
+ if len(selected_indices) >= 2:
162
+ lora2 = loras_state[selected_indices[1]]
163
+ selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
164
+ lora_image_2 = lora2['image']
165
+
166
+ if selected_indices:
167
+ last_selected_lora = loras_state[selected_indices[-1]]
168
+ new_placeholder = f"Type a prompt for {last_selected_lora['title']}"
169
+ else:
170
+ new_placeholder = "Type a prompt"
171
+
172
+ return gr.update(placeholder=new_placeholder), selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, width, height, lora_image_1, lora_image_2
173
+
174
+ def remove_lora_1(selected_indices, loras_state):
175
+ if len(selected_indices) >= 1:
176
+ selected_indices.pop(0)
177
+ selected_info_1 = "Select a LoRA 1"
178
+ selected_info_2 = "Select a LoRA 2"
179
+ lora_scale_1 = 1.15
180
+ lora_scale_2 = 1.15
181
+ lora_image_1 = None
182
+ lora_image_2 = None
183
+ if len(selected_indices) >= 1:
184
+ lora1 = loras_state[selected_indices[0]]
185
+ selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
186
+ lora_image_1 = lora1['image']
187
+ if len(selected_indices) >= 2:
188
+ lora2 = loras_state[selected_indices[1]]
189
+ selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
190
+ lora_image_2 = lora2['image']
191
+ return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2
192
+
193
+ def remove_lora_2(selected_indices, loras_state):
194
+ if len(selected_indices) >= 2:
195
+ selected_indices.pop(1)
196
+ selected_info_1 = "Select a LoRA 1"
197
+ selected_info_2 = "Select a LoRA 2"
198
+ lora_scale_1 = 1.15
199
+ lora_scale_2 = 1.15
200
+ lora_image_1 = None
201
+ lora_image_2 = None
202
+ if len(selected_indices) >= 1:
203
+ lora1 = loras_state[selected_indices[0]]
204
+ selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
205
+ lora_image_1 = lora1['image']
206
+ if len(selected_indices) >= 2:
207
+ lora2 = loras_state[selected_indices[1]]
208
+ selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
209
+ lora_image_2 = lora2['image']
210
+ return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2
211
+
212
+ def randomize_loras(selected_indices, loras_state):
213
+ if len(loras_state) < 2:
214
+ raise gr.Error("Not enough LoRAs to randomize.")
215
+ selected_indices = random.sample(range(len(loras_state)), 2)
216
+ lora1 = loras_state[selected_indices[0]]
217
+ lora2 = loras_state[selected_indices[1]]
218
+ selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
219
+ selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
220
+ lora_scale_1 = 1.15
221
+ lora_scale_2 = 1.15
222
+ lora_image_1 = lora1['image']
223
+ lora_image_2 = lora2['image']
224
+ random_prompt = random.choice(prompt_values)
225
+ return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, random_prompt
226
+
227
+ def add_custom_lora(custom_lora, selected_indices, current_loras):
228
+ if custom_lora:
229
+ try:
230
+ title, repo, path, trigger_word, image = check_custom_model(custom_lora)
231
+ print(f"Loaded custom LoRA: {repo}")
232
+ existing_item_index = next((index for (index, item) in enumerate(current_loras) if item['repo'] == repo), None)
233
+ if existing_item_index is None:
234
+ if repo.endswith(".safetensors") and repo.startswith("http"):
235
+ repo = download_file(repo)
236
+ new_item = {
237
+ "image": image if image else "/home/user/app/custom.png",
238
+ "title": title,
239
+ "repo": repo,
240
+ "weights": path,
241
+ "trigger_word": trigger_word
242
+ }
243
+ print(f"New LoRA: {new_item}")
244
+ existing_item_index = len(current_loras)
245
+ current_loras.append(new_item)
246
+
247
+ # Update gallery
248
+ gallery_items = [(item["image"], item["title"]) for item in current_loras]
249
+ # Update selected_indices if there's room
250
+ if len(selected_indices) < 2:
251
+ selected_indices.append(existing_item_index)
252
+ else:
253
+ gr.Warning("You can select up to 2 LoRAs, remove one to select a new one.")
254
+
255
+ # Update selected_info and images
256
+ selected_info_1 = "Select a LoRA 1"
257
+ selected_info_2 = "Select a LoRA 2"
258
+ lora_scale_1 = 1.15
259
+ lora_scale_2 = 1.15
260
+ lora_image_1 = None
261
+ lora_image_2 = None
262
+ if len(selected_indices) >= 1:
263
+ lora1 = current_loras[selected_indices[0]]
264
+ selected_info_1 = f"### LoRA 1 Selected: {lora1['title']} ✨"
265
+ lora_image_1 = lora1['image'] if lora1['image'] else None
266
+ if len(selected_indices) >= 2:
267
+ lora2 = current_loras[selected_indices[1]]
268
+ selected_info_2 = f"### LoRA 2 Selected: {lora2['title']} ✨"
269
+ lora_image_2 = lora2['image'] if lora2['image'] else None
270
+ print("Finished adding custom LoRA")
271
+ return (
272
+ current_loras,
273
+ gr.update(value=gallery_items),
274
+ selected_info_1,
275
+ selected_info_2,
276
+ selected_indices,
277
+ lora_scale_1,
278
+ lora_scale_2,
279
+ lora_image_1,
280
+ lora_image_2
281
+ )
282
+ except Exception as e:
283
+ print(e)
284
+ gr.Warning(str(e))
285
+ return current_loras, gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update()
286
+ else:
287
+ return current_loras, gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update()
288
+
289
+ def remove_custom_lora(selected_indices, current_loras):
290
+ if current_loras:
291
+ custom_lora_repo = current_loras[-1]['repo']
292
+ # Remove from loras list
293
+ current_loras = current_loras[:-1]
294
+ # Remove from selected_indices if selected
295
+ custom_lora_index = len(current_loras)
296
+ if custom_lora_index in selected_indices:
297
+ selected_indices.remove(custom_lora_index)
298
+ # Update gallery
299
+ gallery_items = [(item["image"], item["title"]) for item in current_loras]
300
+ # Update selected_info and images
301
+ selected_info_1 = "Select a LoRA 1"
302
+ selected_info_2 = "Select a LoRA 2"
303
+ lora_scale_1 = 1.15
304
+ lora_scale_2 = 1.15
305
+ lora_image_1 = None
306
+ lora_image_2 = None
307
+ if len(selected_indices) >= 1:
308
+ lora1 = current_loras[selected_indices[0]]
309
+ selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
310
+ lora_image_1 = lora1['image']
311
+ if len(selected_indices) >= 2:
312
+ lora2 = current_loras[selected_indices[1]]
313
+ selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
314
+ lora_image_2 = lora2['image']
315
  return (
316
+ current_loras,
317
+ gr.update(value=gallery_items),
318
+ selected_info_1,
319
+ selected_info_2,
320
+ selected_indices,
321
+ lora_scale_1,
322
+ lora_scale_2,
323
+ lora_image_1,
324
+ lora_image_2
325
  )
326
 
327
  @spaces.GPU(duration=70)
328
  @torch.inference_mode()
329
+ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, cn_on, progress=gr.Progress(track_tqdm=True)):
330
  global pipe, taef1, good_vae, controlnet, controlnet_union
331
  try:
332
  good_vae.to("cuda")
 
348
  width=width,
349
  height=height,
350
  generator=generator,
351
+ joint_attention_kwargs={"scale": 1.0},
352
  output_type="pil",
353
  good_vae=good_vae,
354
  ):
 
370
  height=height,
371
  controlnet_conditioning_scale=scales,
372
  generator=generator,
373
+ joint_attention_kwargs={"scale": 1.0},
374
  ).images:
375
  yield img
376
  except Exception as e:
 
379
 
380
  @spaces.GPU(duration=70)
381
  @torch.inference_mode()
382
+ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed, cn_on, progress=gr.Progress(track_tqdm=True)):
383
  global pipe_i2i, good_vae, controlnet, controlnet_union
384
  try:
385
  good_vae.to("cuda")
 
403
  width=width,
404
  height=height,
405
  generator=generator,
406
+ joint_attention_kwargs={"scale": 1.0},
407
  output_type="pil",
408
  ).images[0]
409
  return final_image
 
427
  height=height,
428
  controlnet_conditioning_scale=scales,
429
  generator=generator,
430
+ joint_attention_kwargs={"scale": 1.0},
431
  output_type="pil",
432
  ).images[0]
433
  return final_image
 
435
  print(e)
436
  raise gr.Error(f"I2I Inference Error: {e}") from e
437
 
438
+ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2,
439
+ randomize_seed, seed, width, height, loras_state,
440
+ lora_json, cn_on, translate_on, progress=gr.Progress(track_tqdm=True)):
441
+ global pipe, pipe_i2i
442
+ if not selected_indices and not is_valid_lora(lora_json):
443
  gr.Info("LoRA isn't selected.")
444
  # raise gr.Error("You must select a LoRA before proceeding.")
445
  progress(0, desc="Preparing Inference.")
446
 
447
+ selected_loras = [loras_state[idx] for idx in selected_indices]
448
+
449
+ if translate_on: prompt = translate_to_en(prompt)
450
+
451
+ # Build the prompt with trigger words
452
+ prepends = []
453
+ appends = []
454
+ for lora in selected_loras:
455
+ trigger_word = lora.get('trigger_word', '')
456
+ if trigger_word:
457
+ if lora.get("trigger_position") == "prepend":
458
+ prepends.append(trigger_word)
459
+ else:
460
+ appends.append(trigger_word)
461
+ prompt_mash = " ".join(prepends + [prompt] + appends)
462
+ print("Prompt Mash: ", prompt_mash) #
463
+
464
+ # Unload previous LoRA weights
465
  with calculateDuration("Unloading LoRA"):
466
  try:
467
+ #pipe.unfuse_lora()
468
  pipe.unload_lora_weights()
469
+ #pipe_i2i.unfuse_lora()
470
  pipe_i2i.unload_lora_weights()
471
  except Exception as e:
472
  print(e)
 
 
473
 
474
+ print(pipe.get_active_adapters()) #
475
+ print(pipe_i2i.get_active_adapters()) #
476
+
477
+ clear_cache() #
478
 
479
+ # Build the prompt for External LoRAs
480
+ prompt_mash = prompt_mash + get_model_trigger(last_model)
481
+ lora_names = []
482
+ lora_weights = []
483
+ if is_valid_lora(lora_json): # Load External LoRA weights
484
  with calculateDuration("Loading External LoRA weights"):
485
+ if image_input is not None: lora_names, lora_weights = fuse_loras(pipe_i2i, lora_json)
486
+ else: lora_names, lora_weights = fuse_loras(pipe, lora_json)
487
  trigger_word = get_trigger_word(lora_json)
488
+ prompt_mash = f"{prompt_mash} {trigger_word}"
489
+ print("Prompt Mash: ", prompt_mash) #
490
+
491
+ # Load LoRA weights with respective scales
492
+ with calculateDuration("Loading LoRA weights"):
493
+ for idx, lora in enumerate(selected_loras):
494
+ lora_name = f"lora_{idx}"
495
+ lora_names.append(lora_name)
496
+ lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2)
497
+ lora_path = lora['repo']
498
+ weight_name = lora.get("weights")
499
+ print(f"Lora Path: {lora_path}")
500
+ if image_input is not None:
501
+ if weight_name:
502
+ pipe_i2i.load_lora_weights(lora_path, weight_name=weight_name, low_cpu_mem_usage=True, adapter_name=lora_name)
503
  else:
504
+ pipe_i2i.load_lora_weights(lora_path, low_cpu_mem_usage=True, adapter_name=lora_name)
505
  else:
506
+ if weight_name:
507
+ pipe.load_lora_weights(lora_path, weight_name=weight_name, low_cpu_mem_usage=True, adapter_name=lora_name)
508
  else:
509
+ pipe.load_lora_weights(lora_path, low_cpu_mem_usage=True, adapter_name=lora_name)
510
+ print("Loaded LoRAs:", lora_names)
511
+ if image_input is not None:
512
+ pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
513
+ else:
514
+ pipe.set_adapters(lora_names, adapter_weights=lora_weights)
515
+
516
+ print(pipe.get_active_adapters()) #
517
+ print(pipe_i2i.get_active_adapters()) #
518
+
519
  # Set random seed for reproducibility
520
  with calculateDuration("Randomizing seed"):
521
  if randomize_seed:
522
  seed = random.randint(0, MAX_SEED)
523
 
524
+ # Generate image
525
  progress(0, desc="Running Inference.")
526
  if(image_input is not None):
527
+ final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed, cn_on)
528
  yield save_image(final_image, None, last_model, prompt_mash, height, width, steps, cfg_scale, seed), seed, gr.update(visible=False)
529
  else:
530
+ image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, cn_on)
531
  # Consume the generator to get the final image
532
  final_image = None
533
  step_counter = 0
 
538
  yield image, seed, gr.update(value=progress_bar, visible=True)
539
  yield save_image(final_image, None, last_model, prompt_mash, height, width, steps, cfg_scale, seed), seed, gr.update(value=progress_bar, visible=False)
540
 
541
+ run_lora.zerogpu = True
542
+
543
  def get_huggingface_safetensors(link):
544
+ split_link = link.split("/")
545
+ if len(split_link) == 2:
546
+ model_card = ModelCard.load(link)
547
+ base_model = model_card.data.get("base_model")
548
+ print(f"Base model: {base_model}")
549
+ if base_model not in ["black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell"]:
550
+ raise Exception("Not a FLUX LoRA!")
551
+ image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
552
+ trigger_word = model_card.data.get("instance_prompt", "")
553
+ image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
554
+ fs = HfFileSystem()
555
+ safetensors_name = None
556
+ try:
557
+ list_of_files = fs.ls(link, detail=False)
558
+ for file in list_of_files:
559
+ if file.endswith(".safetensors"):
560
+ safetensors_name = file.split("/")[-1]
561
+ if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
562
+ image_elements = file.split("/")
563
+ image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
564
+ except Exception as e:
565
+ print(e)
566
+ raise gr.Error("Invalid Hugging Face repository with a *.safetensors LoRA")
567
+ if not safetensors_name:
568
+ raise gr.Error("No *.safetensors file found in the repository")
569
+ return split_link[1], link, safetensors_name, trigger_word, image_url
570
+ else:
571
+ raise gr.Error("Invalid Hugging Face repository link")
572
 
573
  def check_custom_model(link):
574
+ if link.endswith(".safetensors"):
575
+ # Treat as direct link to the LoRA weights
576
+ title = os.path.basename(link)
577
+ repo = link
578
+ path = None # No specific weight name
579
+ trigger_word = ""
580
+ image_url = None
581
+ return title, repo, path, trigger_word, image_url
582
+ elif link.startswith("https://"):
583
+ if "huggingface.co" in link:
584
  link_split = link.split("huggingface.co/")
585
  return get_huggingface_safetensors(link_split[1])
586
+ else:
587
+ raise Exception("Unsupported URL")
588
  else:
589
+ # Assume it's a Hugging Face model path
590
+ return get_huggingface_safetensors(link)
591
 
592
  css = '''
593
  #gen_btn{height: 100%}
594
  #title{text-align: center}
595
  #title h1{font-size: 3em; display:inline-flex; align-items:center}
596
+ #title img{width: 100px; margin-right: 0.25em}
597
+ #gallery .grid-wrap{height: 5vh}
598
  #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
599
+ .custom_lora_card{margin-bottom: 1em}
600
  .card_internal{display: flex;height: 100px;margin-top: .5em}
601
  .card_internal img{margin-right: 1em}
602
  .styler{--form-gap-width: 0px !important}
 
604
  #progress .generating{display:none}
605
  .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
606
  .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
607
+ .button_total{height: 100%}
608
+ #loaded_loras [data-testid="block-info"]{font-size:80%}
609
+ #custom_lora_structure{background: var(--block-background-fill)}
610
+ #custom_lora_btn{margin-top: auto;margin-bottom: 11px}
611
+ #random_btn{font-size: 300%}
612
  .info {text-align:center; !important}
613
  '''
614
  with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache=(60, 3600)) as app:
615
  with gr.Tab("FLUX LoRA the Explorer"):
616
  title = gr.HTML(
617
+ """<h1><img src="https://huggingface.co/spaces/John6666/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA">FLUX LoRA the Explorer Mod</h1>""",
618
  elem_id="title",
619
  )
620
+ loras_state = gr.State(loras)
621
+ selected_indices = gr.State([])
622
  with gr.Row():
623
  with gr.Column(scale=3):
624
  with gr.Group():
 
638
  prompt_enhance = gr.Button(value="Enhance your prompt", variant="secondary")
639
  auto_trans = gr.Checkbox(label="Auto translate to English", value=False, elem_classes="info")
640
  with gr.Column(scale=1, elem_id="gen_column"):
641
+ generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn", elem_classes=["button_total"])
642
+ with gr.Row(elem_id="loaded_loras"):
643
+ with gr.Column(scale=1, min_width=25):
644
+ randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")
645
+ with gr.Column(scale=8):
646
+ with gr.Row():
647
+ with gr.Column(scale=0, min_width=50):
648
+ lora_image_1 = gr.Image(label="LoRA 1 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
649
+ with gr.Column(scale=3, min_width=100):
650
+ selected_info_1 = gr.Markdown("Select a LoRA 1")
651
+ with gr.Column(scale=5, min_width=50):
652
+ lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
653
+ with gr.Row():
654
+ remove_button_1 = gr.Button("Remove", size="sm")
655
+ with gr.Column(scale=8):
656
+ with gr.Row():
657
+ with gr.Column(scale=0, min_width=50):
658
+ lora_image_2 = gr.Image(label="LoRA 2 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
659
+ with gr.Column(scale=3, min_width=100):
660
+ selected_info_2 = gr.Markdown("Select a LoRA 2")
661
+ with gr.Column(scale=5, min_width=50):
662
+ lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
663
+ with gr.Row():
664
+ remove_button_2 = gr.Button("Remove", size="sm")
665
  with gr.Row():
666
  with gr.Column():
667
  selected_info = gr.Markdown("")
 
669
  [(item["image"], item["title"]) for item in loras],
670
  label="LoRA Gallery",
671
  allow_preview=False,
672
+ columns=5,
673
  elem_id="gallery"
674
  )
675
  with gr.Group():
676
+ with gr.Row(elem_id="custom_lora_structure"):
677
+ custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path or *.safetensors public URL", placeholder="multimodalart/vintage-ads-flux", scale=3, min_width=150)
678
+ add_custom_lora_button = gr.Button("Add Custom LoRA", elem_id="custom_lora_btn", scale=2, min_width=150)
679
+ remove_custom_lora_button = gr.Button("Remove Custom LoRA", visible=False)
680
+ gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
681
  with gr.Column():
682
  progress_bar = gr.Markdown(elem_id="progress",visible=False)
683
  result = gr.Image(label="Generated Image", format="png", show_share_button=False)
 
693
  input_image_preprocess = gr.Checkbox(True, label="Preprocess Input image")
694
  with gr.Column():
695
  with gr.Row():
 
696
  width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
697
  height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
 
698
  cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
699
  steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
700
  with gr.Row():
 
703
  disable_model_cache = gr.Checkbox(False, label="Disable model caching")
704
  with gr.Accordion("External LoRA", open=True):
705
  with gr.Column():
706
+ deselect_lora_button = gr.Button("Remove External LoRAs", variant="secondary")
707
  lora_repo_json = gr.JSON(value=[{}] * num_loras, visible=False)
708
  lora_repo = [None] * num_loras
709
  lora_weights = [None] * num_loras
 
728
  lora_num[i] = gr.Number(i, visible=False)
729
  with gr.Accordion("From URL", open=True, visible=True):
730
  with gr.Row():
731
+ lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Flux.1 D", "Flux.1 S"], value=["Flux.1 D"])
732
+ lora_search_civitai_sort = gr.Radio(label="Sort", choices=["Highest Rated", "Most Downloaded", "Newest"], value="Most Downloaded")
733
+ lora_search_civitai_period = gr.Radio(label="Period", choices=["AllTime", "Year", "Month", "Week", "Day"], value="Month")
734
  with gr.Row():
735
  lora_search_civitai_query = gr.Textbox(label="Query", placeholder="flux", lines=1)
736
  lora_search_civitai_tag = gr.Textbox(label="Tag", lines=1)
 
767
 
768
  gallery.select(
769
  update_selection,
770
+ inputs=[selected_indices, loras_state, width, height],
771
+ outputs=[prompt, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, width, height, lora_image_1, lora_image_2])
772
+ remove_button_1.click(
773
+ remove_lora_1,
774
+ inputs=[selected_indices, loras_state],
775
+ outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
776
+ )
777
+ remove_button_2.click(
778
+ remove_lora_2,
779
+ inputs=[selected_indices, loras_state],
780
+ outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
781
+ )
782
+ randomize_button.click(
783
+ randomize_loras,
784
+ inputs=[selected_indices, loras_state],
785
+ outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, prompt]
786
  )
787
+ add_custom_lora_button.click(
788
  add_custom_lora,
789
+ inputs=[custom_lora, selected_indices, loras_state],
790
+ outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
 
 
791
  )
792
+ remove_custom_lora_button.click(
793
  remove_custom_lora,
794
+ inputs=[selected_indices, loras_state],
795
+ outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
 
796
  )
797
  gr.on(
798
  triggers=[generate_button.click, prompt.submit],
 
804
  trigger_mode="once",
805
  ).success(
806
  fn=run_lora,
807
+ inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2,
808
+ randomize_seed, seed, width, height, loras_state, lora_repo_json, cn_on, auto_trans],
809
  outputs=[result, seed, progress_bar],
810
  queue=True,
811
  show_api=True,
812
  )
813
 
814
  input_image.upload(preprocess_i2i_image, [input_image, input_image_preprocess, height, width], [input_image], queue=False, show_api=False)
 
815
  gr.on(
816
  triggers=[model_name.change, cn_on.change],
817
  fn=get_t2i_model_info,
 
1029
  ]
1030
  )
1031
 
1032
+ with gr.Tab("PNG Info"):
1033
+ def extract_exif_data(image):
1034
+ if image is None: return ""
1035
+
1036
+ try:
1037
+ metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
1038
+
1039
+ for key in metadata_keys:
1040
+ if key in image.info:
1041
+ return image.info[key]
1042
+
1043
+ return str(image.info)
1044
+
1045
+ except Exception as e:
1046
+ return f"Error extracting metadata: {str(e)}"
1047
+
1048
+ with gr.Row():
1049
+ with gr.Column():
1050
+ image_metadata = gr.Image(label="Image with metadata", type="pil", sources=["upload"])
1051
+
1052
+ with gr.Column():
1053
+ result_metadata = gr.Textbox(label="Metadata", show_label=True, show_copy_button=True, interactive=False, container=True, max_lines=99)
1054
+
1055
+ image_metadata.change(
1056
+ fn=extract_exif_data,
1057
+ inputs=[image_metadata],
1058
+ outputs=[result_metadata],
1059
+ )
1060
+
1061
  description_ui()
1062
  gr.LoginButton()
1063
  gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
1064
 
 
1065
  app.queue()
1066
  app.launch()
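For reference, the two-slot LoRA handling introduced in app.py replaces LoRA fusing with the standard diffusers named-adapter API: each selected LoRA is loaded under its own adapter name and scaled via `set_adapters`, while `joint_attention_kwargs` stays at 1.0. The snippet below is a minimal illustrative sketch only; the repository IDs, adapter names, and scales are placeholders, not the Space's actual configuration.

```python
# Minimal sketch of the adapter-based LoRA loading pattern used by run_lora().
# Repo IDs, adapter names, and scales are placeholders.
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Load each selected LoRA under its own adapter name, as the commit does.
pipe.load_lora_weights("some-user/flux-lora-one", adapter_name="lora_0", low_cpu_mem_usage=True)
pipe.load_lora_weights("some-user/flux-lora-two", adapter_name="lora_1", low_cpu_mem_usage=True)

# Apply per-LoRA scales instead of fusing into the base weights.
pipe.set_adapters(["lora_0", "lora_1"], adapter_weights=[1.15, 1.15])

image = pipe(
    "a prompt containing both trigger words",
    num_inference_steps=28, guidance_scale=3.5,
    width=1024, height=1024,
    joint_attention_kwargs={"scale": 1.0},
).images[0]

# Unload before the next request, as run_lora() does at the top of each call.
pipe.unload_lora_weights()
```

This is why the hard-coded `lora_scale` slider disappears from the UI: scales now travel per adapter through `set_adapters` rather than through `joint_attention_kwargs`.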
custom.png ADDED
loras.json CHANGED
@@ -41,6 +41,12 @@
41
  "title": "Retro Comic",
42
  "trigger_position": "prepend"
43
  },
44
  {
45
  "image": "https://huggingface.co/renderartist/simplevectorflux/resolve/main/images/ComfyUI_09477_.jpeg",
46
  "title": "Simple Vector",
 
41
  "title": "Retro Comic",
42
  "trigger_position": "prepend"
43
  },
44
+ {
45
+ "image": "https://huggingface.co/glif/l0w-r3z/resolve/main/images/a19d658b-5d4c-45bc-9df6-f2bec54462a5.png",
46
+ "repo": "glif/l0w-r3z",
47
+ "trigger_word": ", l0w-r3z",
48
+ "title": "Low Res 3D"
49
+ },
50
  {
51
  "image": "https://huggingface.co/renderartist/simplevectorflux/resolve/main/images/ComfyUI_09477_.jpeg",
52
  "title": "Simple Vector",
mod.py CHANGED
@@ -58,21 +58,6 @@ def clear_cache():
58
  raise Exception(f"Cache clearing error: {e}") from e
59
 
60
 
61
- def deselect_lora():
62
- selected_index = None
63
- new_placeholder = "Type a prompt"
64
- updated_text = ""
65
- width = 1024
66
- height = 1024
67
- return (
68
- gr.update(placeholder=new_placeholder),
69
- updated_text,
70
- selected_index,
71
- width,
72
- height,
73
- )
74
-
75
-
76
  def get_repo_safetensors(repo_id: str):
77
  from huggingface_hub import HfApi
78
  api = HfApi()
@@ -284,7 +269,7 @@ def get_model_trigger(model_name: str):
284
  # https://github.com/huggingface/diffusers/issues/4919
285
  def fuse_loras(pipe, lorajson: list[dict]):
286
  try:
287
- if not lorajson or not isinstance(lorajson, list): return
288
  a_list = []
289
  w_list = []
290
  for d in lorajson:
@@ -292,20 +277,21 @@ def fuse_loras(pipe, lorajson: list[dict]):
292
  k = d["name"]
293
  if is_repo_name(k) and is_repo_exists(k):
294
  a_name = Path(k).stem
295
- pipe.load_lora_weights(k, weight_name=d["filename"], adapter_name = a_name)
296
  elif not Path(k).exists():
297
  print(f"LoRA not found: {k}")
298
  continue
299
  else:
300
  w_name = Path(k).name
301
  a_name = Path(k).stem
302
- pipe.load_lora_weights(k, weight_name = w_name, adapter_name = a_name)
303
  a_list.append(a_name)
304
  w_list.append(d["scale"])
305
- if not a_list: return
306
- pipe.set_adapters(a_list, adapter_weights=w_list)
307
- pipe.fuse_lora(adapter_names=a_list, lora_scale=1.0)
308
  #pipe.unload_lora_weights()
 
309
  except Exception as e:
310
  print(f"External LoRA Error: {e}")
311
  raise Exception(f"External LoRA Error: {e}") from e
@@ -315,6 +301,7 @@ def description_ui():
315
  gr.Markdown(
316
  """
317
  - Mod of [multimodalart/flux-lora-the-explorer](https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer),
 
318
  [jiuface/FLUX.1-dev-Controlnet-Union](https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union),
319
  [DamarJati/FLUX.1-DEV-Canny](https://huggingface.co/spaces/DamarJati/FLUX.1-DEV-Canny),
320
  [gokaygokay/FLUX-Prompt-Generator](https://huggingface.co/spaces/gokaygokay/FLUX-Prompt-Generator).
 
58
  raise Exception(f"Cache clearing error: {e}") from e
59
 
60
61
  def get_repo_safetensors(repo_id: str):
62
  from huggingface_hub import HfApi
63
  api = HfApi()
 
269
  # https://github.com/huggingface/diffusers/issues/4919
270
  def fuse_loras(pipe, lorajson: list[dict]):
271
  try:
272
+ if not lorajson or not isinstance(lorajson, list): return [], []
273
  a_list = []
274
  w_list = []
275
  for d in lorajson:
 
277
  k = d["name"]
278
  if is_repo_name(k) and is_repo_exists(k):
279
  a_name = Path(k).stem
280
+ pipe.load_lora_weights(k, weight_name=d["filename"], adapter_name = a_name, low_cpu_mem_usage=True)
281
  elif not Path(k).exists():
282
  print(f"LoRA not found: {k}")
283
  continue
284
  else:
285
  w_name = Path(k).name
286
  a_name = Path(k).stem
287
+ pipe.load_lora_weights(k, weight_name = w_name, adapter_name = a_name, low_cpu_mem_usage=True)
288
  a_list.append(a_name)
289
  w_list.append(d["scale"])
290
+ if not a_list: return [], []
291
+ #pipe.set_adapters(a_list, adapter_weights=w_list)
292
+ #pipe.fuse_lora(adapter_names=a_list, lora_scale=1.0)
293
  #pipe.unload_lora_weights()
294
+ return a_list, w_list
295
  except Exception as e:
296
  print(f"External LoRA Error: {e}")
297
  raise Exception(f"External LoRA Error: {e}") from e
 
301
  gr.Markdown(
302
  """
303
  - Mod of [multimodalart/flux-lora-the-explorer](https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer),
304
+ [multimodalart/flux-lora-lab](https://huggingface.co/spaces/multimodalart/flux-lora-lab),
305
  [jiuface/FLUX.1-dev-Controlnet-Union](https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union),
306
  [DamarJati/FLUX.1-DEV-Canny](https://huggingface.co/spaces/DamarJati/FLUX.1-DEV-Canny),
307
  [gokaygokay/FLUX-Prompt-Generator](https://huggingface.co/spaces/gokaygokay/FLUX-Prompt-Generator).
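The mod.py change means `fuse_loras()` no longer calls `set_adapters`/`fuse_lora` itself; it only loads the external LoRAs and returns the adapter names and scales for the caller to apply. A short sketch of the calling pattern, assuming `pipe` and `lora_json` are already set up as in app.py:

```python
# Sketch of how run_lora() consumes the new fuse_loras() return value.
names, weights = fuse_loras(pipe, lora_json)  # returns ([], []) when nothing is loaded
if names:
    # Scales are applied per adapter here instead of fuse_lora(lora_scale=1.0).
    pipe.set_adapters(names, adapter_weights=weights)
```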
prompts.csv ADDED
@@ -0,0 +1,242 @@
1
+ intelligence
2
+ "A raccoon wearing formal clothes, wearing a tophat and holding a cane. The raccoon is holding a garbage bag. Oil painting in the style of traditional Chinese painting."
3
+ a wood cabin
4
+ A tornado made of sharks crashing into a skyscraper. painting in the style of abstract cubism.
5
+ a beach with apartment buildings next to it
6
+ a cat coming through a cat door
7
+ an extreme close-up view of a capybara sitting in a field
8
+ a red cube on top of a blue cube
9
+ weeds in the cracks of a sidewalk
10
+ a smiling man with wavy brown hair and trimmed beard
11
+ A photo of an Athenian vase with a painting of pangolins playing tennis in the style of Egyptian hieroglyphics
12
+ a crowd of people watching fireworks by a park
13
+ a book with the words 'Don't Panic!' written on it
14
+ Portrait of a gecko wearing a train conductor’s hat and holding a flag that has a yin-yang symbol on it. Woodcut.
15
+ a violin next to an apple
16
+ a smiling man
17
+ a cricketer standing next to a wicket
18
+ a thumbnail image of an ice cream cone
19
+ a hamster dragon
20
+ "an old raccoon wearing a top hat and holding an apple, oil painting in the style of van gogh"
21
+ a traffic jam at Times Square
22
+ "a traffic jam at Times Square, with a cat on top of each car"
23
+ a glass of orange juice to the right of a plate with buttered toast on it
24
+ a clock tower
25
+ "Aerial view of downtown Manhattan, but with Millennium Wheel next to the Statue of Liberty. The Great Pyramid is on a sandy island near the buildings."
26
+ a pig in a field
27
+ "the door of knowing, a portal brightly opening the way through darkness. abstract anime landscape oil painting."
28
+ a sticker stuck in the middle of a stop sign
29
+ a cat jumping down from a wall
30
+ a man reading a book with a prism on its cover
31
+ "A DSLR photo of a shiny VW van that has a cityscape painted on it. A smiling sloth stands on grass in front of the van and is wearing a leather jacket, a cowboy hat, a kilt and a bowtie. The sloth is holding a quarterstaff and a big book."
32
+ a wall
33
+ A punk rock frog in a studded leather jacket shouting into a microphone while standing on a boulder
34
+ a red sports car on the road
35
+ A red bus is driving on the road
36
+ A photo of an Athenian vase with a painting of toucans playing tennis in the style of Egyptian hieroglyphics
37
+ a moose by a mountain stream
38
+ a coloring book page of a horse next to a stream
39
+ a coffee mug with an ankh symbol on it
40
+ three green glass bottles
41
+ a blue airplane taxiing on a runway with the sun behind it
42
+ the skyline of New York City
43
+ a present
44
+ a car with no windows
45
+ A tourist is looking at a whale using a binocular
46
+ a white plastic bench with a high arched back
47
+ element
48
+ food
49
+ a book cover
50
+ a series of musical notes
51
+ a large white yacht
52
+ a horse running in a field
53
+ "a statue of Abraham Lincoln wearing an opaque and shiny astronaut's helmet. The statue sits on the moon, with the planet Earth in the sky."
54
+ a volcano erupting near a small town
55
+ an ostrich
56
+ the finale of a fireworks display
57
+ Portrait of a tiger wearing a train conductor's hat and holding a skateboard that has a yin-yang symbol on it. Chinese ink and wash painting
58
+ a giant gorilla at the top of the Empire State Building
59
+ a drawing of the skyline of New York City
60
+ alien history
61
+ a red fire hydrant by a brick wall
62
+ an inflatable rabbit held up in the air by the geyser Old Faithful
63
+ A large city fountain that has milk instead of water. Several cats are leaning into the fountain.
64
+ a mixed media image with a photograph of a woman with long orange hair over a background that is a sketch of a city skyline
65
+ a cat patting a crystal ball with the number 7 written on it in black marker
66
+ Ha Long Bay
67
+ a comic about a friendly car in the city
68
+ a chimpanzee sitting on a wooden bench
69
+ space
70
+ A giraffe walking through a green grass covered field
71
+ "A set of 2x2 emoji icons with happy, angry, surprised and sobbing faces. The emoji icons look like pigs. All of the pigs are wearing crowns."
72
+ a person with arms like a tree branch
73
+ A small house in the wilderness
74
+ a fox
75
+ "Oil painting of a giant robot made of sushi, holding chopsticks."
76
+ a windmill
77
+ "The saying ""BE EXCELLENT TO EACH OTHER"" written in a stained glass window."
78
+ a watermelon chair
79
+ a drawing of a peaceful lakeside landscape
80
+ a thumbnail image of a person skiing
81
+ a nerdy bear wearing glasses and a bowtie
82
+ artificial intelligence
83
+ "a nerdy bear wearing glasses and a bowtie, realistic"
84
+ a green clock in the shape of a pentagon
85
+ a yellow box to the right of a blue sphere
86
+ two chairs
87
+ a woman looking at a house
88
+ an espresso machine
89
+ The trophy doesn't fit into the brown suitcase because it's too small
90
+ a volcano with lava pouring down its slopes
91
+ a laughing woman
92
+ a drawing of a pig face with an eye patch
93
+ a clock with no hands
94
+ a silver fire hydrant next to a sidewalk
95
+ a Tyrannosaurus Rex roaring in front of a palm tree
96
+ A bowl of Pho served with bean sprouts on top
97
+ a pick-up truck
98
+ "A raccoon wearing formal clothes, wearing a tophat and holding a cane. The raccoon is holding a garbage bag. Oil painting in the style of Rembrandt."
99
+ a girl diving into a pool
100
+ a monarch butterfly hatching from its chrysalis
101
+ a ceiling fan with four white blades
102
+ chair
103
+ A portrait of a metal statue of a pharaoh wearing steampunk glasses and a leather jacket over a white t-shirt that has a drawing of a space shuttle on it.
104
+ a horse reading a newspaper
105
+ A photo of a panda made of water.
106
+ three pickup trucks piled on top of each other
107
+ people packed on a double-decker bus
108
+ the grand canyon on a cloudy day
109
+ a pair of headphones on a pumpkin
110
+ A photo of a palm tree made of water.
111
+ view of a clock tower from below
112
+ "a robot painted as graffiti on a brick wall. a sidewalk is in front of the wall, and grass is growing out of cracks in the concrete."
113
+ G I G G L E painted in thick colorful lettering as graffiti on a faded red brick wall with a splotch of exploding white paint.
114
+ Sunset over the sea
115
+ a tiger in a forest
116
+ the city of London on Mars
117
+ the moon with a smiling face
118
+ a sword slicing through pouring milk
119
+ a boy and a tiger
120
+ a comic about a boy and a tiger
121
+ fairy cottage with smoke coming up chimney and a squirrel looking from the window
122
+ a flower with large red petals growing on the moon's surface
123
+ a woman using a sledgehammer to smash an ice sculpture of a goose
124
+ a kangaroo jumping through the park
125
+ a lovestruck cup of boba
126
+ a tennis court with tennis balls scattered all over it
127
+ a family
128
+ a man pouring milk into a coffee cup to make a latte with a beatiful design
129
+ a field with ten massive modern windmills
130
+ a cartoon of a cow jumping over the moon
131
+ the flag of the United Kingdom painted in rusty corrugated iron
132
+ a mouse sitting next to a computer mouse
133
+ a white cat with black ears and markings
134
+ "Two cups of coffee, one with latte art of yin yang symbol. The other has latte art of a heart."
135
+ matching socks with cute cats on them
136
+ "Renaissance portrayals of the Virgin Mary, seated in a loggia. Behind her is a hazy and seemingly isolated landscape imagined by the artist and painted using sfumato."
137
+ a coffee table with a magazine on it
138
+ A heart made of cookie
139
+ a penguin standing on a sidewalk
140
+ "bismuth crystals, intricate fractal pattern"
141
+ "chaotic attractor, multicolored neon"
142
+ "fractal heart pattern, pink blue and white"
143
+ chaotic system interpretation of polyamory
144
+ high dimensional topology of latent spaces
145
+ 300 movie titles
146
+ a man with puppet that looks like a king
147
+ a group of skiers are preparing to walk up a sand dune
148
+ a blue t-shirt with a dinosaur on it
149
+ a king salmon
150
+ an Egyptian statue in the desert
151
+ a moose standing over a fox
152
+ A bowl of soup that looks like a monster spray-painted on a wall
153
+ a shih-tzu dog
154
+ a pirate ship
155
+ early bird and night owl
156
+ a taxi
157
+ the eyes of an owl
158
+ graffiti of a rocket ship on a brick wall
159
+ a musical note
160
+ a small airplane flying over rolling hills
161
+ a tiny football in front of three yellow tennis balls
162
+ a cute illustration of a horned owl with a graduation cap and diploma
163
+ a teddy bear to the right of a toy car
164
+ a map of Australia
165
+ an eagle
166
+ a roast turkey on the table
167
+ a store front
168
+ a map of Manhattan
169
+ a portrait of a postal worker who has forgotten their mailbag
170
+ The Statue of Liberty with the Manhattan skyline in the background.
171
+ a bottle of beer next to an ashtray with a half-smoked cigarrette
172
+ a monarch butterfly
173
+ a large blue box
174
+ a car with tires that have yellow rims
175
+ A funny Rube Goldberg machine made out of metal
176
+ a photograph of a fiddle next to a basketball on a ping pong table
177
+ A bowl of Chicken Pho
178
+ view of a giraffe and a zebra in the middle of a field
179
+ a sunken submarine at the bottom of the ocean
180
+ two wine bottles
181
+ a turtle upside down and spinning on its shell
182
+ a painting of a fox in the style of starry night
183
+ a poodle wearing a baseball cap holding a dictionary in hand and writing bonez on a chalkboard
184
+ a stained glass window of a panda eating bamboo
185
+ a blue cow is standing next to a tree with red leaves and yellow fruit. the cow is standing in a field with white flowers. impressionistic painting.
186
+ "an ornate, high-backed mahogany chair with a red cushion"
187
+ the hands of a single person holding a basketball
188
+ a snail
189
+ a man with puppet
190
+ a pickup truck with a horse on its left and two dogs on its right
191
+ an airplane flying into a cloud that looks like monster
192
+ 7 dogs sitting around a poker table
193
+ a racoon detective using a microscope while riding in a train
194
+ a kids' book cover with an illustration of white dog driving a red pickup truck
195
+ a woman with long hair next to a luminescent bird
196
+ a boat
197
+ a high-quality oil painting of a psychedelic hamster dragon
198
+ age of empires
199
+ Downtown Austin at sunrise. detailed ink wash.
200
+ a pile of cash on a stone floor
201
+ A helicopter flies over the Arches National Park.
202
+ the word 'START' written on a street surface
203
+ a tennis court with three yellow cones on it
204
+ "A rabbit checks its watch, and so does a gecko."
205
+ a close-up of a bloody mary cocktail
206
+ a view of the Kremlin on a sunny day
207
+ The Oriental Pearl in oil painting
208
+ Tibetan priests ringing a bell
209
+ Portrait of a tiger wearing a train conductor's hat and holding a skateboard that has a yin-yang symbol on it. charcoal sketch
210
+ a cat sitting in a car seat
211
+ a selfie of an old man with a white beard
212
+ "A green sign that says ""Very Deep Learning"" and is at the edge of the Grand Canyon."
213
+ an octopus
214
+ a zebra
215
+ "a warrior, golden army"
216
+ a triangle with a smiling face
217
+ "Pop art, bold colors, mass culture, commercial techniques, screen printing, repetition, consumerism imagery, iconic portraits, flat imagery, irony, everyday objects"
218
+ "Minimalist, conceptual, geometric, bold, abstract, systematic, wall drawings, instructions-based, modular forms, repetitive patterns, color-blocks, line variations, innovative, influential"
219
+ "Conceptual, mixed-media, appropriation, text-and-image, irony, photo-collage, bold colors, visual narrative, humorous, minimalist, innovative, montage, semiotics, critique, pop culture, fragmented, juxtaposition"
220
+ "Feminist, avant-garde, body art, performance art, video art, provocative, conceptual, radical, multimedia, body politics, identity exploration, societal critique"
221
+ "In the style of Rembrandt, this portrait on canvas captures a man seated in a dimly lit space, his gaze introspective and laden with experience. His clothing is of a rich, textured fabric, suggesting a life of complexity and depth. The sparse background emphasizes his solitary figure, highlighting the contemplative mood and the dignified simplicity of his demeanor. The overall atmosphere is one of introspection and timeless wisdom."
222
+ "East-West fusion, trans-cultural, dream-like, nature-inspired, ethereal, organic forms, delicate colors, figurative-abstract blend, poetic, intimate, evolving, mystical, feminine perspective"
223
+ "17th century, Rembrandt Harmensz van Rijn, Baroque art, oil on canvas, half-body portrait, solitary woman, neutral expression, indoor scene, warm lighting, detailed drapery, velvet dress, gold embroidery, pearl necklace, lace collar, ornate jewelry, realistic skin tones, fine hair details, soft shadows, meticulous brushwork, classical pose, timeless elegance, artistic mastery, cultural significance, visual narrative."
224
+ "Rembrandt Harmensz van Rijn, 17th century Dutch Golden Age, oil on canvas, half-body portrait, female figure, neutral facial expression, elaborate Renaissance attire, rich velvet dress with intricate embroidery, lace collar, pearl necklace, ornate headdress adorned with jewels, seated pose, hands resting on a table, dark background with soft lighting, realistic style, fine brushwork, attention to detail in textures and fabrics, historical context, cultural significance."
225
+ "Rembrandt Harmensz. van Rijn, oil on canvas, captures a distinguished gentleman in a moment of quiet reflection. He is attired in a sumptuous black velvet cloak with a high white collar, the textures rendered with precise detail to showcase the contrast between the soft velvet and crisp linen. The subtle play of light illuminates his contemplative expression and highlights the gentle grip of his hand on a leather-bound journal, hinting at a life of intellectual pursuits. The background is a soft interplay of shadows, enhancing the intimate and introspective mood of the portrait."
226
+ "Minimalist, large-scale, industrial materials, site-specific, immersive, abstract, sculptural, steel-focused, gravity-based, experiential, curved forms, monumental, spatial exploration, environmental interaction, tactile"
227
+ "Minimalist, conceptual, postmodern, geometric abstraction, color-block, multi-disciplinary, playful irony, spatial intervention, typographic, avant-garde, video art, performance elements"
228
+ "Abstract, minimalism, expressionism, bold colors, geometric shapes, large-scale works, mixed media, architectural elements, contemporary, post-modern, layered textures"
229
+ "Environmental art, large-scale installations, fabric-wrapped structures, public spaces, temporary works, conceptual, immersive experiences, land art, transformative, outdoor"
230
+ "found-object assemblage, Eat Art, Nouveau Réalisme, tableaux traps, everyday objects, collage, three-dimensional, playful, ironic, conceptual, kinetic, interactive, mixed media"
231
+ a submarine floating past a shark
232
+ "Photomontage, feminist, critical, conceptual, socio-political, collage, video art, performance, domestic sphere, anti-war, satire, urban life, activist, multimedia, appropriation"
233
+ "Abstract expressionism, scribbles, calligraphy, graffiti, child-like, spontaneous, emotional, textured, layered, historical references, poetic, minimal color, gestural, large-scale canvases"
234
+ "Contemporary, provocative, conceptual, mixed-media, death-themed, colorful, controversial, pharmaceutical motifs, formaldehyde specimens, spot paintings, spin art, luxury, critique of consumerism, installation art."
235
+ a room
236
+ "minimalism, color fields, abstract, geometric shapes, bold simplicity, hard edges, monochromes, vibrant, non-figurative, spatial relationships, innovative, pure forms"
237
+ "Photographic narrative, African-American experience, critical race exploration, feminism, cultural identity, social justice, mixed media, evocative, poignant, reflective, storytelling, human condition"
238
+ a Harley-Davidson motorcycle with a flame decal
239
+ an ornate jewel-encrusted key
240
+ a woman running on a trail
241
+ a giraffe with a funny face
242
+ square blue apples on a tree with circular yellow leaves
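
Aside (not part of the commit): the prompts.csv rows added above appear to be a pool of one-per-row example prompts. A minimal, hedged sketch of how such a file could be read and sampled — the actual consumer in app.py/mod.py is not shown in this hunk, and the file path and "no header row" layout are assumptions:

# Minimal sketch, assuming prompts.csv holds one prompt per row (quoted rows are handled by csv).
import csv
import random

def load_prompts(path: str = "prompts.csv") -> list[str]:
    # Read every non-empty first column as a candidate prompt; skip blank rows.
    with open(path, newline="", encoding="utf-8") as f:
        return [row[0].strip() for row in csv.reader(f) if row and row[0].strip()]

# Usage: pick a random example prompt, e.g. to prefill the prompt textbox.
prompts = load_prompts()
print(random.choice(prompts))
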
requirements.txt CHANGED
@@ -1,9 +1,9 @@
- spaces
  torch
- git+https://github.com/huggingface/diffusers@aa73072f1f7014635e3de916cbcf47858f4c37a0
+ git+https://github.com/huggingface/diffusers.git@1131e3d04e3131f4c24565257665d75364d696d9
  transformers
- peft
+ git+https://github.com/huggingface/peft.git
  sentencepiece
+ spaces==0.30.3
  torchvision
  huggingface_hub
  timm
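
Aside (not part of the commit): a minimal sketch for checking what the re-pinned requirements above actually resolve to inside the Space's environment. The package list is copied from the file; note that packages installed from a git revision (diffusers, peft) report their setup version, not the commit hash:

# Minimal sketch: print the installed version of each dependency pinned above.
import importlib.metadata as md

for pkg in ("torch", "diffusers", "transformers", "peft",
            "sentencepiece", "spaces", "torchvision", "huggingface_hub", "timm"):
    try:
        print(f"{pkg}=={md.version(pkg)}")
    except md.PackageNotFoundError:
        print(f"{pkg}: not installed")
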