Spaces: ClaireOzzz (Runtime error)

Commit 9028614, committed by ClaireOzzz • Parent: c069efa
Update app.py

app.py CHANGED
@@ -386,8 +386,8 @@ with gr.Blocks(theme=theme, css=css) as demo:
 
             load_model_btn = gr.Button("Load my model", elem_id="load_model_btn")
             image_in = gr.Image(sources="upload", type="filepath", value=( "shop1.jpg"))
-            gr.Examples(
-                examples=[[os.path.join(os.path.dirname(__file__), "shop2.jpg")],[os.path.join(os.path.dirname(__file__), "shop3.jpg")]], inputs=im)
+            # gr.Examples(
+            #     examples=[[os.path.join(os.path.dirname(__file__), "shop2.jpg")],[os.path.join(os.path.dirname(__file__), "shop3.jpg")]], inputs=im)
 
 
         with gr.Column(elem_id="col-container"):
@@ -402,7 +402,7 @@
                     # with gr.Group():
                     prompt = gr.Textbox(label="Prompt", placeholder="Add your trigger word here + prompt")
 
-                    with gr.Accordion(label="Advanced Options", open=False):
+                    with gr.Accordion(label="Advanced Options", open=False, visible=False):
                         # with gr.Group():
                         negative_prompt = gr.Textbox(label="Negative prompt", value="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured")
                         guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=8.8)
@@ -428,7 +428,467 @@
                     # label = gr.Label(label="Loader")
                     # submit_btn.click(infer, outputs=[label])
 
-                    result = gr.Image(label="Result", visible=True)
+                    result = gr.Image(label="Result", visible=False)
+
+            use_custom_model.change(
+                fn = check_use_custom_or_no,
+                inputs =[use_custom_model],
+                outputs = [custom_model_box],
+                queue = False
+            )
+            custom_model.blur(
+                fn=custom_model_changed,
+                inputs = [custom_model, previous_model],
+                outputs = [model_status],
+                queue = False
+            )
+            load_model_btn.click(
+                fn = load_model,
+                inputs=[custom_model],
+                outputs = [previous_model, model_status, weight_name, trigger_word],
+                queue = False
+            )
+            submit_btn.click(
+                fn = infer,
+                inputs = [use_custom_model,custom_model, weight_name, custom_lora_weight, image_in, prompt, negative_prompt, preprocessor, controlnet_conditioning_scale, guidance_scale, inf_steps, seed],
+                outputs = [result, last_used_seed]
+            )
+
+
+    # return demo
+
+
+demo.queue().launch(share=True)
+import os
+# os.system('pip install pip==23.3.0')
+# os.system('pip uninstall spaces -y')
+# os.system('pip install spaces==0.18.0')
+# os.system('pip install gradio==4.0.2')
+
+
+import gradio as gr
+from huggingface_hub import login, HfFileSystem, HfApi, ModelCard
+import os
+import spaces
+import random
+import torch
+
+from transformers import GLPNFeatureExtractor, GLPNForDepthEstimation
+from transformers import AutoFeatureExtractor, AutoModelForDepthEstimation
+feature_extractor = AutoFeatureExtractor.from_pretrained("Intel/dpt-large")
+modeld = AutoModelForDepthEstimation.from_pretrained("Intel/dpt-large")
+
+# from depthGAN.app import create_visual_demo
+
+is_shared_ui = False
+hf_token = 'hf_[REDACTED]'
+login(token=hf_token)
+
+fs = HfFileSystem(token=hf_token)
+api = HfApi()
+
+device="cuda" if torch.cuda.is_available() else "cpu"
+
+from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
+from diffusers.utils import load_image
+from PIL import Image
+import torch
+import numpy as np
+import cv2
+
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+
+controlnet = ControlNetModel.from_pretrained(
+    "diffusers/controlnet-canny-sdxl-1.0",
+    torch_dtype=torch.float16
+)
+
+def check_use_custom_or_no(value):
+    if value is True:
+        return gr.update(visible=True)
+    else:
+        return gr.update(visible=False)
+
+def get_files(file_paths):
+    last_files = {}  # Dictionary to store the last file for each path
+
+    for file_path in file_paths:
+        # Split the file path into directory and file components
+        directory, file_name = file_path.rsplit('/', 1)
+
+        # Update the last file for the current path
+        last_files[directory] = file_name
+
+    # Extract the last files from the dictionary
+    result = list(last_files.values())
+
+    return result
+
+def load_model(model_name):
+
+    if model_name == "":
+        gr.Warning("If you want to use a private model, you need to duplicate this space on your personal account.")
+        raise gr.Error("You forgot to define Model ID.")
+
+    # Get instance_prompt a.k.a trigger word
+    card = ModelCard.load(model_name)
+    repo_data = card.data.to_dict()
+    instance_prompt = repo_data.get("instance_prompt")
+
+    if instance_prompt is not None:
+        print(f"Trigger word: {instance_prompt}")
+    else:
+        instance_prompt = "no trigger word needed"
+        print(f"Trigger word: no trigger word needed")
+
+    # List all ".safetensors" files in repo
+    sfts_available_files = fs.glob(f"{model_name}/*safetensors")
+    sfts_available_files = get_files(sfts_available_files)
+
+    if sfts_available_files == []:
+        sfts_available_files = ["NO SAFETENSORS FILE"]
+
+    print(f"Safetensors available: {sfts_available_files}")
+
+    return model_name, "Model Ready", gr.update(choices=sfts_available_files, value=sfts_available_files[0], visible=True), gr.update(value=instance_prompt, visible=True)
+
+def custom_model_changed(model_name, previous_model):
+    if model_name == "" and previous_model == "":
+        status_message = ""
+    elif model_name != previous_model:
+        status_message = "model changed, please reload before any new run"
+    else:
+        status_message = "model ready"
+    return status_message
+
+def resize_image(input_path, output_path, target_height):
+    # Open the input image
+    img = Image.open(input_path)
+
+    # Calculate the aspect ratio of the original image
+    original_width, original_height = img.size
+    original_aspect_ratio = original_width / original_height
+
+    # Calculate the new width while maintaining the aspect ratio and the target height
+    new_width = int(target_height * original_aspect_ratio)
+
+    # Resize the image while maintaining the aspect ratio and fixing the height
+    img = img.resize((new_width, target_height), Image.LANCZOS)
+
+    # Save the resized image
+    img.save(output_path)
+
+    return output_path
+
+def predict(image):
+    inputs = feature_extractor(images=image, return_tensors="pt")
+    with torch.no_grad():
+        outputs = modeld(**inputs)
+        predicted_depth = outputs.predicted_depth
+    # interpolate to original size
+    prediction = torch.nn.functional.interpolate(
+        predicted_depth.unsqueeze(1),
+        size=image.size[::-1],
+        mode="bicubic",
+        align_corners=False,
+    )
+    # visualize the prediction
+    output = prediction.squeeze().cpu().numpy()
+    formatted = (output * 255 / np.max(output)).astype("uint8")
+    depth_image = Image.fromarray(formatted)
+    depth_image.save(f"depth.png")
+    return depth_image
+
+
+@spaces.GPU
+def infer(use_custom_model, model_name, weight_name, custom_lora_weight, image_in, prompt, negative_prompt, preprocessor, controlnet_conditioning_scale, guidance_scale, inf_steps, seed, progress=gr.Progress(track_tqdm=True)):
+
+    pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+        "stabilityai/stable-diffusion-xl-base-1.0",
+        controlnet=controlnet,
+        vae=vae,
+        torch_dtype=torch.float16,
+        variant="fp16",
+        use_safetensors=True
+    )
+
+    pipe.to(device)
+
+    prompt = prompt
+    negative_prompt = negative_prompt
+
+    if seed < 0 :
+        seed = random.randint(0, 423538377342)
+
+    generator = torch.Generator(device=device).manual_seed(seed)
+
+    if image_in == None:
+        raise gr.Error("You forgot to upload a source image.")
+
+    image_in = resize_image(image_in, "resized_input.jpg", 1024)
+
+    if preprocessor == "canny":
+
+        image = load_image(image_in)
+
+        image = np.array(image)
+        image = cv2.Canny(image, 100, 200)
+        image = image[:, :, None]
+        image = np.concatenate([image, image, image], axis=2)
+        image = Image.fromarray(image)
+
+    if use_custom_model:
+
+        if model_name == "":
+            raise gr.Error("you forgot to set a custom model name.")
+
+        custom_model = model_name
+
+        # This is where you load your trained weights
+        if weight_name == "NO SAFETENSORS FILE":
+            pipe.load_lora_weights(
+                custom_model,
+                low_cpu_mem_usage = True,
+                use_auth_token = True
+            )
+
+        else:
+            pipe.load_lora_weights(
+                custom_model,
+                weight_name = weight_name,
+                low_cpu_mem_usage = True,
+                use_auth_token = True
+            )
+
+        lora_scale=custom_lora_weight
+
+        images = pipe(
+            prompt,
+            negative_prompt=negative_prompt,
+            image=image,
+            controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+            guidance_scale = float(guidance_scale),
+            num_inference_steps=inf_steps,
+            generator=generator,
+            cross_attention_kwargs={"scale": lora_scale}
+        ).images
+    else:
+        images = pipe(
+            prompt,
+            negative_prompt=negative_prompt,
+            image=image,
+            controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+            guidance_scale = float(guidance_scale),
+            num_inference_steps=inf_steps,
+            generator=generator,
+        ).images
+
+    images[0].save(f"result.png")
+    print("HELP")
+    predict(images[0])
+    # create_visual_demo();
+    return f"result.png", seed
+
+
+css="""
+.{
+    height: 20%;
+}
+#col-container{
+    margin: 0 auto;
+    max-width: 720px;
+    text-align: left;
+}
+div#warning-duplicate {
+    background-color: #ebf5ff;
+    padding: 0 10px 5px;
+    margin: 20px 0;
+}
+div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
+    color: #0f4592!important;
+}
+div#warning-duplicate strong {
+    color: #0f4592;
+}
+p.actions {
+    display: flex;
+    align-items: center;
+    margin: 20px 0;
+}
+div#warning-duplicate .actions a {
+    display: inline-block;
+    margin-right: 10px;
+}
+button#load_model_btn{
+    height: 45px !important;
+    border: none;
+    background-color: #99F6E4 !important;
+    border-radius: 10px !important;
+    padding: 10px !important;
+    cursor: pointer;
+    display: block;
+    position: relative;
+    top: -20px;
+    z-index: 100;
+}
+#status_info{
+    font-size: 0.9em;
+}
+"""
+
+theme = gr.themes.Soft(
+    primary_hue="teal",
+    secondary_hue="gray",
+).set(
+    body_text_color_dark='*neutral_800',
+    background_fill_primary_dark='*neutral_50',
+    background_fill_secondary_dark='*neutral_50',
+    border_color_accent_dark='*primary_300',
+    border_color_primary_dark='*neutral_200',
+    color_accent_soft_dark='*neutral_50',
+    link_text_color_dark='*secondary_600',
+    link_text_color_active_dark='*secondary_600',
+    link_text_color_hover_dark='*secondary_700',
+    link_text_color_visited_dark='*secondary_500',
+    code_background_fill_dark='*neutral_100',
+    shadow_spread_dark='6px',
+    block_background_fill_dark='white',
+    block_label_background_fill_dark='*primary_100',
+    block_label_text_color_dark='*primary_500',
+    block_title_text_color_dark='*primary_500',
+    checkbox_background_color_dark='*background_fill_primary',
+    checkbox_background_color_selected_dark='*primary_600',
+    checkbox_border_color_dark='*neutral_100',
+    checkbox_border_color_focus_dark='*primary_500',
+    checkbox_border_color_hover_dark='*neutral_300',
+    checkbox_border_color_selected_dark='*primary_600',
+    checkbox_label_background_fill_selected_dark='*primary_500',
+    checkbox_label_text_color_selected_dark='white',
+    error_background_fill_dark='#fef2f2',
+    error_border_color_dark='#b91c1c',
+    error_text_color_dark='#b91c1c',
+    error_icon_color_dark='#b91c1c',
+    input_background_fill_dark='white',
+    input_background_fill_focus_dark='*secondary_500',
+    input_border_color_dark='*neutral_50',
+    input_border_color_focus_dark='*secondary_300',
+    input_placeholder_color_dark='*neutral_400',
+    slider_color_dark='*primary_500',
+    stat_background_fill_dark='*primary_300',
+    table_border_color_dark='*neutral_300',
+    table_even_background_fill_dark='white',
+    table_odd_background_fill_dark='*neutral_50',
+    button_primary_background_fill_dark='*primary_500',
+    button_primary_background_fill_hover_dark='*primary_400',
+    button_primary_border_color_dark='*primary_00',
+    button_secondary_background_fill_dark='white',
+    button_secondary_background_fill_hover_dark='*neutral_100',
+    button_secondary_border_color_dark='*neutral_200',
+    button_secondary_text_color_dark='*neutral_800'
+)
+
+#examples = [["examples/" + img] for img in os.listdir("examples/")]
+im = gr.Image(visible=False)
+
+with gr.Blocks(theme=theme, css=css) as demo:
+    with gr.Row():
+        with gr.Column(elem_id="col-container"):
+
+            gr.HTML("""
+                <h2 style="text-align: left;">Choose a Style</h2>
+                <p style="text-align: left;">Our Pretrained Models can be found on Huggingface</p>
+            """)
+
+            use_custom_model = gr.Checkbox(label="Use a custom pre-trained LoRa model ? (optional)", visible = False, value=False, info="To use a private model, you'll need to duplicate the space with your own access token.")
+
+            with gr.Blocks(visible=False) as custom_model_box:
+                with gr.Row():
+                    with gr.Column():
+                        if not is_shared_ui:
+                            your_username = api.whoami()["name"]
+                            my_models = api.list_models(author=your_username, filter=["diffusers", "stable-diffusion-xl", 'lora'])
+                            model_names = [item.modelId for item in my_models]
+
+                        if not is_shared_ui:
+                            custom_model = gr.Dropdown(
+                                label = "Your custom model ID",
+                                info="You can pick one of your private models",
+                                choices = model_names,
+                                allow_custom_value = True
+                                #placeholder = "username/model_id"
+                            )
+                        else:
+                            custom_model = gr.Textbox(
+                                label="Your custom model ID",
+                                placeholder="your_username/your_trained_model_name",
+                                info="Make sure your model is set to PUBLIC"
+                            )
+
+                        weight_name = gr.Dropdown(
+                            label="Safetensors file",
+                            #value="pytorch_lora_weights.safetensors",
+                            info="specify which one if model has several .safetensors files",
+                            allow_custom_value=True,
+                            visible = False
+                        )
+                    with gr.Column():
+                        with gr.Group():
+                            # load_model_btn = gr.Button("Load my model", elem_id="load_model_btn")
+                            previous_model = gr.Textbox(
+                                visible = False
+                            )
+
+                            model_status = gr.Textbox(
+                                label = "model status",
+                                show_label = False,
+                                elem_id = "status_info"
+                            )
+                        trigger_word = gr.Textbox(label="Trigger word", interactive=False, visible=False)
+
+            load_model_btn = gr.Button("Load my model", elem_id="load_model_btn")
+            image_in = gr.Image(sources="upload", type="filepath", value=( "shop1.jpg"))
+            # gr.Examples(
+            #     examples=[[os.path.join(os.path.dirname(__file__), "shop2.jpg")],[os.path.join(os.path.dirname(__file__), "shop3.jpg")]], inputs=im)
+
+
+        with gr.Column(elem_id="col-container"):
+            gr.HTML("""
+                <h2 style="text-align: left;">Input a Prompt!</h2>
+                <p style="text-align: left;">Negative prompts and other settings can be found in advanced options</p>
+            """)
+
+            with gr.Row():
+
+                with gr.Column():
+                    # with gr.Group():
+                    prompt = gr.Textbox(label="Prompt", placeholder="Add your trigger word here + prompt")
+
+                    with gr.Accordion(label="Advanced Options", open=False, visible=False):
+                        # with gr.Group():
+                        negative_prompt = gr.Textbox(label="Negative prompt", value="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured")
+                        guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=8.8)
+                        inf_steps = gr.Slider(label="Inference Steps", minimum=25, maximum=50, step=1, value=25)
+                        custom_lora_weight = gr.Slider(label="Custom model weights", minimum=0.1, maximum=0.9, step=0.1, value=0.7)
+                        preprocessor = gr.Dropdown(label="Preprocessor", choices=["canny"], value="canny", interactive=False, info="For the moment, only canny is available")
+                        controlnet_conditioning_scale = gr.Slider(label="Controlnet conditioning Scale", minimum=0.1, maximum=0.9, step=0.01, value=0.3)
+                        seed = gr.Slider(
+                            label="Seed",
+                            info = "-1 denotes a random seed",
+                            minimum=-1,
+                            maximum=423538377342,
+                            step=1,
+                            value=-1
+                        )
+                        last_used_seed = gr.Number(
+                            label = "Last used seed",
+                            info = "the seed used in the last generation",
+                        )
+
+                    submit_btn = gr.Button("Submit")
+
+                    # label = gr.Label(label="Loader")
+                    # submit_btn.click(infer, outputs=[label])
+
+                    result = gr.Image(label="Result", visible=False)
 
             use_custom_model.change(
                 fn = check_use_custom_or_no,
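
Notes

New line 461 of this commit fuses two statements, `demo.queue().launch(share=True)` and `import os`, and everything from that `import os` through line 894 repeats the script from the top, so the committed app.py contains two copies of the application. That duplication would make the file fail at import time and is a plausible cause of the Space's "Runtime error" status. A minimal sketch of the intended ending, assuming the duplicated second copy (new lines 462-894) is dropped:

```python
# Sketch of the intended end of app.py, assuming the accidentally
# duplicated second copy of the script is removed: the Blocks UI is
# built once, then the event queue is enabled and the app launched.
demo.queue().launch(share=True)
```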
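The script also logs in with a Hugging Face token written directly into the source (redacted in the listing); any token committed this way should be treated as leaked and revoked. A sketch of the usual alternative, assuming a Space secret named `HF_TOKEN` (the secret name is an assumption, not part of this commit):

```python
import os
from huggingface_hub import login, HfFileSystem, HfApi

# Read the token from the environment (e.g. a Space secret named
# HF_TOKEN, an assumed name) instead of hardcoding it in app.py.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)

fs = HfFileSystem(token=hf_token)
api = HfApi()
```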
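Finally, the new wiring toggles `custom_model_box` through `check_use_custom_or_no`, but the box is declared as a second `gr.Blocks` nested inside `demo`; the containers Gradio normally shows and hides via `gr.update(visible=...)` are `gr.Group`, `gr.Row`, or `gr.Column`. A sketch of the same toggle using `gr.Group` (an assumption, not the committed code):

```python
import gradio as gr

def check_use_custom_or_no(value):
    # Show the custom-model box only while the checkbox is ticked.
    return gr.update(visible=value)

with gr.Blocks() as demo:
    use_custom_model = gr.Checkbox(label="Use a custom pre-trained LoRa model ? (optional)", value=False)
    with gr.Group(visible=False) as custom_model_box:
        custom_model = gr.Textbox(label="Your custom model ID")
    use_custom_model.change(
        fn=check_use_custom_or_no,
        inputs=[use_custom_model],
        outputs=[custom_model_box],
        queue=False,
    )

demo.launch()
```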