import os

import gradio as gr
from huggingface_hub import login

# Authenticate with the Hub so the private/gated weights below can be downloaded.
# HF_TOKEN must be set in the environment (e.g. as a Space secret).
hf_token = os.environ.get("HF_TOKEN")
if hf_token is None:
    raise RuntimeError("HF_TOKEN environment variable is not set")
login(token=hf_token)

from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
from diffusers.utils import load_image
from PIL import Image
import torch
import numpy as np
import cv2


# SDXL ControlNet conditioned on Canny edge maps, paired with the fp16-safe
# SDXL VAE to avoid NaNs when decoding in half precision.
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0",
    torch_dtype=torch.float16,
)
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=torch.float16,
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
)

custom_model = "fffiloni/eugene_jour_general"

# Load the custom-trained LoRA weights on top of the base pipeline; the
# login() call above already handles authentication for private repos.
pipe.load_lora_weights(custom_model)

# Offload submodules to CPU between steps to reduce VRAM usage; with enough
# GPU memory, pipe.to("cuda") could be used instead.
pipe.enable_model_cpu_offload()
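
# Optional speed-up (a sketch, not part of the original app): recent diffusers
# releases can fuse the loaded LoRA into the base weights, avoiding the
# per-step LoRA overhead at inference time.
# pipe.fuse_lora()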

def infer(image_in, prompt):
    negative_prompt = ""

    image = load_image(image_in)

    # 0.5 is recommended for good generalization of the Canny ControlNet.
    controlnet_conditioning_scale = 0.5

    # Extract Canny edges, then stack the single channel into a 3-channel
    # conditioning image as the pipeline expects.
    image = np.array(image)
    image = cv2.Canny(image, 100, 200)
    image = image[:, :, None]
    image = np.concatenate([image, image, image], axis=2)
    image = Image.fromarray(image)

    images = pipe(
        prompt,
        negative_prompt=negative_prompt,
        image=image,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
    ).images

    images[0].save("hug_lab.png")

    return "hug_lab.png"
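
# Example of calling infer() directly, outside the UI (hypothetical file name,
# for illustration only):
#   result_path = infer("sketch_input.png", "an ink drawing of a harbor at dawn")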

with gr.Blocks() as demo:
    with gr.Column():
        image_in = gr.Image(source="upload", type="filepath")
        prompt = gr.Textbox(label="Prompt")
        submit_btn = gr.Button("Submit")
        result = gr.Image(label="Result")

    submit_btn.click(
        fn=infer,
        inputs=[image_in, prompt],
        outputs=[result],
    )

demo.queue().launch()
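
# To run locally (assuming torch, diffusers, gradio, and opencv-python are
# installed, matching the Space's requirements):
#   HF_TOKEN=<your token> python app.py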