KingNish committed
Commit c0b809f (1 parent: 928f3b9)

Update app.py

Files changed (1): app.py (+16 -18)
app.py CHANGED
@@ -10,15 +10,13 @@ from diffusers import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
 from huggingface_hub import hf_hub_download, InferenceClient
 
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-pipe = StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, vae=vae)
-pipe.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
-pipe.set_adapters("lora")
-pipe.to("cuda")
 
 refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
 refiner.to("cuda")
 
-pipe_fast = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16)
+pipe_fast = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16, vae=vae)
+pipe_fast.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
+pipe_fast.set_adapters("lora")
 pipe_fast.to("cuda")
 
 help_text = """
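Note on the first hunk: the commit removes the Fluently-XL-Final base pipeline entirely and moves its LoRA onto the RealVisXL Lightning pipeline, which now also shares the fp16-fix VAE. A minimal standalone sketch of the resulting setup, assuming only the model IDs visible in the diff (the adapter check on the last line is an illustrative addition, not part of the commit):

    import torch
    from diffusers import AutoencoderKL, StableDiffusionXLPipeline

    # fp16-safe VAE shared by the pipelines, as in the commit
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
    )

    # The Lightning base now hosts the LoRA formerly attached to Fluently-XL-Final
    pipe_fast = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16, vae=vae
    )
    pipe_fast.load_lora_weights(
        "KingNish/Better-Image-XL-Lora",
        weight_name="example-03.safetensors",
        adapter_name="lora",
    )
    pipe_fast.set_adapters("lora")  # activate the adapter by name
    pipe_fast.to("cuda")

    # Illustrative sanity check, not in the commit
    print(pipe_fast.get_active_adapters())  # expected: ['lora']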
@@ -100,33 +98,33 @@ def king(type ,
         steps=int(steps/2.5)
         guidance_scale2=(guidance_scale/3)
 
-        image = pipe_fast( prompt = instruction,
+        refine = pipe_fast( prompt = instruction,
                            guidance_scale = guidance_scale2,
                            num_inference_steps = steps,
                            width = width, height = height,
-                           use_resolution_binning = True,
-                           generator = generator, output_type="latent",
-        ).images
+                           generator = generator,
+        ).images[0]
     else:
         if enhance_prompt:
             print(f"BEFORE: {instruction} ")
             instruction = promptifier(instruction)
             print(f"AFTER: {instruction} ")
-        image = pipe( prompt = instruction,
+        guidance_scale2=(guidance_scale/2)
+
+        image = pipe_fast( prompt = instruction,
                       negative_prompt=negative_prompt,
-                      guidance_scale = guidance_scale,
+                      guidance_scale = guidance_scale2,
                       num_inference_steps = steps,
                       width = width, height = height,
-                      use_resolution_binning = True,
                       generator = generator, output_type="latent",
         ).images
 
-    refine = refiner( prompt=instruction,
-                      negative_prompt = negative_prompt,
-                      guidance_scale = guidance_scale,
-                      num_inference_steps= steps,
-                      image=image, generator=generator,
-    ).images[0]
+        refine = refiner( prompt=instruction,
+                          negative_prompt = negative_prompt,
+                          guidance_scale = guidance_scale,
+                          num_inference_steps= steps,
+                          image=image, generator=generator,
+        ).images[0]
     return seed, refine
 
 client = InferenceClient()
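Note on the second hunk: inside king(), the fast branch no longer emits latents for the refiner; it decodes in one pass and returns the image directly (.images[0], no output_type="latent"), so the refiner now runs only in the quality branch, whose base pass also gets a halved guidance scale. A self-contained sketch of the base-to-refiner latent handoff that the quality branch keeps; the prompt, seed, and step counts below are placeholders, not values from the app:

    import torch
    from diffusers import (AutoencoderKL, StableDiffusionXLImg2ImgPipeline,
                           StableDiffusionXLPipeline)

    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
    )
    base = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16, vae=vae
    ).to("cuda")
    refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0",
        vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16",
    ).to("cuda")

    prompt = "a photo of a lighthouse at dawn"          # placeholder prompt
    generator = torch.Generator("cuda").manual_seed(0)  # placeholder seed

    # Stage 1: the base pipeline stops at latents instead of decoding to pixels
    latents = base(
        prompt=prompt, num_inference_steps=25, guidance_scale=5.0,
        generator=generator, output_type="latent",
    ).images

    # Stage 2: the refiner's img2img interface accepts those latents directly
    image = refiner(
        prompt=prompt, num_inference_steps=25, guidance_scale=5.0,
        image=latents, generator=generator,
    ).images[0]
    image.save("refined.png")

This mirrors the else branch above, where the halved guidance_scale2 drives the base pass and the full guidance_scale drives the refiner.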
 