KingNish committed on
Commit cad0ecd
1 Parent(s): c0b809f

Update app.py

Files changed (1)
  1. app.py +11 -7
app.py CHANGED

@@ -51,8 +51,8 @@ pipe_edit.to("cuda")
 
 def promptifier(prompt):
     client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
-    system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, reply with prompt only, Your task is to reply with final prompt in SDXL image generation format only. Just reply with pure prompt.[USER]"
-    formatted_prompt = f"{system_instructions1} {prompt} [FINAL_PROMPT]"
+    system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, you have to optiomize prompt and also add some keywords like, 4k, realistic, featuristic according to prompt and also break prompt into sub-lines using comma, Your task is to reply with final optimized prompt only. Just reply with prompt only.[USER]"
+    formatted_prompt = f"{system_instructions1} {prompt} [PROMPT]"
     stream = client1.text_generation(formatted_prompt, max_new_tokens=80, stream=True, details=True, return_full_text=False)
     return "".join([response.token.text for response in stream if response.token.text != "</s>"])
 
@@ -95,15 +95,19 @@ def king(type ,
     seed = random.randint(0, 999999)
     generator = torch.Generator().manual_seed(seed)
     if fast:
+        if enhance_prompt:
+            print(f"BEFORE: {instruction} ")
+            instruction = promptifier(instruction)
+            print(f"AFTER: {instruction} ")
         steps=int(steps/2.5)
-        guidance_scale2=(guidance_scale/3)
+        guidance_scale2=(guidance_scale/2)
 
         refine = pipe_fast( prompt = instruction,
                 guidance_scale = guidance_scale2,
                 num_inference_steps = steps,
                 width = width, height = height,
-                generator = generator,
-                ).images[0]
+                generator = generator, output_type="latent",
+                ).images
     else:
         if enhance_prompt:
             print(f"BEFORE: {instruction} ")
@@ -119,12 +123,12 @@ def king(type ,
             generator = generator, output_type="latent",
             ).images
 
-    refine = refiner( prompt=instruction,
+    refine = refiner( prompt=instruction,
            negative_prompt = negative_prompt,
            guidance_scale = guidance_scale,
           num_inference_steps= steps,
           image=image, generator=generator,
-           ).images[0]
+           ).images[0]
    return seed, refine
 
 client = InferenceClient()
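
For reference, a sketch of promptifier as it reads after this commit, assembled from the first hunk above. The import line is an assumption: the diff's context shows InferenceClient in use, but the import itself sits outside the hunk.

from huggingface_hub import InferenceClient  # assumed; import not shown in the diff

def promptifier(prompt):
    # Ask Mistral-7B-Instruct to rewrite the user's prompt for SDXL generation
    client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
    system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, you have to optiomize prompt and also add some keywords like, 4k, realistic, featuristic according to prompt and also break prompt into sub-lines using comma, Your task is to reply with final optimized prompt only. Just reply with prompt only.[USER]"
    formatted_prompt = f"{system_instructions1} {prompt} [PROMPT]"
    # Stream tokens and keep everything except the end-of-sequence marker
    stream = client1.text_generation(formatted_prompt, max_new_tokens=80, stream=True, details=True, return_full_text=False)
    return "".join([response.token.text for response in stream if response.token.text != "</s>"])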
 
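Likewise, a sketch of the reworked fast branch of king(), assembled from the second hunk (pipe_fast and the function's arguments are defined earlier in app.py, outside these hunks). The visible changes: prompt enhancement now also runs in fast mode, the guidance scale is halved rather than divided by 3, and pipe_fast now emits latents (output_type="latent", .images) instead of a decoded image (.images[0]).

if fast:
    # Prompt enhancement previously ran only in the slow (non-fast) path
    if enhance_prompt:
        print(f"BEFORE: {instruction} ")
        instruction = promptifier(instruction)
        print(f"AFTER: {instruction} ")
    steps = int(steps / 2.5)               # fast mode trims the step count
    guidance_scale2 = guidance_scale / 2   # was guidance_scale / 3 before this commit
    refine = pipe_fast(prompt=instruction,
                       guidance_scale=guidance_scale2,
                       num_inference_steps=steps,
                       width=width, height=height,
                       generator=generator, output_type="latent",
                       ).images            # latents now, not a decoded PIL image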