KingNish committed on
Commit
8a468da
1 Parent(s): da8cc79

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -52,7 +52,7 @@ pipe_edit.to("cuda")
52
  def promptifier(prompt):
53
  client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
54
  system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, you have to optiomize prompt and also add some keywords like: cute, masterpiece, 4k, realistic, featuristic, or styles according to prompt, or anything good which help in generating better image like use want \n Your task is to reply with final optimized prompt only. If you get big prompt make it concise.[USER]"
55
- formatted_prompt = f"{system_instructions1} {prompt} [PROMPT]"
56
  stream = client1.text_generation(formatted_prompt, max_new_tokens=80, stream=True, details=True, return_full_text=False)
57
  return "".join([response.token.text for response in stream if response.token.text != "</s>"])
58
 
 
52
  def promptifier(prompt):
53
  client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
54
  system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, you have to optiomize prompt and also add some keywords like: cute, masterpiece, 4k, realistic, featuristic, or styles according to prompt, or anything good which help in generating better image like use want \n Your task is to reply with final optimized prompt only. If you get big prompt make it concise.[USER]"
55
+ formatted_prompt = f"{system_instructions1} {prompt} [OPTIMIZED_PROMPT]"
56
  stream = client1.text_generation(formatted_prompt, max_new_tokens=80, stream=True, details=True, return_full_text=False)
57
  return "".join([response.token.text for response in stream if response.token.text != "</s>"])
58