codeShare committed
Commit 16237fc
1 Parent(s): b6b88ea

Upload fusion_t2i_CLIP_interrogator.ipynb

Google Colab Notebooks/fusion_t2i_CLIP_interrogator.ipynb CHANGED
@@ -25,20 +25,11 @@
25
  "id": "cRV2YWomjMBU"
26
  }
27
  },
28
- {
29
- "cell_type": "markdown",
30
- "source": [
31
- "THIS IS AN OLD VERSION OF THE CLIP INTERROGATOR.\n",
32
- "\n",
33
- "YOU WILL FIND THE UP TO DATE VERSION HERE:https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data/tree/main/Google%20Colab%20Jupyter%20Notebooks"
34
- ],
35
- "metadata": {
36
- "id": "9slWHq0JIX6D"
37
- }
38
- },
39
  {
40
  "cell_type": "code",
41
  "source": [
42
  "import os\n",
43
  "home_directory = '/content/'\n",
44
  "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
@@ -69,18 +60,17 @@
69
  " %cd {home_directory}\n",
70
  " !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n",
71
  " loaded = True\n",
72
- " %cd {home_directory + 'fusion-t2i-generator-data/'}\n",
73
- " !unzip vocab.zip\n",
74
- " !unzip reference.zip\n",
75
  "#------#\n",
76
- "%cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
77
- "with open(f'prompts.json', 'r') as f:\n",
78
- " data = json.load(f)\n",
79
- " _df = pd.DataFrame({'count': data})['count']\n",
80
- " prompts = {\n",
81
- " key : value for key, value in _df.items()\n",
82
- " }\n",
83
- "#-------#\n",
84
  "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
85
  "with open(f'reference_prompts.json', 'r') as f:\n",
86
  " data = json.load(f)\n",
@@ -95,28 +85,11 @@
95
  " target_urls = {\n",
96
  " key : value for key, value in _df.items()\n",
97
  " }\n",
98
- "from transformers import AutoTokenizer\n",
99
- "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
100
- "from transformers import CLIPProcessor, CLIPModel\n",
101
- "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
102
- "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
103
- "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
104
- "\n",
105
- "index = 0\n",
106
- "%cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
107
- "vocab_encodings = torch.load('vocab_encodings.pt', weights_only=False)\n",
108
- "for key in vocab_encodings:\n",
109
- " index = index + 1;\n",
110
- "#------#\n",
111
- "NUM_VOCAB_ITEMS = index\n",
112
  "\n",
113
- "index = 0\n",
114
- "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
115
- "for key in torch.load('reference_text_and_image_encodings.pt', weights_only=False):\n",
116
- " index = index + 1;\n",
117
  "#------#\n",
118
- "NUM_REFERENCE_ITEMS = index\n",
119
- "\n"
 
120
  ],
121
  "metadata": {
122
  "id": "TC5lMJrS1HCC"
@@ -124,74 +97,151 @@
124
  "execution_count": null,
125
  "outputs": []
126
  },
127
  {
128
  "cell_type": "code",
129
  "source": [
130
- "# @title \t⚄ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
131
- "# @markdown Choose a pre-encoded reference\n",
132
- "index = 213 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
133
  "PROMPT_INDEX = index\n",
134
  "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
135
  "url = target_urls[f'{PROMPT_INDEX}']\n",
136
  "if url.find('perchance')>-1:\n",
137
  " image = Image.open(requests.get(url, stream=True).raw)\n",
138
  "#------#\n",
139
  "# @markdown ⚖️ 🖼️ encoding <-----?-----> 📝 encoding </div> <br>\n",
140
  "C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
141
- "log_strength_1 = 2.17 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
142
- "prompt_strength = torch.tensor(math.pow(10 ,log_strength_1-1)).to(dtype = torch.float32)\n",
143
- "reference = torch.zeros(768).to(dtype = torch.float32)\n",
144
- "\n",
145
  "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
146
  "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
147
- "reference = torch.add(reference, prompt_strength * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
148
- "reference = torch.add(reference, prompt_strength * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
149
  "references = '' # Clear up memory\n",
150
- "# @markdown -----------\n",
151
- "# @markdown 📝➕ 1st Enhance similarity to prompt(s)\n",
152
- "POS_2 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
153
- "log_strength_2 = 1.03 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
154
- "pos_strength = torch.tensor(math.pow(10 ,log_strength_2-1)).to(dtype = torch.float32)\n",
155
- "for _POS in POS_2.replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
156
- " inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
157
- " text_features_POS = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
158
- " text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
159
- " reference = torch.add(reference, pos_strength * text_features_POS)\n",
160
- "# @markdown -----------\n",
161
- "\n",
162
- "# @markdown -----------\n",
163
- "# @markdown 📝➕ 2nd Enhance similarity to prompt(s)\n",
164
- "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
165
- "log_strength_3 = 1.06 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
166
- "pos_strength = torch.tensor(math.pow(10 ,log_strength_3-1)).to(dtype = torch.float32)\n",
167
- "for _POS in POS.replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
168
- " inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
169
- " text_features_POS = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
170
- " text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
171
- " reference = torch.add(reference, pos_strength * text_features_POS)\n",
172
- "# @markdown -----------\n",
173
- "\n",
174
- "# @markdown 🚫 Penalize similarity to prompt(s)\n",
175
- "NEG = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
176
- "log_strength_4 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
177
- "neg_strength = torch.tensor(math.pow(10 ,log_strength_4-1)).to(dtype = torch.float32)\n",
178
- "for _NEG in NEG.replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
179
- " inputs = tokenizer(text = _NEG.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
180
- " text_features_NEG = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
181
- " text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
182
- " reference = torch.sub(reference, neg_strength * text_features_NEG)\n",
183
- "# @markdown -----------\n",
184
- "# @markdown ⏩ Skip item(s) containing the word(s)\n",
185
- "SKIP = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
186
  "\n",
187
  "min_wordcount = 0 # @param {type:\"slider\", min:0, max:20, step:1}\n",
188
- "\n",
189
- "def isBlacklisted(_txt, _blacklist):\n",
190
- " blacklist = _blacklist.lower().replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').strip()\n",
191
  " txt = _txt.lower().strip()\n",
192
  " if len(txt)<min_wordcount: return True\n",
193
  " if txt.isnumeric(): return True\n",
194
- " if blacklist == '': return False\n",
195
  " for item in list(blacklist.split(',')):\n",
196
  " if item.strip() == '' : continue\n",
197
  " if txt.find(item.strip())> -1 : return True\n",
@@ -203,76 +253,112 @@
203
  " if found:break\n",
204
  " #------#\n",
205
  " return not found\n",
206
  "\n",
207
- "# @markdown -----------\n",
208
- "# @markdown 🔍 How similar should the results be?\n",
209
- "list_size = 1000 # @param {type:'number'}\n",
210
- "start_at_index = 1 # @param {type:'number'}\n",
211
- "# @markdown -----------\n",
212
- "# @markdown Repeat output N times\n",
213
- "N = 7 # @param {type:\"slider\", min:0, max:20, step:1}\n",
214
- "# @markdown -----------\n",
215
- "# @markdown ⚙️ Run the script?\n",
216
- "update_list = True # @param {type:\"boolean\"}\n",
217
- "\n",
218
- "calculate_variance = False # @param {type:\"boolean\"}\n",
219
  "\n",
220
- "ne = update_list\n",
221
  "\n",
222
- "try: first\n",
223
- "except:\n",
224
- " enable = True\n",
225
- " first = True\n",
226
  "\n",
227
- "if (enable):\n",
228
- " reference = reference/reference.norm(p=2, dim=-1, keepdim=True)\n",
229
- " %cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
230
- " sims = torch.matmul(vocab_encodings.dequantize(),reference.t())\n",
231
- " sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
232
  "\n",
233
- " if calculate_variance:\n",
234
- " average = torch.zeros(768)\n",
235
- " for key in range(NUM_VOCAB_ITEMS):\n",
236
- " if (key>=start_at_index and key < start_at_index + list_size):\n",
237
- " average = torch.add(average, vocab_encodings[key].dequantize())\n",
238
- " if (key>=start_at_index + list_size) : break\n",
239
- " average = average * (1/max(1, list_size))\n",
240
- " average = average/average.norm(p=2, dim=-1, keepdim=True)\n",
241
- " average = average.clone().detach();\n",
242
- " variance = torch.zeros(1)\n",
243
- " for key in range(NUM_VOCAB_ITEMS):\n",
244
- " if (key>=start_at_index and key < start_at_index + list_size):\n",
245
- " #dot product\n",
246
- " difference_to_average = 100 * (torch.ones(1) - torch.dot(average[0]\n",
247
- " , vocab_encodings[key].dequantize()[0])/average.norm(p=2, dim=-1, keepdim=True))\n",
248
- " variance = torch.add(variance, difference_to_average * difference_to_average)\n",
249
- " if (key>=start_at_index + list_size) : break\n",
250
- " #--------#\n",
251
- " variance = variance * (1/max(1, list_size))\n",
252
- " variance= variance.clone().detach();\n",
253
- " print(f'The variance for the selected range is {math.sqrt(variance.item())} units from average')\n",
254
- " #--------#\n",
255
- "#---#\n",
256
- "output = '{'\n",
257
- "for _index in range(list_size):\n",
258
- " tmp = prompts[f'{indices[min(_index+start_at_index,NUM_VOCAB_ITEMS-1)].item()}']\n",
259
- " if isBlacklisted(tmp , SKIP): continue\n",
260
- " tmp = fix_bad_symbols(tmp)\n",
261
- " if output.find(tmp)>-1:continue\n",
262
- " output = output + tmp + '|'\n",
263
  "#---------#\n",
264
- "output = (output + '}').replace('|}' , '} ')\n",
265
- "print('')\n",
266
- "print('')\n",
267
- "for iter in range(N):\n",
268
- " print(output)\n",
269
- "#-------#\n",
270
- "print('')\n",
271
- "print('')\n",
272
- "image or print('No image found')"
273
  ],
274
  "metadata": {
275
- "id": "NqL_I3ZSrISq"
276
  },
277
  "execution_count": null,
278
  "outputs": []
@@ -280,89 +366,50 @@
280
  {
281
  "cell_type": "code",
282
  "source": [
283
- "# Check the average value for this set\n",
284
- "sims = torch.matmul(vocab_encodings.dequantize(),average.t())\n",
285
- "sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
286
- "for index in range(10):\n",
287
- " print(prompts[f'{indices[index].item()}'])"
288
  ],
289
  "metadata": {
290
- "id": "XNHz0hfhHRUu"
291
  },
292
  "execution_count": null,
293
  "outputs": []
294
  },
295
  {
296
- "cell_type": "code",
297
  "source": [
298
- "# @title ⚙️📝 Print the results (Advanced)\n",
299
- "list_size = 1000 # @param {type:'number'}\n",
300
- "start_at_index = 0 # @param {type:'number'}\n",
301
- "print_Similarity = True # @param {type:\"boolean\"}\n",
302
- "print_Prompts = True # @param {type:\"boolean\"}\n",
303
- "print_Descriptions = True # @param {type:\"boolean\"}\n",
304
- "compact_Output = True # @param {type:\"boolean\"}\n",
305
- "newline_Separator = False # @param {type:\"boolean\"}\n",
306
- "\n",
307
- "import random\n",
308
- "# @markdown -----------\n",
309
- "# @markdown Mix with...\n",
310
- "list_size2 = 1000 # @param {type:'number'}\n",
311
- "start_at_index2 = 10000 # @param {type:'number'}\n",
312
- "rate_percent = 0 # @param {type:\"slider\", min:0, max:100, step:1}\n",
313
- "\n",
314
- "# @markdown -----------\n",
315
- "# @markdown Repeat output N times\n",
316
- "N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
317
- "\n",
318
- "# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
319
- "RANGE = list_size\n",
320
- "separator = '|'\n",
321
- "if newline_Separator : separator = separator + '\\n'\n",
322
- "\n",
323
- "_prompts = ''\n",
324
- "_sims = ''\n",
325
- "for _index in range(start_at_index + RANGE):\n",
326
- " if _index < start_at_index : continue\n",
327
- " index = indices[_index].item()\n",
328
- "\n",
329
- " prompt = prompts[f'{index}']\n",
330
- " if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
331
- "\n",
332
- " #Remove duplicates\n",
333
- " if _prompts.find(prompt + separator)<=-1:\n",
334
- " _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
335
- " #-------#\n",
336
- " _prompts = _prompts.replace(prompt + separator,'')\n",
337
- " _prompts = _prompts + prompt + separator\n",
338
- " #------#\n",
339
- "#------#\n",
340
- "__prompts = fix_bad_symbols(__prompts)\n",
341
- "__prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
342
- "__sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
343
- "#------#\n",
344
- "\n",
345
- "if(not print_Prompts): __prompts = ''\n",
346
- "if(not print_Similarity): __sims = ''\n",
347
- "\n",
348
- "if(not compact_Output):\n",
349
- " if(print_Descriptions):\n",
350
- " print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ')\n",
351
- " for i in range(N) : print(__prompts)\n",
352
- " print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
353
- " print('')\n",
354
- " else:\n",
355
- " for i in range(N) : print(__prompts)\n",
356
- "else:\n",
357
- " for i in range(N) : print(__prompts)\n",
358
- "#-------#"
359
  ],
360
  "metadata": {
361
- "id": "EdBiAguJO9aX",
362
- "cellView": "form"
363
- },
364
- "execution_count": null,
365
- "outputs": []
366
  },
367
  {
368
  "cell_type": "markdown",
@@ -605,59 +652,54 @@
605
  {
606
  "cell_type": "code",
607
  "source": [
608
- "# @title \t⚄ New code (work in progress)\n",
609
- "\n",
610
- "def get_num_vocab_items(_url):\n",
611
- " num_vocab_items = 0\n",
612
- " for item in _url.split('_'):\n",
613
- " if item.find('safetensors')>-1: num_vocab_items = int(item.replace('.safetensors', ''))\n",
614
- " #------#\n",
615
- " return num_vocab_items-1\n",
616
  "\n",
617
  "\n",
618
- "def get_similiar(_ref , urls, _LIST_SIZE):\n",
619
- " dot_dtype = torch.float16\n",
620
- " _SCALE = torch.tensor(0.0043).to(dot_dtype)\n",
621
- " _DIM = 768\n",
622
- " _vocab = {}\n",
623
- " #----#\n",
624
- " inputs = tokenizer(text = _ref.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
625
- " ref = model.get_text_features(**inputs)[0]\n",
626
- " ref = (ref/ref.norm(p=2, dim=-1, keepdim=True)).to(dtype = dot_dtype)\n",
627
- " #-----#\n",
628
- " num_vocab_items = 0\n",
629
- " for url in urls:\n",
630
- " num_vocab_items = num_vocab_items + get_num_vocab_items(url)\n",
631
- " #------#\n",
632
- " vocab = torch.zeros(num_vocab_items , _DIM).to(torch.uint8)\n",
633
  " prompts = {}\n",
634
- " index = 0\n",
635
- " for url in urls:\n",
636
- " __vocab = load_file(url)\n",
637
- " for key in load_file(url):\n",
638
- " vocab[index] = __vocab[key][1:_DIM+1] - __vocab[key][0]*torch.ones(_DIM).t()\n",
639
- " prompts[f'{index}'] = key\n",
640
- " index = index + 1\n",
641
- " #-------#\n",
642
- " __vocab = {}\n",
643
- " #-------#\n",
644
- " sims = torch.matmul((vocab*_SCALE).to(dot_dtype) , ref.t())\n",
645
- " sorted , indices = torch.sort(sims, dim = 0 , descending = True)\n",
646
- " return indices , prompts , sims\n",
647
- " _prompts = {}\n",
648
- " for index in range(num_vocab_items):\n",
649
- " key = prompts[f'{indices[index]}']\n",
650
- " _prompts[f'{key}'] = sims[key].item()\n",
651
- " index = index + 1\n",
652
- " if index>_LIST_SIZE:break\n",
653
  " #-------#\n",
654
- " return _prompts\n",
655
- "#-------#\n",
656
- "\n"
657
  ],
658
  "metadata": {
659
  "cellView": "form",
660
- "id": "uDzsk02CbMFc"
661
  },
662
  "execution_count": null,
663
  "outputs": []
@@ -665,31 +707,14 @@
665
  {
666
  "cell_type": "code",
667
  "source": [
668
- "vocab = {}\n",
669
- "# @title \t⚄ New code (work in progress)\n",
670
- "ref = 'impressionist painting by luis royo' # @param {type:'string' , placeholder:'type a single prompt to match'}\n",
671
- "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
672
- "urls = [ '/content/fusion-t2i-generator-data/civitai_vocab_q0043_203663.safetensors' ,]\n",
673
- "\n",
674
- " #'/content/fusion-t2i-generator-data/clip_vocab_q0043_541291.safetensors' , '/content/fusion-t2i-generator-data/lyrics_vocab_q0043_41905.safetensors' , '/content/fusion-t2i-generator-data/names_vocab_q0043_162977.safetensors' , '/content/fusion-t2i-generator-data/r34_vocab_q0043_96166.safetensors' ]\n",
675
- "\n",
676
- "indices , prompts , sims = get_similiar(ref , urls , LIST_SIZE)\n",
677
- "\n",
678
- "index = 0\n",
679
- "_prompts = {}\n",
680
- "for index in range(203662):\n",
681
- " try:\n",
682
- " key = prompts[f'{indices[index].item()}']\n",
683
- " print(key)\n",
684
- " except: print('Not found!')\n",
685
- " #_prompts[f'{key}'] = sims[key].item()\n",
686
- " index = index + 1\n",
687
- " if index>LIST_SIZE:break\n",
688
- "\n"
689
  ],
690
  "metadata": {
691
- "cellView": "form",
692
- "id": "Azz1kCza6LB3"
693
  },
694
  "execution_count": null,
695
  "outputs": []
 
25
  "id": "cRV2YWomjMBU"
26
  }
27
  },
28
  {
29
  "cell_type": "code",
30
  "source": [
31
+ "# @title ⚄ Initialize\n",
32
+ "\n",
33
  "import os\n",
34
  "home_directory = '/content/'\n",
35
  "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
 
60
  " %cd {home_directory}\n",
61
  " !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n",
62
  " loaded = True\n",
63
+ "\n",
64
+ "from transformers import AutoTokenizer\n",
65
+ "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
66
+ "from transformers import CLIPProcessor, CLIPModel\n",
67
+ "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
68
+ "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
69
+ "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
70
+ "\n",
71
+ "%cd {home_directory + 'fusion-t2i-generator-data/'}\n",
72
+ "!unzip reference.zip\n",
73
  "#------#\n",
74
  "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
75
  "with open(f'reference_prompts.json', 'r') as f:\n",
76
  " data = json.load(f)\n",
 
85
  " target_urls = {\n",
86
  " key : value for key, value in _df.items()\n",
87
  " }\n",
88
  "\n",
89
  "#------#\n",
90
+ "dot_dtype = torch.float32\n",
91
+ "dim = 768\n",
92
+ "reference = torch.zeros(dim).to(dtype = dot_dtype)"
93
  ],
94
  "metadata": {
95
  "id": "TC5lMJrS1HCC"
 
97
  "execution_count": null,
98
  "outputs": []
99
  },
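For orientation, here is a minimal, self-contained sketch of the CLIP encode path that the Initialize cell above sets up and the later cells reuse. The model name openai/clip-vit-large-patch14 and the normalization step come from the notebook; the prompt string and the dummy image are placeholders, not notebook data.

    # Sketch of the CLIP text/image encoding used throughout the notebook.
    # Model name matches the cell above; the input text and image are stand-ins.
    import torch
    from PIL import Image
    from transformers import AutoTokenizer, CLIPProcessor, CLIPModel

    tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
    model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")

    # Text -> 768-dim feature vector, L2-normalized the way the later cells do it
    inputs = tokenizer(text="impressionist painting", truncation=True, padding=True, return_tensors="pt")
    text_features = model.get_text_features(**inputs)
    text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)

    # Image -> 768-dim feature vector, L2-normalized
    image = Image.new("RGB", (224, 224))  # placeholder image
    inputs = processor(images=image, return_tensors="pt")
    image_features = model.get_image_features(**inputs)
    image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)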
100
+ {
101
+ "cell_type": "markdown",
102
+ "source": [
103
+ "Feel free to skip these cells if you do not plan on using them\n"
104
+ ],
105
+ "metadata": {
106
+ "id": "Xf9zoq-Za3wi"
107
+ }
108
+ },
109
  {
110
  "cell_type": "code",
111
  "source": [
112
+ "# @markdown 🖼️+📝 Choose a pre-encoded reference (optional)\n",
113
+ "index = 657 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
 
114
  "PROMPT_INDEX = index\n",
115
  "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
116
  "url = target_urls[f'{PROMPT_INDEX}']\n",
117
  "if url.find('perchance')>-1:\n",
118
  " image = Image.open(requests.get(url, stream=True).raw)\n",
119
  "#------#\n",
120
+ "try: reference\n",
121
+ "except: reference = torch.zeros(dim).to(dtype = dot_dtype)\n",
122
+ "if reference == '': reference = torch.zeros(dim).to(dtype = dot_dtype)\n",
123
  "# @markdown ⚖️ 🖼️ encoding <-----?-----> 📝 encoding </div> <br>\n",
124
  "C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
125
+ "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
126
  "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
127
  "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
128
+ "reference = torch.add(reference, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
129
+ "reference = torch.add(reference, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
130
  "references = '' # Clear up memory\n",
131
+ "ref = reference.clone().detach()\n",
132
+ "#------#\n",
133
+ "print(f'Prompt for this image : \\n\\n \"{prompt} \" \\n\\n')\n",
134
+ "image"
135
+ ],
136
+ "metadata": {
137
+ "id": "BwrEs5zVB0Sb"
138
+ },
139
+ "execution_count": null,
140
+ "outputs": []
141
+ },
142
+ {
143
+ "cell_type": "code",
144
+ "source": [
145
+ "# @markdown 🖼️ Upload your own image for use as reference via URL (optional)\n",
146
+ "URL = '' # @param {type:'string' ,placeholder:'paste an url here'}\n",
147
+ "image = Image.open(requests.get(URL, stream=True).raw)\n",
148
+ "#---------#\n",
149
+ "# Get image features\n",
150
+ "inputs = processor(images=image, return_tensors=\"pt\")\n",
151
+ "image_features = model.get_image_features(**inputs)\n",
152
+ "image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n",
153
+ "#-----#\n",
154
+ "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
155
+ "ref = ref + math.pow(10,log_strength-1)*image_features\n",
156
+ "image"
157
+ ],
158
+ "metadata": {
159
+ "id": "IqUsiQw2HU2C"
160
+ },
161
+ "execution_count": null,
162
+ "outputs": []
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "source": [
167
+ "# @markdown 🖼️ Upload your own image in the /content/ folder for use as reference (optional)\n",
168
+ "FILENAME = '' # @param {type:'string' ,placeholder:'IMG_123.png'}\n",
169
+ "import cv2\n",
170
+ "image = cv2.imread(FILENAME)\n",
171
+ "image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
172
  "\n",
173
+ "#---------#\n",
174
+ "# Get image features\n",
175
+ "inputs = processor(images=image, return_tensors=\"pt\")\n",
176
+ "image_features = model.get_image_features(**inputs)\n",
177
+ "image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n",
178
+ "#-----#\n",
179
+ "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
180
+ "ref = ref + math.pow(10,log_strength-1)*image_features\n",
181
+ "image"
182
+ ],
183
+ "metadata": {
184
+ "id": "I_-GOwFPKkha"
185
+ },
186
+ "execution_count": null,
187
+ "outputs": []
188
+ },
189
+ {
190
+ "cell_type": "markdown",
191
+ "source": [
192
+ "Save the reference prior to running the Interrogator"
193
+ ],
194
+ "metadata": {
195
+ "id": "zeu6JcM-mk9z"
196
+ }
197
+ },
198
+ {
199
+ "cell_type": "code",
200
+ "source": [
201
+ "# @title ⚄ Save the reference\n",
202
+ "try: ref\n",
203
+ "except: ref = torch.zeros(dim)\n",
204
+ "_ref = {}\n",
205
+ "_ref['weights'] = ref.to(dot_dtype)\n",
206
+ "%cd /content/\n",
207
+ "save_file(_ref , 'reference.safetensors' )"
208
+ ],
209
+ "metadata": {
210
+ "id": "lOQuTPfBMK82"
211
+ },
212
+ "execution_count": null,
213
+ "outputs": []
214
+ },
215
+ {
216
+ "cell_type": "code",
217
+ "source": [
218
+ "# @title ⚄ Run the CLIP interrogator on the saved reference\n",
219
+ "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
220
+ "START_AT = 0 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
221
+ "# @markdown -----\n",
222
+ "# @markdown Select vocab\n",
223
+ "general = False # @param {type:\"boolean\"}\n",
224
+ "civit9 = True # @param {type:\"boolean\"}\n",
225
+ "fanfic1 = False # @param {type:\"boolean\"}\n",
226
+ "fanfic2 = False # @param {type:\"boolean\"}\n",
227
+ "# @markdown -----\n",
228
+ "# @title ⚄ New interrogator code using quantized text corpus\n",
229
+ "%cd /content/\n",
230
+ "_ref = load_file('reference.safetensors' )\n",
231
+ "ref = _ref['weights'].to(dot_dtype)\n",
232
+ "# @markdown 📝 Enhance/Penalize Similarity and skip items containing word(s)\n",
233
+ "POS1 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
234
+ "POS2 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
235
+ "NEG = ''# @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
236
+ "SKIP = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
237
  "min_wordcount = 0 # @param {type:\"slider\", min:0, max:20, step:1}\n",
238
+ "def isBlacklisted(_txt):\n",
239
+ " blacklist = SKIP.lower().replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').strip()\n",
240
+ " if blacklist == '': return False\n",
241
  " txt = _txt.lower().strip()\n",
242
  " if len(txt)<min_wordcount: return True\n",
243
  " if txt.isnumeric(): return True\n",
244
+ " #-----#\n",
245
  " for item in list(blacklist.split(',')):\n",
246
  " if item.strip() == '' : continue\n",
247
  " if txt.find(item.strip())> -1 : return True\n",
 
253
  " if found:break\n",
254
  " #------#\n",
255
  " return not found\n",
256
+ "# @markdown -----\n",
257
+ "# @markdown logarithmic prompt strength x for value 10^(x-1)\n",
258
+ "_POS1 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
259
+ "_POS2 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
260
+ "_NEG = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
261
+ "# @markdown -----\n",
262
+ "for _item in POS1.split(','):\n",
263
+ " item = _item.strip()\n",
264
+ " if item == '':continue\n",
265
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
266
+ " ref = ref + math.pow(10,_POS1-1) * model.get_text_features(**inputs)[0]\n",
267
+ "#-------#\n",
268
+ "for _item in POS2.split(','):\n",
269
+ " item = _item.strip()\n",
270
+ " if item == '':continue\n",
271
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
272
+ " ref = ref + math.pow(10,_POS2-1) * model.get_text_features(**inputs)[0]\n",
273
+ "#-------#\n",
274
+ "for _item in NEG.split(','):\n",
275
+ " item = _item.strip()\n",
276
+ " if item == '':continue\n",
277
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
278
+ " ref = ref + math.pow(10,_NEG-1) * model.get_text_features(**inputs)[0]\n",
279
+ "#------#\n",
280
+ "ref = (ref/ref.norm(p=2, dim=-1, keepdim=True)).to(dtype = dot_dtype)\n",
281
+ "vocab_to_load = ''\n",
282
+ "if (general): vocab_to_load = vocab_to_load + 'general , '\n",
283
+ "if (civit9): vocab_to_load = vocab_to_load + 'civit9 , '\n",
284
+ "if (fanfic1): vocab_to_load = vocab_to_load + 'fanfic1 , '\n",
285
+ "if (fanfic2): vocab_to_load = vocab_to_load + 'fanfic2 , '\n",
286
+ "vocab_to_load = (vocab_to_load +'}').replace(' , }' , '')\n",
287
+ "multi = vocab_to_load.find(',')>-1\n",
288
  "\n",
289
+ "#-----#\n",
290
+ "prompts_folder = f'{home_directory}fusion-t2i-generator-data/vocab-v2/text'\n",
291
+ "encodings_folder = f'{home_directory}fusion-t2i-generator-data/vocab-v2/text_encodings'\n",
292
+ "#----#\n",
293
+ "scale = 0.0043\n",
294
+ "size = 0\n",
295
+ "#------#\n",
296
+ "total_items = 0\n",
297
+ "for filename in os.listdir(prompts_folder):\n",
298
+ " if (not general and filename.find('general')>-1):continue\n",
299
+ " if (not civit9 and filename.find('civit9')>-1):continue\n",
300
+ " if (not fanfic1 and filename.find('fanfic1')>-1):continue\n",
301
+ " if (not fanfic2 and filename.find('fanfic2')>-1):continue\n",
302
+ " size = size + LIST_SIZE\n",
303
+ "#-------#\n",
304
+ "similiar_sims = torch.zeros(size)\n",
305
+ "similiar_prompts = {}\n",
306
+ "_index = 0\n",
307
+ "#-------#\n",
308
+ "similiar_encodings = {}\n",
309
+ "for filename in os.listdir(prompts_folder):\n",
310
+ " if (not general and filename.find('general')>-1):continue\n",
311
+ " if (not civit9 and filename.find('civit9')>-1):continue\n",
312
+ " if (not fanfic1 and filename.find('fanfic1')>-1):continue\n",
313
+ " if (not fanfic2 and filename.find('fanfic2')>-1):continue\n",
314
+ " #------#\n",
315
+ " root_filename = filename.replace('.json', '')\n",
316
+ " %cd {prompts_folder}\n",
317
+ " prompts = {}\n",
318
+ " with open(f'{root_filename}.json', 'r') as f:\n",
319
+ " data = json.load(f).items()\n",
320
+ " for key,value in data:\n",
321
+ " prompts[key] = value\n",
322
+ " num_items = int(prompts['num_items'])\n",
323
+ " total_items = total_items + num_items\n",
324
  "\n",
325
+ " #------#\n",
326
+ " try:vocab_loaded\n",
327
+ " except:\n",
328
+ " vocab_loaded = 'first'\n",
329
+ " #-----#\n",
330
  "\n",
331
+ " if vocab_loaded == 'first' or (vocab_loaded != vocab_to_load and not multi):\n",
332
+ " %cd {encodings_folder}\n",
333
+ " _text_encodings = load_file(f'{root_filename}.safetensors')['weights'].to(torch.uint8)\n",
334
+ " text_encodings = torch.zeros(num_items , dim)\n",
335
+ " tmp = torch.ones(dim).to(dot_dtype)\n",
336
+ " for index in range(num_items):\n",
337
+ " text_encodings[index] = torch.sub(_text_encodings[index][1:dim+1].to(dot_dtype) , tmp , alpha= _text_encodings[index][0].to(dot_dtype))\n",
338
+ " vocab_loaded = vocab_to_load\n",
339
+ " #------#\n",
340
  "\n",
341
  "\n",
342
+ " sims = torch.matmul(text_encodings*scale, ref.t())\n",
343
+ " sorted , indices = torch.sort(sims , dim=0 , descending = True)\n",
344
+ " #-----#\n",
345
+ " for index in range(LIST_SIZE + START_AT):\n",
346
+ " if index<START_AT: continue\n",
347
+ " key = indices[index].item()\n",
348
+ " try:prompt = prompts[f'{key}']\n",
349
+ " except:continue\n",
350
+ " if(isBlacklisted(prompt)):continue\n",
351
+ " #-------#\n",
352
+ " similiar_sims[_index] = torch.tensor(round(sims[key].item(), 5))\n",
353
+ " similiar_prompts[f'{_index}'] = prompt\n",
354
+ " _index = _index + 1\n",
355
+ " #-------#\n",
356
+ " continue\n",
357
  "#---------#\n",
358
+ "print(f'\\n\\nProcessed entire list of {total_items} items to find closest match. Saved closest matching indices {START_AT} to {START_AT + LIST_SIZE} as the dict \"similiar_prompts\" with {LIST SIZE} items. \\n\\n')\n"
359
  ],
360
  "metadata": {
361
+ "id": "kOYZ8Ajn-DD8"
362
  },
363
  "execution_count": null,
364
  "outputs": []
 
366
  {
367
  "cell_type": "code",
368
  "source": [
369
+ "\n",
370
+ "# @title ⚄ Printing results from text corpus\n",
371
+ "sorted , indices = torch.sort(similiar_sims , dim=0 , descending = True)\n",
372
+ "include_similiarity = False # @param {type:\"boolean\"}\n",
373
+ "print_as_list = False # @param {type:\"boolean\"}\n",
374
+ "N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
375
+ "\n",
376
+ "if(print_as_list):\n",
377
+ " for index in range(LIST_SIZE):\n",
378
+ " key = indices[index].item()\n",
379
+ " sim = similiar_sims[key].item()\n",
380
+ " prompt = similiar_prompts[f'{key}']\n",
381
+ " if include_similiarity :print(f'{prompt} - {round(sim*100,1)} %')\n",
382
+ " else: print(f'{prompt}')\n",
383
+ "#-------#\n",
384
+ "else:\n",
385
+ " prompt = ''\n",
386
+ " for iter in range(N):\n",
387
+ " prompt = prompt + '{'\n",
388
+ " for index in range(LIST_SIZE):\n",
389
+ " key = indices[index].item()\n",
390
+ " sim = similiar_sims[key].item()\n",
391
+ " prompt = prompt + fix_bad_symbols(similiar_prompts[f'{key}']) + '|'\n",
392
+ " #-----#\n",
393
+ " prompt = (prompt + '}').replace('|}', '} ')\n",
394
+ " #------#\n",
395
+ " print(f'\\ Similiar prompts: \\n\\n {prompt} \\n\\n')\n",
396
+ " image\n",
397
+ "#-----#\n"
398
  ],
399
  "metadata": {
400
+ "id": "XOMkIKc9-wZz"
401
  },
402
  "execution_count": null,
403
  "outputs": []
404
  },
405
  {
406
+ "cell_type": "markdown",
407
  "source": [
408
+ "OTHER STUFF BELOW - Code for the modules below are work-in-progress."
409
  ],
410
  "metadata": {
411
+ "id": "FRIqYJDEebpf"
412
+ }
413
  },
414
  {
415
  "cell_type": "markdown",
 
652
  {
653
  "cell_type": "code",
654
  "source": [
655
+ "# @title \t⚄ Quick fix for normalizing encoded text corpus tensors\n",
656
  "\n",
657
+ "import os\n",
658
+ "my_mkdirs('/content/output')\n",
659
+ "my_mkdirs('/content/output/text_encodings')\n",
660
  "\n",
661
+ "for filename in os.listdir(f'{prompts_folder}'):\n",
662
+ " %cd {prompts_folder}\n",
663
  " prompts = {}\n",
664
+ " with open(f'{filename}', 'r') as f:\n",
665
+ " data = json.load(f).items()\n",
666
+ " for key,value in data:\n",
667
+ " prompts[key] = value\n",
668
+ " #------#\n",
669
+ " num_items = int(prompts['num_items'])\n",
670
+ "\n",
671
+ " %cd {encodings_folder}\n",
672
+ " enc_filename = filename.replace('json', 'safetensors')\n",
673
+ " _text_encodings = load_file(f'{enc_filename}')['weights'].to(torch.uint8)\n",
674
+ " text_encodings = torch.zeros(num_items , dim)\n",
675
+ " tmp = torch.ones(dim)\n",
676
+ " tmp2 = torch.tensor(1/0.0043)\n",
677
+ " zero_point = 0\n",
678
+ " for index in range(num_items):\n",
679
+ " text_encodings[index] = torch.tensor(0.0043) * torch.sub(_text_encodings[index][1:dim+1] , tmp , alpha= _text_encodings[index][0]).to(torch.float32)\n",
680
+ " text_encodings[index] = tmp2*text_encodings[index]/text_encodings[index].norm(p=2, dim=-1, keepdim = True)\n",
681
+ " test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
682
+ " less_than_zero = test<0\n",
683
+ " while(torch.any(less_than_zero).item()):\n",
684
+ " zero_point = zero_point + 1\n",
685
+ " test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
686
+ " less_than_zero = test<0\n",
687
+ " #------#\n",
688
+ " _text_encodings[index][0] = zero_point\n",
689
+ " _text_encodings[index][1:dim+1] = test\n",
690
  " #-------#\n",
691
+ " %cd /content/output/text_encodings\n",
692
+ "\n",
693
+ " tmp = {}\n",
694
+ " tmp['weights'] = _text_encodings.to(torch.uint8)\n",
695
+ " tmp['num_items'] = torch.tensor(num_items).to(torch.uint8)\n",
696
+ " tmp['scale'] = torch.tensor(0.0043)\n",
697
+ " save_file(tmp , f'{enc_filename}')\n",
698
+ "#------#"
699
  ],
700
  "metadata": {
701
  "cellView": "form",
702
+ "id": "9qgHW1Wr7kZn"
703
  },
704
  "execution_count": null,
705
  "outputs": []
 
707
  {
708
  "cell_type": "code",
709
  "source": [
710
+ "# Check the average value for this set\n",
711
+ "sims = torch.matmul(vocab_encodings.dequantize(),average.t())\n",
712
+ "sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
713
+ "for index in range(10):\n",
714
+ " print(prompts[f'{indices[index].item()}'])"
715
  ],
716
  "metadata": {
717
+ "id": "XNHz0hfhHRUu"
 
718
  },
719
  "execution_count": null,
720
  "outputs": []