codeShare committed on
Commit a05998f
1 Parent(s): 27c3570

Upload fusion_t2i_CLIP_interrogator.ipynb

Google Colab Notebooks/fusion_t2i_CLIP_interrogator.ipynb CHANGED
@@ -264,24 +264,24 @@
   "source": [
   "# @title 📚 Select items to sample from\n",
   "\n",
-  "prompt_features = True # @param {\"type\":\"boolean\",\"placeholder\":\"🦜\"}\n",
-  "civitai_blue_set = True # @param {\"type\":\"boolean\",\"placeholder\":\"📘\"}\n",
-  "civitai_yellow_set = True # @param {\"type\":\"boolean\",\"placeholder\":\"📘\"}\n",
-  "artby_prompts = True # @param {\"type\":\"boolean\",\"placeholder\":\"📘\"}\n",
+  "prompt_features = False # @param {\"type\":\"boolean\",\"placeholder\":\"🦜\"}\n",
+  "civitai_blue_set = False # @param {\"type\":\"boolean\",\"placeholder\":\"📘\"}\n",
+  "civitai_yellow_set = False # @param {\"type\":\"boolean\",\"placeholder\":\"📘\"}\n",
+  "artby_prompts = False # @param {\"type\":\"boolean\",\"placeholder\":\"📘\"}\n",
   "suffix = False # @param {\"type\":\"boolean\",\"placeholder\":\"🔹\"}\n",
   "prefix = False # @param {\"type\":\"boolean\",\"placeholder\":\"🔸\"}\n",
-  "emojis = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
+  "emojis = False # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
   "#------#\n",
-  "\n",
+  "suffix_pairs = True # @param {\"type\":\"boolean\",\"placeholder\":\"🔹\"}\n",
   "first_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"🔹\"}\n",
   "last_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"🔸\"}\n",
-  "celebs = True # @param {\"type\":\"boolean\",\"placeholder\":\"🆔👨\"}\n",
+  "celebs = False # @param {\"type\":\"boolean\",\"placeholder\":\"🆔👨\"}\n",
   "#-------#\n",
   "danbooru_tags = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎀\"}\n",
-  "lyrics = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
-  "tripple_nouns = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
+  "lyrics = False # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
+  "tripple_nouns = False # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
   "#-----#\n",
-  "female_fullnames = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
+  "female_fullnames = False # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
   "debug = False\n",
   "#------#\n",
   "prompts = {}\n",
@@ -289,6 +289,9 @@
   "nA = 0\n",
   "#--------#\n",
   "\n",
+  "if suffix_pairs:\n",
+  " url = '/content/text-to-image-prompts/suffix_pairs'\n",
+  " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
   "\n",
   "if tripple_nouns:\n",
   " url = '/content/text-to-image-prompts/nouns'\n",
@@ -391,14 +394,14 @@
   "source": [
   "# @title \t⚄ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
   "# @markdown 🖼️ Choose a pre-encoded reference\n",
-  "index = 708 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
+  "index = 617 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
   "PROMPT_INDEX = index\n",
   "\n",
   "import math\n",
   "# @markdown -----------\n",
   "# @markdown 📝➕ Enhance similarity to prompt(s)\n",
   "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
-  "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+  "log_strength = 1.06 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
   "pos_strength = math.pow(10 ,log_strength-1)\n",
   "# @markdown -----------\n",
   "\n",
@@ -408,10 +411,12 @@
   "neg_strength = math.pow(10 ,log_strength-1)\n",
   "\n",
   "# @markdown ⏩ Skip item(s) containing the word\n",
-  "SKIP = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
+  "SKIP = '_ass , ass_' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
   "\n",
-  "# @markdown ⚖️ sim_ref = C* text_encoding + image_encoding*(1-C) <br>\n",
+  "# @markdown ⚖️ sim_ref =(10^(log_strength-1)) * ( C* text_encoding + image_encoding*(1-C) )<br>\n",
   "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
+  "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+  "prompt_strength = math.pow(10 ,log_strength-1)\n",
   "\n",
   "blacklist = SKIP\n",
   "# @markdown -----------\n",
@@ -449,8 +454,10 @@
   "\n",
   "\n",
   "def isBlacklisted(txt):\n",
+  " if txt.strip().isnumeric(): return True\n",
   " if blacklist.strip() == '': return False\n",
   " for item in list(blacklist.split(',')):\n",
+  " if item.strip() == '' : continue\n",
   " if txt.find(item.strip())> -1 : return True\n",
   " #------#\n",
   " return False\n",
488
  " image_features_A = target_image_encodings[f'{index}']\n",
489
  "\n",
490
  " # text-similarity\n",
491
- " sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
492
  "\n",
493
  " # Calculate negatives\n",
494
  " neg_sims = {}\n",
@@ -518,7 +525,7 @@
   "\n",
   " # plus image-similarity\n",
   " img_sims = torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
-  " sims = sims + (1-C) * img_sims\n",
+  " sims = sims + prompt_strength * (1-C) * img_sims\n",
   "\n",
   "\n",
   " # plus POS-similarity\n",
@@ -546,13 +553,15 @@
   " for _index in range(start_at_index + RANGE):\n",
   " if _index < start_at_index : continue\n",
   "\n",
-  " for iters in range(10000):\n",
+  " for iters in range(1000):\n",
   " found = True\n",
-  " index = indices[_index + offset].item()\n",
+  " index = indices[min(_index + offset,NUM_VOCAB_ITEMS-1)].item()\n",
   " if isBlacklisted(prompts[f'{index}'].lower()):\n",
   " offset = offset + 1\n",
   " found = False\n",
-  " if (_index + offset)>NUM_VOCAB_ITEMS : found = True\n",
+  " if (_index + offset)>=NUM_VOCAB_ITEMS-2 :\n",
+  " found = True\n",
+  " offset = NUM_VOCAB_ITEMS - _index -1\n",
   " if found : break\n",
   " #-------#\n",
   "\n",
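The reworked cell folds an overall prompt_strength = 10^(log_strength-1) factor into the blended reference similarity, i.e. sim_ref = (10^(log_strength-1)) * ( C * text_sim + (1-C) * image_sim ), with the image term still scaled by logit_scale as before. A minimal sketch of that scoring step, assuming unit-normalized CLIP embeddings; text_tensor, text_features_A and image_features_A are random stand-ins here, not the notebook's loaded encodings:

```python
import math
import torch
import torch.nn.functional as F

# Stand-in CLIP embeddings (vocabulary x dim, plus one reference pair), unit-normalized.
text_tensor      = F.normalize(torch.randn(1000, 768), dim=-1)
text_features_A  = F.normalize(torch.randn(1, 768), dim=-1)
image_features_A = F.normalize(torch.randn(1, 768), dim=-1)

C = 0.5                # text vs. image weighting from the slider
log_strength = 1       # slider value; 1 keeps prompt_strength at 1.0
logit_scale = 100.0    # assumed CLIP logit scale
prompt_strength = math.pow(10, log_strength - 1)

# sim_ref = (10^(log_strength-1)) * ( C * text_sim + (1-C) * image_sim )
sims = prompt_strength * C * torch.matmul(text_tensor, text_features_A.t())
img_sims = torch.matmul(text_tensor, image_features_A.t()) * logit_scale
sims = sims + prompt_strength * (1 - C) * img_sims

# Rank vocabulary items by the blended score (the notebook walks this ranking later).
sorted_sims, indices = torch.sort(sims.squeeze(), dim=0, descending=True)
```

As in the changed cell, only the image similarity is multiplied by logit_scale; this commit does not touch that asymmetry.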
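The isBlacklisted helper gains two guards in this commit: purely numeric vocabulary items are always skipped, and empty entries in the comma-separated SKIP string (such as a trailing comma) are ignored. A self-contained sketch of the updated filter, using the cell's new default SKIP value:

```python
blacklist = '_ass , ass_'   # mirrors the SKIP parameter in the updated cell

def isBlacklisted(txt: str) -> bool:
    # New guard: drop purely numeric vocab items outright.
    if txt.strip().isnumeric():
        return True
    # An empty blacklist filters nothing else.
    if blacklist.strip() == '':
        return False
    for item in blacklist.split(','):
        # New guard: ignore empty entries such as trailing commas.
        if item.strip() == '':
            continue
        # Substring match against the blacklisted fragment.
        if txt.find(item.strip()) > -1:
            return True
    return False

print(isBlacklisted('12345'))      # True  (numeric item)
print(isBlacklisted('bad_ass'))    # True  (contains '_ass')
print(isBlacklisted('landscape'))  # False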
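The vocabulary scan that walks the ranked items is also hardened: the retry loop is capped at 1000 iterations, the lookup is clamped with min(_index + offset, NUM_VOCAB_ITEMS-1), and the offset is reset once the scan reaches the end of the vocabulary, so skipping blacklisted items can no longer index out of range. A simplified stand-alone sketch of that skip-with-offset loop; prompts, indices and the filter below are toy stand-ins, not the notebook's data:

```python
import torch

# Toy stand-ins for the notebook's ranked vocabulary.
prompts = {str(i): ('42' if i % 3 == 0 else f'item {i}') for i in range(20)}
NUM_VOCAB_ITEMS = len(prompts)
indices = torch.arange(NUM_VOCAB_ITEMS)   # pretend these are already sorted by similarity

def isBlacklisted(txt):
    return txt.strip().isnumeric()        # placeholder for the real filter

start_at_index, RANGE, offset = 0, 5, 0
for _index in range(start_at_index + RANGE):
    if _index < start_at_index:
        continue
    for iters in range(1000):              # bounded number of retries
        found = True
        # Clamp so skipping blacklisted items never reads past the vocabulary.
        index = indices[min(_index + offset, NUM_VOCAB_ITEMS - 1)].item()
        if isBlacklisted(prompts[f'{index}'].lower()):
            offset = offset + 1
            found = False
        if (_index + offset) >= NUM_VOCAB_ITEMS - 2:
            found = True
            offset = NUM_VOCAB_ITEMS - _index - 1
        if found:
            break
    print(prompts[f'{index}'])
```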