codeShare committed
Commit 7c663ac • 1 Parent(s): 3ead44b

Upload fusion_t2i_CLIP_interrogator.ipynb

Google Colab Notebooks/fusion_t2i_CLIP_interrogator.ipynb CHANGED
@@ -387,11 +387,6 @@
  "cell_type": "code",
  "source": [
  "# @title \t⚄ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
- "\n",
- "#image_index = 0 # @param {type:'number'}\n",
- "# @markdown 📥 Load the data (only required one time)\n",
- "load_the_data = True # @param {type:\"boolean\"}\n",
- "\n",
  "# @markdown 🖼️ Choose a pre-encoded reference\n",
  "index = 708 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
  "\n",
@@ -407,9 +402,47 @@
  "strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
  "\n",
  "# @markdown Calculate most similiar items using above settings?\n",
- "enable = False # @param {type:\"boolean\"}\n",
  "\n",
- "if (load_the_data):\n",
+ "\n",
+ "# @title ⚙️📝 Print the results (Advanced)\n",
+ "list_size = 1000 # param {type:'number'}\n",
+ "start_at_index = 0 # param {type:'number'}\n",
+ "print_Similarity = True # param {type:\"boolean\"}\n",
+ "print_Prompts = True # param {type:\"boolean\"}\n",
+ "print_Prefix = True # param {type:\"boolean\"}\n",
+ "print_Descriptions = True # param {type:\"boolean\"}\n",
+ "compact_Output = True # param {type:\"boolean\"}\n",
+ "\n",
+ "# @markdown -----------\n",
+ "# @markdown 📝 Printing options\n",
+ "newline_Separator = False # @param {type:\"boolean\"}\n",
+ "\n",
+ "import random\n",
+ "list_size2 = 1000 # param {type:'number'}\n",
+ "start_at_index2 = 10000 # param {type:'number'}\n",
+ "rate_percent = 0 # param {type:\"slider\", min:0, max:100, step:1}\n",
+ "\n",
+ "# @markdown Repeat output N times\n",
+ "N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
+ "\n",
+ "#image_index = 0 # @param {type:'number'}\n",
+ "# @markdown 📥 Reload vocab (required if you change the vocab)\n",
+ "reload_vocab = False # @param {type:\"boolean\"}\n",
+ "_load_the_data = reload_vocab\n",
+ "\n",
+ "#image_index = 0 # @param {type:'number'}\n",
+ "# @markdown ⚙️ Do dot product calculation (disable if you only want to browse images)\n",
+ "run_script = True # @param {type:\"boolean\"}\n",
+ "enable = run_script\n",
+ "\n",
+ "\n",
+ "# Load the data if not already loaded\n",
+ "try:\n",
+ " loaded2\n",
+ "except:\n",
+ " _load_the_data = True\n",
+ "\n",
+ "if (_load_the_data):\n",
  " target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
  " from transformers import AutoTokenizer\n",
  " tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
@@ -459,27 +492,6 @@
  " # Sort the items\n",
  " sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
  "\n",
- " # @title ⚙️📝 Print the results (Advanced)\n",
- " list_size = 1000 # param {type:'number'}\n",
- " start_at_index = 0 # param {type:'number'}\n",
- " print_Similarity = True # param {type:\"boolean\"}\n",
- " print_Prompts = True # param {type:\"boolean\"}\n",
- " print_Prefix = True # param {type:\"boolean\"}\n",
- " print_Descriptions = True # param {type:\"boolean\"}\n",
- " compact_Output = True # param {type:\"boolean\"}\n",
- "\n",
- " # @markdown -----------\n",
- " # @markdown ⚙️📝 Printing options\n",
- " newline_Separator = False # @param {type:\"boolean\"}\n",
- "\n",
- " import random\n",
- " list_size2 = 1000 # param {type:'number'}\n",
- " start_at_index2 = 10000 # param {type:'number'}\n",
- " rate_percent = 0 # param {type:\"slider\", min:0, max:100, step:1}\n",
- "\n",
- " # @markdown Repeat output N times\n",
- " N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
- "\n",
  " # title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
  " RANGE = list_size
  " separator = '|'\n",
 
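Note on the first hunk: the `load_the_data` checkbox that used to sit at the top of the cell is removed; loading is instead controlled by the `reload_vocab` toggle and the run-once guard added in the second hunk. For context, the `# @param` and `# @markdown` comments throughout this cell are Colab form annotations, not executable directives. A minimal sketch using the notebook's own names and values (the `if` body is an invented placeholder):

    # In Colab, a "# @param" comment renders the assignment as a form widget
    # (checkbox, slider, ...); the variable itself remains ordinary Python.
    load_the_data = True  # @param {type:"boolean"}
    index = 708  # @param {type:"slider", min:0, max:1666, step:1}

    # "# @markdown" lines render as captions in the form panel.
    # @markdown 🖼️ Choose a pre-encoded reference

    if load_the_data:
        print(f"would load the dataset, then show reference #{index}")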
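Note on the second hunk: most of the added lines are the print settings relocated from inside the compute block (see the third hunk), but the behavioural change is the run-once guard at the end. Referencing the sentinel name `loaded2` raises NameError until it has been defined, so the data is loaded on the first run of the cell or whenever `reload_vocab` is ticked. A minimal sketch of the idiom, assuming the cell defines `loaded2` once loading succeeds (that assignment lies outside the diff context):

    reload_vocab = False  # @param {type:"boolean"}
    _load_the_data = reload_vocab

    try:
        loaded2  # sentinel: undefined until the first successful load
    except NameError:  # the notebook uses a bare "except:"; NameError is the narrow form
        _load_the_data = True

    if _load_the_data:
        # expensive I/O here: getPromptsAndLinks(...), AutoTokenizer.from_pretrained(...)
        loaded2 = True  # assumption: set after loading so later runs skip this branch

The same refactor replaces the old `enable = False` checkbox with `enable = run_script`, defaulting to True so the dot-product pass runs unless the user only wants to browse images.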
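Note on the third hunk: a pure deletion; the print settings now live at the top of the cell, so `list_size` and friends are defined once before use. The surrounding context shows the ranking step that consumes them. A self-contained sketch of that step (the tensor size and the printout are assumptions; note the notebook assigns to the name `sorted`, shadowing the Python builtin, which the sketch avoids):

    import torch

    sims = torch.rand(1667)  # placeholder for the computed similarity scores
    # Sort descending; "indices" maps rank -> original item index.
    sorted_sims, indices = torch.sort(sims, dim=0, descending=True)

    list_size = 1000  # defined up front after this commit
    for rank in range(min(5, list_size)):  # top 5 only, for a short demo
        idx = indices[rank].item()
        print(f"{sorted_sims[rank].item() * 100:.1f}% : item #{idx}")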