codeShare committed
Commit aa8eb2c
1 Parent(s): 060c964

Upload sd_token_similarity_calculator.ipynb

Google Colab Notebooks/sd_token_similarity_calculator.ipynb CHANGED
@@ -365,6 +365,17 @@
   "execution_count": null,
   "outputs": []
   },
+ {
+ "cell_type": "code",
+ "source": [
+ "!pip install datasets"
+ ],
+ "metadata": {
+ "id": "mNZOQYq5h1Sk"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
   {
   "cell_type": "code",
   "source": [
@@ -1123,9 +1134,9 @@
   "import os\n",
   "%cd {home_directory}\n",
   "#os.remove(f'{home_directory}results.zip')\n",
- "root_output_folder = home_directory + 'output/'\n",
+ "root_output_folder = home_directory + 'outputs/'\n",
   "zip_dest = f'{home_directory}results.zip'\n",
- "!zip -r {zip_dest} '/content/text-to-image-prompts/tokens'"
+ "!zip -r {zip_dest} {root_output_folder}"
   ],
   "metadata": {
   "id": "V4YCpmWlkPMG"
@@ -1133,6 +1144,57 @@
   "execution_count": null,
   "outputs": []
   },
+ {
+ "cell_type": "code",
+ "source": [
+ "\n",
+ "# @title Make your own text_encodings .safetensor file for later use (using GPU is recommended to speed things up , but not required)\n",
+ "import json\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import shelve\n",
+ "import torch\n",
+ "from safetensors.torch import save_file\n",
+ "import json\n",
+ "\n",
+ "# Determine if this notebook is running on Colab or Kaggle\n",
+ "#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
+ "home_directory = '/content/'\n",
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
+ "%cd {home_directory}\n",
+ "#-------#\n",
+ "\n",
+ "# Load the data if not already loaded\n",
+ "try:\n",
+ "  loaded\n",
+ "except:\n",
+ "  %cd {home_directory}\n",
+ "  !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
+ "  loaded = True\n",
+ "#--------#\n",
+ "\n",
+ "# User input\n",
+ "target = home_directory + 'text-to-image-prompts/danbooru/'\n",
+ "root_output_folder = home_directory + 'output/'\n",
+ "output_folder = root_output_folder + 'danbooru/'\n",
+ "root_filename = '🎀 fusion-t2i-danbooru-tags'\n",
+ "NUM_FILES = 1\n",
+ "#--------#\n",
+ "\n",
+ "# Setup environment\n",
+ "def my_mkdirs(folder):\n",
+ "  if os.path.exists(folder)==False:\n",
+ "    os.makedirs(folder)\n",
+ "#------------#\n",
+ "\n"
+ ],
+ "metadata": {
+ "id": "JCt-xelIkl1a"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
   {
   "cell_type": "code",
   "source": [
@@ -1190,6 +1252,95 @@
   "execution_count": null,
   "outputs": []
   },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title Download nouns - import data\n",
+ "import os\n",
+ "import json\n",
+ "\n",
+ "# Setup environment\n",
+ "def my_mkdirs(folder):\n",
+ "  if os.path.exists(folder)==False:\n",
+ "    os.makedirs(folder)\n",
+ "#--------#\n",
+ "\n",
+ "# Determine if this notebook is running on Colab or Kaggle\n",
+ "#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
+ "home_directory = '/content/'\n",
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
+ "%cd {home_directory}\n",
+ "#-------#\n",
+ "\n",
+ "root_output_folder = home_directory + 'outputs/'\n",
+ "\n",
+ "# @title Extract nouns\n",
+ "my_mkdirs(root_output_folder)\n",
+ "%cd {root_output_folder}\n",
+ "\n",
+ "!pip install datasets\n",
+ "\n",
+ "from datasets import load_dataset\n",
+ "\n",
+ "ds = load_dataset(\"bartoszmaj/nouns_one\")\n",
+ "#ds2 = load_dataset(\"bartoszmaj/nouns_two\")\n",
+ "#ds3 = load_dataset(\"bartoszmaj/nouns_three\")\n",
+ "#ds4 = load_dataset(\"bartoszmaj/nouns_four\")\n",
+ "\n"
+ ],
+ "metadata": {
+ "cellView": "form",
+ "id": "HC72wZW9llzw"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title Download nouns - pick three items at random and write in JSONs\n",
+ "import random\n",
+ "my_mkdirs(root_output_folder)\n",
+ "%cd {root_output_folder}\n",
+ "for file_index in range(21):\n",
+ "  if file_index <=0: continue\n",
+ "  tripple_nouns = {}\n",
+ "  for index in range (10000):\n",
+ "    word = \"\"\n",
+ "    for its in range(3):\n",
+ "      _index = random.randint(0,1000000-1)\n",
+ "      words = list(ds['train'][_index]['nouns'])\n",
+ "      if len(words)>0:\n",
+ "        _word = random.choice(words)\n",
+ "        word = word + ' ' + _word\n",
+ "    #---------#\n",
+ "    tripple_nouns[f'{index}'] = word\n",
+ "  #--------#\n",
+ "  with open(f'tripple_nouns_{file_index}.json', 'w') as f:\n",
+ "    json.dump(tripple_nouns, f)\n",
+ "  #----------#\n",
+ "\n"
+ ],
+ "metadata": {
+ "cellView": "form",
+ "id": "CWlWk0KpuX55",
+ "outputId": "418a74c3-f83c-4cfd-8514-437974a84601",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 13,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/outputs\n"
+ ]
+ }
+ ]
+ },
   {
   "cell_type": "markdown",
   "source": [
@@ -1964,7 +2115,8 @@
   "# See this link for additional stuff to do with shelve: https://docs.python.org/3/library/shelve.html"
   ],
   "metadata": {
- "id": "iWeFnT1gAx6A"
+ "id": "iWeFnT1gAx6A",
+ "cellView": "form"
   },
   "execution_count": null,
   "outputs": []