Upload fusion_t2i_CLIP_interrogator.ipynb

Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb (CHANGED)
@@ -101,7 +101,8 @@
     {
       "cell_type": "markdown",
       "source": [
-        "Feel free to skip these cells if you do not plan on using them"
+        "**Feel free to skip these cells if you do not plan on using them**\n",
+        "\n"
       ],
       "metadata": {
         "id": "Xf9zoq-Za3wi"
@@ -221,7 +222,7 @@
     {
       "cell_type": "markdown",
       "source": [
-        "Save the reference prior to running the Interrogator"
+        "**Save the reference prior to running the Interrogator**"
       ],
       "metadata": {
         "id": "zeu6JcM-mk9z"
@@ -242,28 +243,20 @@
       ],
       "metadata": {
         "id": "lOQuTPfBMK82",
-        "
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        }
+        "cellView": "form"
       },
-      "execution_count":
-      "outputs": [
-        {
-          "output_type": "stream",
-          "name": "stdout",
-          "text": [
-            "/content\n"
-          ]
-        }
-      ]
+      "execution_count": null,
+      "outputs": []
     },
     {
       "cell_type": "code",
       "source": [
         "# @title ⚄ Run the CLIP interrogator on the saved reference\n",
         "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
-        "
+        "_START_AT = '0' # @param [\"0\", \"10000\", \"50000\"] {allow-input: true}\n",
+        "START_AT = 0\n",
+        "if _START_AT.isnumeric(): START_AT = int(_START_AT)\n",
+        "\n",
         "# @markdown -----\n",
         "# @markdown Select vocab\n",
         "general = False # @param {type:\"boolean\"}\n",
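The hard-coded offset removed in this hunk is replaced by a Colab form field: a dropdown whose allow-input flag also accepts free-text, parsed back into an integer. A minimal standalone sketch of that pattern as added in this commit (the # @param annotation only takes effect inside Colab; elsewhere the defaults below are used as-is):

    # Colab renders this as a dropdown with an editable text box.
    _START_AT = '0'  # @param ["0", "10000", "50000"] {allow-input: true}

    # Parse the string back into the integer offset the rest of the cell expects;
    # non-numeric input silently falls back to 0.
    START_AT = 0
    if _START_AT.isnumeric():
        START_AT = int(_START_AT)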
@@ -401,7 +394,7 @@
         " #-------#\n",
         " continue\n",
         "#---------#\n",
-        "print(f'\\nProcessed entire list of {total_items} items to find closest match.\\nSaved closest matching indices {START_AT} to {LIST_SIZE} as the dict \"similiar_prompts\" with {
+        "print(f'\\nProcessed entire list of {total_items} items to find closest match.\\nSaved closest matching indices {START_AT} to {START_AT + LIST_SIZE} as the dict \"similiar_prompts\" with {LIST_SIZE} items.\\n')\n",
         "\n",
         "# Print results\n",
         "sorted , indices = torch.sort(similiar_sims , dim=0 , descending = True)\n",
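The corrected summary line matters because the saved list is a window starting at the chosen offset, not a prefix: with a non-zero START_AT the old message reported an upper index smaller than the starting index. A small illustration of the reported range, using the notebook's variable names:

    START_AT = 10000   # offset selected in the form field
    LIST_SIZE = 1000   # number of closest matches kept

    # Old message reported: 'indices 10000 to 1000'  (upper bound ignored the offset)
    # New message reports:  'indices 10000 to 11000'
    print(f'Saved closest matching indices {START_AT} to {START_AT + LIST_SIZE}')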
@@ -428,12 +421,13 @@
         " #-----#\n",
         " prompt = (prompt + '}').replace('|}', '} ')\n",
         " #------#\n",
-        " print(f'Similiar prompts: \\n\\n\\n
+        " print(f'Similiar prompts: \\n\\n\\n{prompt} \\n\\n\\n//----//')\n",
         "image\n",
         "#-----#\n"
       ],
       "metadata": {
-        "id": "kOYZ8Ajn-DD8"
+        "id": "kOYZ8Ajn-DD8",
+        "cellView": "form"
       },
       "execution_count": null,
       "outputs": []
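This cell, and several below, also gain "cellView": "form", the Colab cell-metadata flag that opens the cell in form view (only the # @title / # @param widgets are shown and the code is collapsed). A hedged sketch of applying that flag to every code cell of a local copy of the notebook with the standard library's json module; the path is only an example, and the commit itself flags only selected cells:

    import json

    # Hypothetical local path; adjust to wherever the notebook copy lives.
    path = 'fusion_t2i_CLIP_interrogator.ipynb'

    with open(path, 'r', encoding='utf-8') as f:
        nb = json.load(f)

    # Mark each code cell so Colab opens it in form view by default.
    for cell in nb['cells']:
        if cell.get('cell_type') == 'code':
            cell.setdefault('metadata', {})['cellView'] = 'form'

    with open(path, 'w', encoding='utf-8') as f:
        json.dump(nb, f, indent=1)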
@@ -467,7 +461,8 @@
         "save_file(_similiar_sims, 'similiar_sims.safetensors')\n"
       ],
       "metadata": {
-        "id": "m-N553nXz9Jd"
+        "id": "m-N553nXz9Jd",
+        "cellView": "form"
       },
       "execution_count": null,
       "outputs": []
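For context, the cell touched here persists the similarity scores with save_file, presumably safetensors.torch.save_file given the .safetensors filename, and a later cell reads them back under the key 'weights'. A minimal round-trip sketch under those assumptions (the tensor size is made up for illustration):

    import torch
    from safetensors.torch import save_file, load_file

    # Dict of tensors keyed the way the notebook expects ('weights').
    _similiar_sims = {'weights': torch.rand(1000)}

    save_file(_similiar_sims, 'similiar_sims.safetensors')   # write to disk
    restored = load_file('similiar_sims.safetensors')        # read back as a dict of tensors
    print(restored['weights'].shape)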
@@ -497,6 +492,8 @@
         "similiar_sims = _similiar_sims['weights'].to(dot_dtype)\n",
         "\n",
         "# @title ⚄ Run the CLIP interrogator on the saved reference\n",
+        "\n",
+        "# @markdown Select which values within the saved list to print\n",
         "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
         "START_AT = 0 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
         "\n",
@@ -526,7 +523,8 @@
         "#-----#\n"
       ],
       "metadata": {
-        "id": "XOMkIKc9-wZz"
+        "id": "XOMkIKc9-wZz",
+        "cellView": "form"
       },
       "execution_count": null,
       "outputs": []
@@ -589,7 +587,8 @@
         " json.dump(_savefile, f)\n"
       ],
       "metadata": {
-        "id": "Q7vpNAXQilbf"
+        "id": "Q7vpNAXQilbf",
+        "cellView": "form"
      },
       "execution_count": null,
       "outputs": []
@@ -831,21 +830,6 @@
       },
       "execution_count": null,
       "outputs": []
-    },
-    {
-      "cell_type": "code",
-      "source": [
-        "# Check the average value for this set\n",
-        "sims = torch.matmul(vocab_encodings.dequantize(),average.t())\n",
-        "sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
-        "for index in range(10):\n",
-        " print(prompts[f'{indices[index].item()}'])"
-      ],
-      "metadata": {
-        "id": "XNHz0hfhHRUu"
-      },
-      "execution_count": null,
-      "outputs": []
     }
   ]
 }
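The cell deleted in this last hunk was a quick sanity check: rank the vocabulary encodings by their dot product against the averaged reference encoding and print the ten closest prompts. A self-contained sketch of the same check, assuming vocab_encodings (a quantized tensor of text encodings, shape [N, dim]), average (the saved reference vector, shape [1, dim]) and the prompts dict (str(index) -> prompt text) are already built as earlier in the notebook:

    import torch

    # Dot product of every vocab encoding against the reference average: shape [N, 1].
    sims = torch.matmul(vocab_encodings.dequantize(), average.t())

    # Highest similarity first; renamed to avoid shadowing the built-in sorted().
    sorted_sims, indices = torch.sort(sims, dim=0, descending=True)

    # Print the ten closest vocab prompts.
    for index in range(10):
        print(prompts[f'{indices[index].item()}'])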