Upload fusion_t2i_CLIP_interrogator.ipynb
Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb
CHANGED
@@ -25,6 +25,17 @@
| 25 |   "id": "cRV2YWomjMBU"
| 26 |   }
| 27 |   },
| 28 |   {
| 29 |   "cell_type": "code",
| 30 |   "source": [
@@ -57,7 +68,18 @@
| 57 |   " #----#\n",
| 58 |   " %cd {home_directory}\n",
| 59 |   " !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n",
| 60 | - " loaded = True
| 61 |   " %cd {home_directory + 'fusion-t2i-generator-data/'}\n",
| 62 |   " !unzip vocab.zip\n",
| 63 |   " !unzip reference.zip\n",
@@ -104,8 +126,7 @@
| 104 |   "for key in torch.load('reference_text_and_image_encodings.pt', weights_only=False):\n",
| 105 |   " index = index + 1;\n",
| 106 |   "#------#\n",
| 107 | - "NUM_REFERENCE_ITEMS = index
| 108 | - "\n"
| 109 |   ],
| 110 |   "metadata": {
| 111 |   "id": "TC5lMJrS1HCC"
@@ -261,26 +282,136 @@
| 261 |   "image or print('No image found')"
| 262 |   ],
| 263 |   "metadata": {
| 264 | - "id": "NqL_I3ZSrISq"
| 265 |   },
| 266 |   "execution_count": null,
| 267 |   "outputs": []
| 268 |   },
| 269 |   {
| 270 |   "cell_type": "code",
| 271 |   "source": [
| 272 | - "#
| 273 | - "
| 274 | - "
| 275 | - "
| 276 | - "
| 277 |   ],
| 278 |   "metadata": {
| 279 | - "
| 280 |   },
| 281 |   "execution_count": null,
| 282 |   "outputs": []
| 283 |   },
| 284 |   {
| 285 |   "cell_type": "code",
| 286 |   "source": [
@@ -594,59 +725,54 @@
| 594 |   {
| 595 |   "cell_type": "code",
| 596 |   "source": [
| 597 | - "# @title \t⚄
| 598 | - "\n",
| 599 | - "def get_num_vocab_items(_url):\n",
| 600 | - " num_vocab_items = 0\n",
| 601 | - " for item in _url.split('_'):\n",
| 602 | - " if item.find('safetensors')>-1: num_vocab_items = int(item.replace('.safetensors', ''))\n",
| 603 | - " #------#\n",
| 604 | - " return num_vocab_items-1\n",
| 605 |   "\n",
| 606 |   "\n",
| 607 | - "
| 608 | - "
| 609 | - " _SCALE = torch.tensor(0.0043).to(dot_dtype)\n",
| 610 | - " _DIM = 768\n",
| 611 | - " _vocab = {}\n",
| 612 | - " #----#\n",
| 613 | - " inputs = tokenizer(text = _ref.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
| 614 | - " ref = model.get_text_features(**inputs)[0]\n",
| 615 | - " ref = (ref/ref.norm(p=2, dim=-1, keepdim=True)).to(dtype = dot_dtype)\n",
| 616 | - " #-----#\n",
| 617 | - " num_vocab_items = 0\n",
| 618 | - " for url in urls:\n",
| 619 | - " num_vocab_items = num_vocab_items + get_num_vocab_items(url)\n",
| 620 | - " #------#\n",
| 621 | - " vocab = torch.zeros(num_vocab_items , _DIM).to(torch.uint8)\n",
| 622 |   " prompts = {}\n",
| 623 | - "
| 624 | - "
| 625 | - "
| 626 | - "
| 627 | - "
| 628 | - "
| 629 | - "
| 630 | - "
| 631 | - "
| 632 | - "
| 633 | - "
| 634 | - "
| 635 | - "
| 636 | - "
| 637 | - " for index in range(
| 638 | - "
| 639 | - "
| 640 | - "
| 641 | - "
| 642 |   " #-------#\n",
| 643 | - "
| 644 | - "
| 645 | - "\n"
| 646 |   ],
| 647 |   "metadata": {
| 648 |   "cellView": "form",
| 649 | - "id": "
| 650 |   },
| 651 |   "execution_count": null,
| 652 |   "outputs": []
@@ -654,31 +780,14 @@
| 654 |   {
| 655 |   "cell_type": "code",
| 656 |   "source": [
| 657 | - "
| 658 | - "
| 659 | - "
| 660 | - "
| 661 | - "
| 662 | - "\n",
| 663 | - " #'/content/fusion-t2i-generator-data/clip_vocab_q0043_541291.safetensors' , '/content/fusion-t2i-generator-data/lyrics_vocab_q0043_41905.safetensors' , '/content/fusion-t2i-generator-data/names_vocab_q0043_162977.safetensors' , '/content/fusion-t2i-generator-data/r34_vocab_q0043_96166.safetensors' ]\n",
| 664 | - "\n",
| 665 | - "indices , prompts , sims = get_similiar(ref , urls , LIST_SIZE)\n",
| 666 | - "\n",
| 667 | - "index = 0\n",
| 668 | - "_prompts = {}\n",
| 669 | - "for index in range(203662):\n",
| 670 | - " try:\n",
| 671 | - " key = prompts[f'{indices[index].item()}']\n",
| 672 | - " print(key)\n",
| 673 | - " except: print('Not found!')\n",
| 674 | - " #_prompts[f'{key}'] = sims[key].item()\n",
| 675 | - " index = index + 1\n",
| 676 | - " if index>LIST_SIZE:break\n",
| 677 | - "\n"
| 678 |   ],
| 679 |   "metadata": {
| 680 | - "
| 681 | - "id": "Azz1kCza6LB3"
| 682 |   },
| 683 |   "execution_count": null,
| 684 |   "outputs": []
| 25 |   "id": "cRV2YWomjMBU"
| 26 |   }
| 27 |   },
| 28 | + {
| 29 | + "cell_type": "markdown",
| 30 | + "source": [
| 31 | + "THIS IS AN OLD VERSION OF THE CLIP INTERROGATOR.\n",
| 32 | + "\n",
| 33 | + "YOU WILL FIND THE UP TO DATE VERSION HERE:https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data/tree/main/Google%20Colab%20Jupyter%20Notebooks"
| 34 | + ],
| 35 | + "metadata": {
| 36 | + "id": "9slWHq0JIX6D"
| 37 | + }
| 38 | + },
| 39 |   {
| 40 |   "cell_type": "code",
| 41 |   "source": [
| 68 |   " #----#\n",
| 69 |   " %cd {home_directory}\n",
| 70 |   " !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n",
| 71 | + " loaded = True"
| 72 | + ],
| 73 | + "metadata": {
| 74 | + "id": "A30Xl4BswyEr"
| 75 | + },
| 76 | + "execution_count": null,
| 77 | + "outputs": []
| 78 | + },
| 79 | + {
| 80 | + "cell_type": "code",
| 81 | + "source": [
| 82 | + "\n",
| 83 |   " %cd {home_directory + 'fusion-t2i-generator-data/'}\n",
| 84 |   " !unzip vocab.zip\n",
| 85 |   " !unzip reference.zip\n",
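The cell above fetches the dataset with a git clone followed by unzip calls. A minimal alternative sketch using the huggingface_hub client is shown below; the repo id comes from the clone URL above, while the target directory is purely illustrative.

```python
# Sketch only: download the same dataset repo without git, then unzip the archives.
from huggingface_hub import snapshot_download
import zipfile, os

local_dir = '/content/fusion-t2i-generator-data'  # illustrative target path
snapshot_download(repo_id='codeShare/fusion-t2i-generator-data',
                  repo_type='dataset', local_dir=local_dir)

for name in ('vocab.zip', 'reference.zip'):
    with zipfile.ZipFile(os.path.join(local_dir, name)) as zf:
        zf.extractall(local_dir)
```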
| 126 |   "for key in torch.load('reference_text_and_image_encodings.pt', weights_only=False):\n",
| 127 |   " index = index + 1;\n",
| 128 |   "#------#\n",
| 129 | + "NUM_REFERENCE_ITEMS = index"
| 130 |   ],
| 131 |   "metadata": {
| 132 |   "id": "TC5lMJrS1HCC"
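The counter loop in this hunk just counts the keys of the loaded object. Assuming the .pt file holds a dict-like mapping, the same number can be read directly, as in this sketch:

```python
# Sketch: equivalent to the index-counting loop above, assuming the .pt file
# holds a dict-like mapping of reference text/image encodings.
import torch

reference = torch.load('reference_text_and_image_encodings.pt', weights_only=False)
NUM_REFERENCE_ITEMS = len(reference)
```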
| 282 |   "image or print('No image found')"
| 283 |   ],
| 284 |   "metadata": {
| 285 | + "id": "NqL_I3ZSrISq",
| 286 | + "cellView": "form"
| 287 |   },
| 288 |   "execution_count": null,
| 289 |   "outputs": []
| 290 |   },
| 291 | + {
| 292 | + "cell_type": "markdown",
| 293 | + "source": [],
| 294 | + "metadata": {
| 295 | + "id": "ouE3KYiJefac"
| 296 | + }
| 297 | + },
| 298 |   {
| 299 |   "cell_type": "code",
| 300 |   "source": [
| 301 | + "# @title ⚄ New interrogator code using quantized text corpus\n",
| 302 | + "ref = '' # @param {type:'string' , placeholder:'type a single prompt to match'}\n",
| 303 | + "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
| 304 | + "\n",
| 305 | + "# @markdown Select vocab\n",
| 306 | + "fanfic = False # @param {type:\"boolean\"}\n",
| 307 | + "civitai = True # @param {type:\"boolean\"}\n",
| 308 | + "names = True # @param {type:\"boolean\"}\n",
| 309 | + "r34 = True # @param {type:\"boolean\"}\n",
| 310 | + "\n",
| 311 | + "from transformers import AutoTokenizer\n",
| 312 | + "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
| 313 | + "from transformers import CLIPProcessor, CLIPModel\n",
| 314 | + "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
| 315 | + "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
| 316 | + "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
| 317 | + "dot_dtype = torch.float32\n",
| 318 | + "inputs = tokenizer(text = ref.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
| 319 | + "ref = model.get_text_features(**inputs)[0]\n",
| 320 | + "ref = (ref/ref.norm(p=2, dim=-1, keepdim=True)).to(dtype = dot_dtype)\n",
| 321 | + "#-----#\n",
| 322 | + "prompts_folder = f'{home_directory}fusion-t2i-generator-data/vocab/text'\n",
| 323 | + "encodings_folder = f'{home_directory}fusion-t2i-generator-data/vocab/text_encodings'\n",
| 324 | + "#----#\n",
| 325 | + "dim = 768\n",
| 326 | + "scale = 0.0043\n",
| 327 | + "size = 0\n",
| 328 | + "#------#\n",
| 329 | + "for filename in os.listdir(prompts_folder):\n",
| 330 | + " if (not civitai and filename.find('civitai')>-1):continue\n",
| 331 | + " if (not fanfic and filename.find('fanfic')>-1):continue\n",
| 332 | + " if (not r34 and filename.find('r34')>-1):continue\n",
| 333 | + " if (not names and filename.find('names')>-1):continue\n",
| 334 | + " size = size + LIST_SIZE\n",
| 335 | + "#-------#\n",
| 336 | + "similiar_sims = torch.zeros(size)\n",
| 337 | + "similiar_prompts = {}\n",
| 338 | + "_index = 0\n",
| 339 | + "#-------#\n",
| 340 | + "similiar_encodings = {}\n",
| 341 | + "for filename in os.listdir(prompts_folder):\n",
| 342 | + " if (not civitai and filename.find('civitai')>-1):continue\n",
| 343 | + " if (not fanfic and filename.find('fanfic')>-1):continue\n",
| 344 | + " if (not r34 and filename.find('r34')>-1):continue\n",
| 345 | + " if (not names and filename.find('names')>-1):continue\n",
| 346 | + " #------#\n",
| 347 | + " root_filename = filename.replace('.json', '')\n",
| 348 | + " %cd {prompts_folder}\n",
| 349 | + " prompts = {}\n",
| 350 | + " with open(f'{root_filename}.json', 'r') as f:\n",
| 351 | + " data = json.load(f).items()\n",
| 352 | + " for key,value in data:\n",
| 353 | + " prompts[key] = value\n",
| 354 | + " num_items = int(prompts['num_items'])\n",
| 355 | + " #------#\n",
| 356 | + " %cd {encodings_folder}\n",
| 357 | + " _text_encodings = load_file(f'{root_filename}.safetensors')['weights'].to(torch.uint8)\n",
| 358 | + "\n",
| 359 | + " text_encodings = torch.zeros(num_items , dim)\n",
| 360 | + " tmp = torch.ones(dim).to(dot_dtype)\n",
| 361 | + " for index in range(num_items):\n",
| 362 | + " text_encodings[index] = torch.sub(_text_encodings[index][1:dim+1].to(dot_dtype) , tmp , alpha= _text_encodings[index][0].to(dot_dtype))\n",
| 363 | + " #------#\n",
| 364 | + " sims = torch.matmul(text_encodings*scale, ref.t())\n",
| 365 | + " sorted , indices = torch.sort(sims , dim=0 , descending = True)\n",
| 366 | + " for index in range(LIST_SIZE):\n",
| 367 | + " key = indices[index].item()\n",
| 368 | + " prompt = prompts[f'{key}']\n",
| 369 | + " #-------#\n",
| 370 | + " similiar_sims[_index] = torch.tensor(round(sims[key].item(), 5))\n",
| 371 | + " similiar_prompts[f'{_index}'] = prompt\n",
| 372 | + " _index = _index + 1\n",
| 373 | + " #-------#\n",
| 374 | + " continue\n",
| 375 | + "#---------#\n"
| 376 |   ],
| 377 |   "metadata": {
| 378 | + "cellView": "form",
| 379 | + "id": "w2dfozFY5IwM"
| 380 | + },
| 381 | + "execution_count": null,
| 382 | + "outputs": []
| 383 | + },
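The new cell above stores each vocab row as 769 uint8 values: a per-row zero point followed by 768 quantized components, recovered as scale * (q - zero_point) with scale = 0.0043. A minimal sketch of that dequantize-and-rank step is shown below; the helper names are illustrative and not part of the notebook.

```python
import torch

SCALE = 0.0043   # quantization scale used by the cell above
DIM = 768        # CLIP ViT-L/14 text embedding width

def dequantize_row(row_u8: torch.Tensor) -> torch.Tensor:
    # row layout assumed from the cell above: [zero_point, q_0, ..., q_767]
    zero_point = row_u8[0].to(torch.float32)
    q = row_u8[1:DIM + 1].to(torch.float32)
    return SCALE * (q - zero_point)

def rank_rows(rows_u8: torch.Tensor, ref: torch.Tensor, top_k: int = 5) -> torch.Tensor:
    # ref is the L2-normalised CLIP text feature computed earlier in the cell
    encodings = torch.stack([dequantize_row(r) for r in rows_u8])
    sims = encodings @ ref            # dot products, proportional to cosine similarity
    return torch.sort(sims, descending=True).indices[:top_k]
```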
| 384 | + {
| 385 | + "cell_type": "code",
| 386 | + "source": [
| 387 | + "# @title ⚄ Printing results from text corpus\n",
| 388 | + "sorted , indices = torch.sort(similiar_sims , dim=0 , descending = True)\n",
| 389 | + "\n",
| 390 | + "include_similiarity = False # @param {type:\"boolean\"}\n",
| 391 | + "for index in range(LIST_SIZE):\n",
| 392 | + " key = indices[index].item()\n",
| 393 | + " sim = similiar_sims[key].item()\n",
| 394 | + " prompt = similiar_prompts[f'{key}']\n",
| 395 | + " #-------#\n",
| 396 | + " if include_similiarity :print(f'{prompt} - {round(sim*100,1)} %')\n",
| 397 | + " else: print(f'{prompt}')"
| 398 | + ],
| 399 | + "metadata": {
| 400 | + "cellView": "form",
| 401 | + "id": "E3kfOKXITDI9"
| 402 |   },
| 403 |   "execution_count": null,
| 404 |   "outputs": []
| 405 |   },
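Since only the top LIST_SIZE entries are printed, a torch.topk call over the collected similarities is an equivalent, slightly cheaper alternative to the full sort used above. A sketch, reusing the cell's variable names:

```python
import torch

def top_matches(similiar_sims: torch.Tensor, similiar_prompts: dict, list_size: int):
    # returns (prompt, similarity %) pairs in the same order the cell above prints them
    values, indices = torch.topk(similiar_sims, k=list_size)
    return [(similiar_prompts[f'{i.item()}'], round(v.item() * 100, 1))
            for i, v in zip(indices, values)]
```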
| 406 | + {
| 407 | + "cell_type": "markdown",
| 408 | + "source": [
| 409 | + "OTHER STUFF BELOW"
| 410 | + ],
| 411 | + "metadata": {
| 412 | + "id": "FRIqYJDEebpf"
| 413 | + }
| 414 | + },
| 415 |   {
| 416 |   "cell_type": "code",
| 417 |   "source": [
| 725 |   {
| 726 |   "cell_type": "code",
| 727 |   "source": [
| 728 | + "# @title \t⚄ Quick fix for normalizing encoded text corpus tensors\n",
| 729 |   "\n",
| 730 | + "import os\n",
| 731 | + "my_mkdirs('/content/output')\n",
| 732 | + "my_mkdirs('/content/output/text_encodings')\n",
| 733 |   "\n",
| 734 | + "for filename in os.listdir(f'{prompts_folder}'):\n",
| 735 | + " %cd {prompts_folder}\n",
| 736 |   " prompts = {}\n",
| 737 | + " with open(f'{filename}', 'r') as f:\n",
| 738 | + " data = json.load(f).items()\n",
| 739 | + " for key,value in data:\n",
| 740 | + " prompts[key] = value\n",
| 741 | + " #------#\n",
| 742 | + " num_items = int(prompts['num_items'])\n",
| 743 | + "\n",
| 744 | + " %cd {encodings_folder}\n",
| 745 | + " enc_filename = filename.replace('json', 'safetensors')\n",
| 746 | + " _text_encodings = load_file(f'{enc_filename}')['weights'].to(torch.uint8)\n",
| 747 | + " text_encodings = torch.zeros(num_items , dim)\n",
| 748 | + " tmp = torch.ones(dim)\n",
| 749 | + " tmp2 = torch.tensor(1/0.0043)\n",
| 750 | + " zero_point = 0\n",
| 751 | + " for index in range(num_items):\n",
| 752 | + " text_encodings[index] = torch.tensor(0.0043) * torch.sub(_text_encodings[index][1:dim+1] , tmp , alpha= _text_encodings[index][0]).to(torch.float32)\n",
| 753 | + " text_encodings[index] = tmp2*text_encodings[index]/text_encodings[index].norm(p=2, dim=-1, keepdim = True)\n",
| 754 | + " test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
| 755 | + " less_than_zero = test<0\n",
| 756 | + " while(torch.any(less_than_zero).item()):\n",
| 757 | + " zero_point = zero_point + 1\n",
| 758 | + " test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
| 759 | + " less_than_zero = test<0\n",
| 760 | + " #------#\n",
| 761 | + " _text_encodings[index][0] = zero_point\n",
| 762 | + " _text_encodings[index][1:dim+1] = test\n",
| 763 |   " #-------#\n",
| 764 | + " %cd /content/output/text_encodings\n",
| 765 | + "\n",
| 766 | + " tmp = {}\n",
| 767 | + " tmp['weights'] = _text_encodings.to(torch.uint8)\n",
| 768 | + " tmp['num_items'] = torch.tensor(num_items).to(torch.uint8)\n",
| 769 | + " tmp['scale'] = torch.tensor(0.0043)\n",
| 770 | + " save_file(tmp , f'{enc_filename}')\n",
| 771 | + "#------#"
| 772 |   ],
| 773 |   "metadata": {
| 774 |   "cellView": "form",
| 775 | + "id": "9qgHW1Wr7kZn"
| 776 |   },
| 777 |   "execution_count": null,
| 778 |   "outputs": []
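The re-quantization cell above finds the per-row zero point by incrementing it until no rounded component is negative. Under the same row layout, that offset can also be computed in closed form; the sketch below may land one step above the loop's result but always yields non-negative values, and it clamps to the uint8 range, which the loop does not. Helper names are illustrative.

```python
import torch

def find_zero_point(x: torch.Tensor) -> int:
    # smallest non-negative integer shift so that every component of x + zp is >= 0
    return max(0, int(torch.ceil(-x.min()).item()))

def quantize_row(x: torch.Tensor) -> torch.Tensor:
    # inverse of the dequantization used earlier: store [zero_point, round(x + zero_point)]
    zp = find_zero_point(x)
    q = torch.round(x + zp).clamp(0, 255)
    return torch.cat([torch.tensor([float(zp)]), q]).to(torch.uint8)
```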
|
| 780 |
{
|
| 781 |
"cell_type": "code",
|
| 782 |
"source": [
|
| 783 |
+
"# Check the average value for this set\n",
|
| 784 |
+
"sims = torch.matmul(vocab_encodings.dequantize(),average.t())\n",
|
| 785 |
+
"sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
|
| 786 |
+
"for index in range(10):\n",
|
| 787 |
+
" print(prompts[f'{indices[index].item()}'])"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 788 |
],
|
| 789 |
"metadata": {
|
| 790 |
+
"id": "XNHz0hfhHRUu"
|
|
|
|
| 791 |
},
|
| 792 |
"execution_count": null,
|
| 793 |
"outputs": []
|
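The last cell ranks the vocab against an `average` tensor that is defined elsewhere in the notebook. One plausible way such a centroid could be built from several reference prompts, reusing the tokenizer and model loaded in the earlier cell, is sketched below; the helper name is illustrative.

```python
import torch

def average_text_feature(prompt_list):
    # encode each prompt with the CLIP text encoder, L2-normalise, then average
    feats = []
    for p in prompt_list:
        inputs = tokenizer(text=p.strip(), truncation=True, padding=True, return_tensors='pt')
        f = model.get_text_features(**inputs)[0]
        feats.append(f / f.norm(p=2, dim=-1, keepdim=True))
    avg = torch.stack(feats).mean(dim=0)
    return avg / avg.norm(p=2, dim=-1, keepdim=True)
```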