| | """ |
| | Take a CLIPTextModel compatible text encoder. |
| | Go through the official range of tokens IDs (0-49405) |
| | Generate the official "embedding" tensor for each one. |
| | Save the result set to "temp.allids.safetensors" |
| | |
| | Defaults to loading openai/clip-vit-large-patch14 from huggingface hub. |
| | However, can take optional pair of arguments to a .safetensor model, and config file |
| | RULES of the loader: |
| | 1. the model file must appear to be either in current directory or one down. So, |
| | badpath1=some/directory/tree/file.here |
| | badpath2=/absolutepath |
| | 2. yes, you MUST have a matching config.json file |
| | 3. if you have no alternative, you can get away with using pytorch_model.bin |
| | |
| | Sample location for such things that you can download: |
| | https://huggingface.co/stablediffusionapi/edge-of-realism/tree/main/text_encoder/ |
| | If there is a .safetensors AND a .bin file, ignore the .bin file |
| | |
| | You can also convert a singlefile model, such as is downloaded from civitai, |
| | by using the utility at |
| | https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py |
| | Args should look like |
| | convert_original_stable_diffusion_to_diffusers.py --checkpoint_file somemodel.safetensors \ |
| | --dump_path extractdir --to_safetensors --from_safetensors |
| | |
| | """ |

import sys
import torch
from safetensors.torch import save_file
from transformers import CLIPProcessor, CLIPTextModel

clipsrc = "openai/clip-vit-large-patch14"
processor = None
model = None
encfile = None
configfile = None

# Optional overrides: a local .safetensors text encoder plus its config.json.
if len(sys.argv) == 3:
    encfile = sys.argv[1]
    configfile = sys.argv[2]

# Fall back to CPU when no GPU is present.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def init():
    global processor
    global model

    print("loading processor from " + clipsrc, file=sys.stderr)
    processor = CLIPProcessor.from_pretrained(clipsrc)
    print("done", file=sys.stderr)

    if encfile is not None:
        print("loading model from " + encfile, file=sys.stderr)
        model = CLIPTextModel.from_pretrained(
            encfile, config=configfile, local_files_only=True, use_safetensors=True
        )
    else:
        print("loading model from " + clipsrc, file=sys.stderr)
        model = CLIPTextModel.from_pretrained(clipsrc)

    print("done", file=sys.stderr)

    model = model.to(device)

def embed_from_inputs(inputs):
    # Return the pooled text embedding for already-tokenized inputs.
    with torch.no_grad():
        outputs = model(**inputs)
        embedding = outputs.pooler_output

    return embedding
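
# Quick sanity check you can run after init() (a sketch; assumes the default
# ViT-L/14 encoder, whose pooled embedding is 768-dimensional):
#
#   probe = processor(text="dummy", return_tensors="pt").to(device)
#   print(embed_from_inputs(probe).shape)   # expected: torch.Size([1, 768])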

init()

# Tokenize a one-token dummy prompt. input_ids comes back as
# [<start>, token, <end>], so position 1 is the slot we overwrite
# with each candidate token id below.
inputs = processor(text="dummy", return_tensors="pt")
inputs = inputs.to(device)

all_embeddings = []

# IDs 0-49405 inclusive, per the docstring; 49406/49407 are the
# <|startoftext|>/<|endoftext|> special tokens and are skipped.
for token_id in range(49406):
    inputs.input_ids[0][1] = token_id

    emb = embed_from_inputs(inputs)
    all_embeddings.append(emb)
    if (token_id % 100) == 0:
        print(token_id)

# Each pooled embedding is [1, dim]; concatenating along dim 0 gives [num_ids, dim].
embs = torch.cat(all_embeddings, dim=0)
print("Shape of result = ", embs.shape)

outputfile = "cliptextmodel.temp.allids.safetensors"
print(f"Saving the calculations to {outputfile}...")
save_file({"embeddings": embs}, outputfile)
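
# Reading the file back later (a sketch; only the "embeddings" key written
# above is guaranteed by this script):
#
#   from safetensors.torch import load_file
#   embs = load_file("cliptextmodel.temp.allids.safetensors")["embeddings"]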