---
pipeline_tag: image-text-to-text
library_name: transformers
---

# PaddleOCR-VL-0.9B

Duplicated from https://huggingface.co/PaddlePaddle/PaddleOCR-VL

Example usage with `transformers`:

```py
import torch
from transformers import AutoModelForCausalLM, AutoProcessor
from transformers.image_utils import load_image

# Pick the best available device: CUDA GPU, Apple Silicon (MPS), or CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

model_id = "pcuenq/PaddleOCR-VL-0.9B"

# The checkpoint ships custom modeling code, so trust_remote_code is required.
model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, dtype=torch.bfloat16
).to(DEVICE).eval()
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Load the image to transcribe.
image_url = "https://fiverr-res.cloudinary.com/images/t_main1,q_auto,f_auto,q_auto,f_auto/gigs/154456946/original/41556aac80fc43dcb29ce656d786c0a6f9b4073f/do-handwritten-text-image-or-pdf-to-word-means-typing-form.jpg"
image = load_image(image_url)

# Build the chat prompt and prepare model inputs.
messages = [{"role": "user", "content": "OCR"}]
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[text], images=[image], return_tensors="pt").to(DEVICE)

# Greedy decoding; raise max_new_tokens for longer documents.
generated = model.generate(**inputs, max_new_tokens=200, do_sample=False)

# Decode only the newly generated tokens, dropping the prompt.
new_tokens = generated[:, inputs["input_ids"].shape[1]:]
answer = processor.batch_decode(new_tokens, skip_special_tokens=True)[0].strip()
print(answer)
```
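
If your pages are stored locally instead of behind a URL, the same calls work with images opened via PIL. The snippet below is a minimal sketch, not part of the original card: it assumes a hypothetical `pages/` directory of page images and simply reuses the `processor`, `model`, `text`, and `DEVICE` defined above.

```py
from pathlib import Path

from PIL import Image

# Hypothetical folder of scanned pages; adjust the path and glob pattern to your data.
for path in sorted(Path("pages").glob("*.png")):
    image = Image.open(path).convert("RGB")
    inputs = processor(text=[text], images=[image], return_tensors="pt").to(DEVICE)
    generated = model.generate(**inputs, max_new_tokens=200, do_sample=False)
    # Keep only the tokens generated after the prompt before decoding.
    new_tokens = generated[:, inputs["input_ids"].shape[1]:]
    page_text = processor.batch_decode(new_tokens, skip_special_tokens=True)[0].strip()
    print(f"--- {path.name} ---")
    print(page_text)
```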