import torch
from transformers import AutoProcessor, AutoModelForVision2Seq
from PIL import Image
# Define the correct model name from Hugging Face
MODEL_NAME = "deepseek-ai/deepseek-vl2-small"
# Load processor & model with trust_remote_code=True
processor = AutoProcessor.from_pretrained(MODEL_NAME, trust_remote_code=True)
device = "cuda" if torch.cuda.is_available() else "cpu"

model = AutoModelForVision2Seq.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16,
    trust_remote_code=True,  # this allows loading custom model implementations
).to(device)
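# NOTE: float16 weights are really meant for GPU execution; half-precision
# inference on CPU often fails outright (e.g. "addmm_impl_cpu_ not implemented
# for 'Half'"), so on CPU-only machines consider torch_dtype=torch.float32.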

# Test function to process an image
def predict(image_path):
    image = Image.open(image_path).convert("RGB")

    # Process input
    inputs = processor(images=image, return_tensors="pt").to(device)

    # Generate output
    output = model.generate(**inputs)

    # Decode response
    generated_text = processor.batch_decode(output, skip_special_tokens=True)[0]
    return generated_text
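
# A hedged tweak, not part of the original script: without explicit arguments,
# generate() falls back to the model's default generation config, which can cap
# output at only a few tokens. max_new_tokens and do_sample are standard
# transformers generate() kwargs:
#
#   output = model.generate(**inputs, max_new_tokens=256, do_sample=False)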

# Example usage
if __name__ == "__main__":
    test_image_path = "test.jpg"  # Replace with an actual image path
    print("Generated Output:", predict(test_image_path))
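
# Illustrative run, assuming a test.jpg sits next to this script (the script
# name and printed text below are placeholders, not captured output):
#
#   $ python deepseek_vl2_demo.py
#   Generated Output: ...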