Build error
add decorator to functions
app.py CHANGED
@@ -45,37 +45,38 @@ aihub_deplot_model_path='./deplot_k.pt'
 t5_model_path = './ke_t5.pt'
 
 # Load first model ko-deplot
-processor1 = Pix2StructProcessor.from_pretrained('nuua/ko-deplot')
-model1 = Pix2StructForConditionalGeneration.from_pretrained('nuua/ko-deplot')
 @spaces.GPU(enable_queue=True)
-
-
-model1.
+def load_model1():
+    processor1 = Pix2StructProcessor.from_pretrained('nuua/ko-deplot')
+    model1 = Pix2StructForConditionalGeneration.from_pretrained('nuua/ko-deplot')
+    model1.load_state_dict(torch.load(ko_deplot_model_path, map_location=device))
+    model1.to(device)
+    return processor1,model1
+
+processor1,model1=load_model1()
 
 # Load second model aihub-deplot
-processor2 = AutoProcessor.from_pretrained("ybelkada/pix2struct-base")
-model2 = Pix2StructForConditionalGeneration.from_pretrained("ybelkada/pix2struct-base")
 @spaces.GPU(enable_queue=True)
-
+def load_model2():
+    processor2 = AutoProcessor.from_pretrained("ybelkada/pix2struct-base")
+    model2 = Pix2StructForConditionalGeneration.from_pretrained("ybelkada/pix2struct-base")
+    model2.load_state_dict(torch.load(aihub_deplot_model_path, map_location=device))
+    model2.to(device)
+    return processor2,model2
 
+processor2,model2=load_model2()
 
-tokenizer = T5Tokenizer.from_pretrained("KETI-AIR/ke-t5-base")
-t5_model = T5ForConditionalGeneration.from_pretrained("KETI-AIR/ke-t5-base")
-@spaces.GPU(enable_queue=True)
-t5_model.load_state_dict(torch.load(t5_model_path, map_location=device))
-
-@spaces.GPU(enable_queue=True)
-model2.to(device)
-@spaces.GPU(enable_queue=True)
-t5_model.to(device)
 
 #Load third model unichart
-unichart_model_path = "./unichart"
-model3 = VisionEncoderDecoderModel.from_pretrained(unichart_model_path)
-processor3 = DonutProcessor.from_pretrained(unichart_model_path)
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 @spaces.GPU(enable_queue=True)
-
+def load_model3():
+    unichart_model_path = "./unichart"
+    model3 = VisionEncoderDecoderModel.from_pretrained(unichart_model_path)
+    processor3 = DonutProcessor.from_pretrained(unichart_model_path)
+    model3.to(device)
+    return processor3,model3
+
+processor3,model3=load_model3()
 
 #ko-deplot 추론함수
 # Function to format output
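
In the old code, @spaces.GPU(enable_queue=True) sat directly above bare statements (for example model2.to(device) and t5_model.to(device)); a Python decorator can only precede a def or class, so the module fails to import with a SyntaxError, which is consistent with the Build error status above. The commit therefore wraps each model load in a function and calls it once at import time. Below is a minimal, self-contained sketch of the same pattern, extended to a GPU-bound inference call of the kind the "#ko-deplot 추론함수" (ko-deplot inference function) comment introduces. predict_ko_deplot, its prompt string, and max_new_tokens are illustrative assumptions rather than code from this Space, and the decorator arguments simply mirror the diff.

    # Sketch only: the decorator is attached to def statements, never to bare statements.
    # The Space also loads local .pt checkpoints in its loaders; that step is omitted
    # here so the sketch runs standalone.
    import torch
    import spaces
    from transformers import Pix2StructProcessor, Pix2StructForConditionalGeneration

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    @spaces.GPU(enable_queue=True)  # legal: decorates a function, as in the new code
    def load_model1():
        processor1 = Pix2StructProcessor.from_pretrained("nuua/ko-deplot")
        model1 = Pix2StructForConditionalGeneration.from_pretrained("nuua/ko-deplot")
        model1.to(device)
        return processor1, model1

    processor1, model1 = load_model1()

    @spaces.GPU(enable_queue=True)  # hypothetical inference function, not taken from app.py
    def predict_ko_deplot(image):
        # DePlot-style prompt; the prompt actually used by app.py is not shown in this hunk.
        inputs = processor1(
            images=image,
            text="Generate underlying data table of the figure below:",
            return_tensors="pt",
        ).to(device)
        generated_ids = model1.generate(**inputs, max_new_tokens=512)
        return processor1.batch_decode(generated_ids, skip_special_tokens=True)[0]

In typical ZeroGPU usage, @spaces.GPU marks the functions that need the GPU while a request is being served, most often the inference functions; whether it is also useful on one-off loader calls depends on whether those calls touch CUDA at import time.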