Update index.js
index.js CHANGED
@@ -1,4 +1,4 @@
-import { env, AutoTokenizer, RawImage, Tensor } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';
+import { env, AutoTokenizer, RawImage, Tensor, getSession } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';
 import { getModelJSON } from "https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/src/utils/hub.js";
 import * as ort from "https://cdn.jsdelivr.net/npm/[email protected]/dist/ort.webgpu.mjs";
 
@@ -22,6 +22,9 @@ const BASE_MODEL = "Qwen/Qwen2-VL-2B-Instruct";
 const QUANTIZATION = "q4f16";
 const MAX_SINGLE_CHAT_LENGTH = 10;
 
+// const ONNX_MODEL_BASE_URL=
+console.log(getSession);
+
 status.textContent = 'Loading model...';
 status.textContent = 'Ready';
 
@@ -124,7 +127,7 @@ export async function imageTextToText(
 const pixel_values = image.unsqueeze(0);
 
 const ortSessionA = await ort.InferenceSession.create(
-  `${
+  `${ONNX_MODEL_BASE_URL}/QwenVL_A${suffix}.onnx`,
   { executionProviders: ["webgpu"] }
 );
 
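For context, the last hunk wires an ONNX_MODEL_BASE_URL constant (still commented out in this commit) into the ort.InferenceSession.create call for the "A" graph. Below is a minimal sketch of how that call fits together with onnxruntime-web's WebGPU backend; the base URL, the suffix derivation, and the file name are assumptions for illustration, not values confirmed by this commit.

// Sketch only: load one of the exported QwenVL ONNX graphs with the WebGPU execution provider.
// ONNX_MODEL_BASE_URL and suffix are hypothetical placeholders, not values from this commit.
import * as ort from "https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.webgpu.mjs";

const ONNX_MODEL_BASE_URL = "https://example.com/qwen2-vl-onnx"; // hypothetical host for the .onnx files
const QUANTIZATION = "q4f16";
const suffix = QUANTIZATION ? `_${QUANTIZATION}` : "";           // assumed naming, e.g. QwenVL_A_q4f16.onnx

// InferenceSession.create accepts a model URL (fetched by the runtime) plus session options.
const ortSessionA = await ort.InferenceSession.create(
  `${ONNX_MODEL_BASE_URL}/QwenVL_A${suffix}.onnx`,
  { executionProviders: ["webgpu"] }
);

console.log("inputs:", ortSessionA.inputNames, "outputs:", ortSessionA.outputNames);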