Datasets: Uploading tokenizer_robustness_completion_farsi_spelled_out subset

README.md CHANGED

@@ -48,6 +48,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_farsi_romanization/test-*
+- config_name: tokenizer_robustness_completion_farsi_spelled_out
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_farsi_spelled_out/test-*
 dataset_info:
 - config_name: tokenizer_robustness_completion_farsi_arabic_keyboard_for_farsi
   features:
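
For reference, a minimal sketch of loading the new config with the `datasets` library. The repository id is not shown in this diff, so "<org>/<dataset>" below is a placeholder:

from datasets import load_dataset

# Placeholder repo id: the actual repository is not named in this diff.
ds = load_dataset(
    "<org>/<dataset>",
    name="tokenizer_robustness_completion_farsi_spelled_out",
    split="test",
)
print(ds)  # per the metadata below, the test split holds 14 examples
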

@@ -1289,6 +1293,130 @@ dataset_info:
     num_examples: 40
   download_size: 40527
   dataset_size: 21870
+- config_name: tokenizer_robustness_completion_farsi_spelled_out
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 8526
+    num_examples: 14
+  download_size: 31471
+  dataset_size: 8526
 ---

 # Dataset Card for Tokenization Robustness
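
The three struct columns above map tokenizer identifiers to per-tokenizer values (cosine similarities as float64, token counts as int64). A hedged sketch of reading them, assuming `ds` was loaded as in the earlier snippet:

row = ds[0]

# Struct columns come back as plain dicts keyed by tokenizer name.
print(row["token_counts"]["gpt2"])                               # int64 token count
print(row["vanilla_cos_sim_to_canonical"]["google/gemma-2-2b"])  # float64
print(row["trimmed_cos_sim_to_canonical"]["google/gemma-2-2b"])  # float64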

tokenizer_robustness_completion_farsi_spelled_out/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:749756a138205ff9490bb36094c659b0e4c15025cac409f5558d5c441e8db689
+size 31471
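
The new parquet file is stored through Git LFS, so the commit records only a pointer: the spec version, a SHA-256 oid, and the byte size. A hedged sketch of checking a locally downloaded copy against that pointer; the local path is an assumption:

import hashlib
import os

# Assumed local path; adjust to wherever the file was downloaded.
path = "tokenizer_robustness_completion_farsi_spelled_out/test-00000-of-00001.parquet"

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert os.path.getsize(path) == 31471  # `size` from the LFS pointer
assert digest == "749756a138205ff9490bb36094c659b0e4c15025cac409f5558d5c441e8db689"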