gsaltintas committed on
Commit 44b6ea3 · verified · 1 Parent(s): dd2a5fd

Uploading tokenizer_robustness_completion_farsi_dialects subset

README.md CHANGED

@@ -24,6 +24,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_farsi_colloquial/test-*
+- config_name: tokenizer_robustness_completion_farsi_dialects
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_farsi_dialects/test-*
 dataset_info:
 - config_name: tokenizer_robustness_completion_farsi_arabic_keyboard_for_farsi
   features:
@@ -521,6 +525,130 @@ dataset_info:
     num_examples: 40
   download_size: 40120
   dataset_size: 23404
+- config_name: tokenizer_robustness_completion_farsi_dialects
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 217454
+    num_examples: 360
+  download_size: 105228
+  dataset_size: 217454
 ---
 
 # Dataset Card for Tokenization Robustness
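Once this commit lands, the new subset is addressable as its own config. Below is a minimal sketch of loading it with the `datasets` library; the repo id `gsaltintas/tokenizer-robustness` is a hypothetical placeholder (only the uploader's username appears in this commit), while the config name, split, and field names come straight from the front matter above.

```python
# Minimal sketch: load the new dialects config and inspect its fields.
# The repo id below is a hypothetical placeholder; the config name,
# split, and features match the dataset_info block in this commit.
from datasets import load_dataset

ds = load_dataset(
    "gsaltintas/tokenizer-robustness",  # hypothetical repo id
    "tokenizer_robustness_completion_farsi_dialects",
    split="test",
)

print(len(ds))  # 360 examples, per num_examples above

row = ds[0]
print(row["question"])
print(row["choices"], row["answer_label"])

# The per-tokenizer diagnostics are structs keyed by tokenizer name,
# so they surface in Python as plain dicts:
print(row["token_counts"]["gpt2"])                               # int64
print(row["vanilla_cos_sim_to_canonical"]["google/gemma-2-2b"])  # float64
```

Struct features deserialize to dicts, so the fourteen tokenizer measurements stay grouped under a single field per metric rather than flattening into fourteen separate top-level columns.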
tokenizer_robustness_completion_farsi_dialects/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3372237a274a3008aaa46245e8d0f425b03cd4db5ee73c070282d2e59a62bafc
+size 105228
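The parquet shard itself lives in git-lfs; the three pointer lines above record only the spec version, the content hash (oid), and the byte size. A minimal sketch for checking a locally downloaded shard against that pointer; the local path is an assumption:

```python
# Minimal sketch: verify a downloaded shard against the LFS pointer above.
import hashlib
from pathlib import Path

# Assumed local path to the downloaded shard; adjust as needed.
shard = Path(
    "tokenizer_robustness_completion_farsi_dialects/test-00000-of-00001.parquet"
)
data = shard.read_bytes()

assert len(data) == 105228, "size does not match the pointer"
expected_oid = "3372237a274a3008aaa46245e8d0f425b03cd4db5ee73c070282d2e59a62bafc"
assert hashlib.sha256(data).hexdigest() == expected_oid, "oid mismatch"
print("shard matches the LFS pointer")
```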