gsaltintas committed
Commit 0724b45 · verified · 1 Parent(s): a7123be

Uploading tokenizer_robustness_completion_farsi_romanization subset

README.md CHANGED
@@ -44,6 +44,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_farsi_optional_diacritics/test-*
+- config_name: tokenizer_robustness_completion_farsi_romanization
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_farsi_romanization/test-*
 dataset_info:
 - config_name: tokenizer_robustness_completion_farsi_arabic_keyboard_for_farsi
   features:
@@ -1161,6 +1165,130 @@ dataset_info:
     num_examples: 40
   download_size: 41784
   dataset_size: 24849
+- config_name: tokenizer_robustness_completion_farsi_romanization
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 21870
+    num_examples: 40
+  download_size: 40527
+  dataset_size: 21870
 ---
 
 # Dataset Card for Tokenization Robustness
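Once this commit is live, the new config is loadable through the standard `datasets` API. A minimal sketch; the repo id below is a placeholder, since the diff does not name the full dataset repository:

```python
from datasets import load_dataset

# Placeholder: substitute the actual Hub repo id for this dataset.
REPO_ID = "<user>/<dataset-repo>"

ds = load_dataset(
    REPO_ID,
    "tokenizer_robustness_completion_farsi_romanization",  # config added in this commit
    split="test",
)
print(ds.num_rows)  # 40, per the split metadata above

row = ds[0]
print(row["question"], row["choices"], row["answer_label"])

# The similarity and token-count columns are structs keyed by tokenizer name:
print(row["token_counts"]["gpt2"])
print(row["vanilla_cos_sim_to_canonical"]["Qwen/Qwen3-8B"])
```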
tokenizer_robustness_completion_farsi_romanization/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e227606e04158c49dc8fa91879da5d419cdc557158e6468e705473e9b02119a
+size 40527
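Note that the three lines checked into git are only the Git LFS pointer; the parquet shard itself lives in LFS storage. A sketch for verifying a locally materialized copy against the pointer, assuming it has been fetched via `git lfs pull` or the Hub download API:

```python
import hashlib
import pandas as pd

path = "tokenizer_robustness_completion_farsi_romanization/test-00000-of-00001.parquet"

with open(path, "rb") as f:
    blob = f.read()

# Both expected values come straight from the LFS pointer above.
assert len(blob) == 40527
assert hashlib.sha256(blob).hexdigest() == (
    "3e227606e04158c49dc8fa91879da5d419cdc557158e6468e705473e9b02119a"
)

df = pd.read_parquet(path)
assert len(df) == 40  # matches num_examples in the README metadata
print(df.columns.tolist())
```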