import csv
import os

import datasets


class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Multilingual Needle in a Haystack dataset."""

    def __init__(self, name: str, subset_dir: str, **kwargs):
        """
        Args:
            name: Name of this dataset configuration (e.g., '4k', '8k'); used by
                `datasets` to identify the config.
            subset_dir: Name of the subdirectory within 'data/' that holds this
                subset (e.g., "4k").
            **kwargs: Keyword arguments passed to the base BuilderConfig
                (e.g., 'description').
        """
        super().__init__(name=name, **kwargs)
        self.subset_dir = subset_dir
|
|
class MLNeedle(datasets.GeneratorBasedBuilder):
    """Builder for the Multilingual Needle in a Haystack dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        DatasetConfig(name="baseline", subset_dir="baseline", description="Baseline subset data."),
        DatasetConfig(name="4k", subset_dir="4k", description="Dataset subset with 4k context size."),
        DatasetConfig(name="8k", subset_dir="8k", description="Dataset subset with 8k context size."),
        DatasetConfig(name="16k", subset_dir="16k", description="Dataset subset with 16k context size."),
        DatasetConfig(name="32k", subset_dir="32k", description="Dataset subset with 32k context size."),
    ]

    DEFAULT_CONFIG_NAME = "baseline"

    # Convenience lookup from config name to its BuilderConfig.
    _BUILDER_CONFIGS_GROUPED_BY_DATASET_NAME = {
        config.name: config for config in BUILDER_CONFIGS
    }
|
    def _info(self):
        """Defines the dataset schema and metadata."""
        return datasets.DatasetInfo(
            description=(
                "Multilingual Needle in a Haystack dataset for evaluating large language "
                "models on their ability to retrieve specific information from long "
                "contexts across multiple languages. Each subset (e.g., 4k, 8k) "
                "corresponds to a different context length, and each split (e.g., en, es) "
                "represents a language."
            ),
            features=datasets.Features({
                "id": datasets.Value("string"),
                "needle_lang": datasets.Value("string"),
                "question_lang": datasets.Value("string"),
                "distractor_lang": datasets.Value("string"),
                "needle_position": datasets.Value("string"),
                "answer_text_format": datasets.Value("string"),
                "answer_start_index": datasets.Value("int32"),
                "answer_sentence": datasets.Value("string"),
                "prompt": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/ameyhengle/Multilingual-Needle-in-a-Haystack",
            license="MIT",
        )
|
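    # A minimal, hedged sketch of the `_split_generators` step required by
    # `GeneratorBasedBuilder`. It assumes the repository stores one CSV per language
    # under "data/<subset_dir>/<lang>.csv" (e.g., "data/4k/en.csv") and that each
    # split name is a language code such as "en" or "es". Both `_LANGUAGES` and the
    # path layout are assumptions; adjust them to match the actual data layout.
    _LANGUAGES = ["en", "es"]  # assumed split names (language codes)

    def _split_generators(self, dl_manager):
        """Downloads the per-language CSV files and defines one split per language."""
        data_files = {
            lang: os.path.join("data", self.config.subset_dir, f"{lang}.csv")
            for lang in self._LANGUAGES
        }
        downloaded = dl_manager.download(data_files)
        return [
            datasets.SplitGenerator(
                name=lang,
                gen_kwargs={"filepath": downloaded[lang]},
            )
            for lang in self._LANGUAGES
        ]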
|
    def _generate_examples(self, filepath):
        """Yields examples from a single CSV file.

        Args:
            filepath: The full path to the CSV file to be processed.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for i, row in enumerate(reader):
                try:
                    answer_start_index = int(row["answer_start_index"])
                except (ValueError, TypeError):
                    print(
                        f"Warning: Could not convert 'answer_start_index' to int "
                        f"for row {i} in {filepath}. Defaulting to 0."
                    )
                    answer_start_index = 0

                yield i, {
                    "id": row["id"],
                    "needle_lang": row["needle_lang"],
                    "question_lang": row["question_lang"],
                    "distractor_lang": row["distractor_lang"],
                    "needle_position": row["needle_position"],
                    "answer_text_format": row["answer_text_format"],
                    "answer_start_index": answer_start_index,
                    "answer_sentence": row["answer_sentence"],
                    "prompt": row["prompt"],
                }
|
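# A hedged usage sketch (illustration only, not part of the loading script). Assuming
# this file is the loading script of the "ameyhengle/Multilingual-Needle-in-a-Haystack"
# repository and that splits are language codes such as "en", a subset could be loaded as:
#
#   from datasets import load_dataset
#   ds = load_dataset("ameyhengle/Multilingual-Needle-in-a-Haystack", "4k", split="en")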
|
|