DanBenAmi commited on
Commit
38f2cd8
·
1 Parent(s): 5b83e71

Fix HF Dataset Viewer: add hub loader script

Browse files
Files changed (3) hide show
  1. README.md +1 -1
  2. herbench.py +168 -0
  3. herbench_loader.py +0 -333
README.md CHANGED
@@ -189,7 +189,7 @@ HERBench/
189
  │ ├── videos.tar.part.XX # |
190
  │ ├── videos.tar.checksums.txt # SHA256 checksums
191
  │ └── videos_lite_info.txt # Info about archive structure
192
- ── herbench_loader.py # Python dataloader (supports both)
193
  ```
194
 
195
  **Archive Structure:** Videos are organized so that Lite videos are in the first archive parts (00-03), and Full-only videos are in the remaining parts. This allows efficient downloading of either version without duplication.
 
189
  │ ├── videos.tar.part.XX # |
190
  │ ├── videos.tar.checksums.txt # SHA256 checksums
191
  │ └── videos_lite_info.txt # Info about archive structure
192
+ └── herbench.py # HF Hub loading script (powers Dataset Viewer)
193
  ```
194
 
195
  **Archive Structure:** Videos are organized so that Lite videos are in the first archive parts (00-03), and Full-only videos are in the remaining parts. This allows efficient downloading of either version without duplication.
herbench.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ HERBench Hugging Face Datasets loading script.
3
+
4
+ Why this file exists:
5
+ - Hugging Face Dataset Viewer auto-parses JSON files if no loading script is detected.
6
+ - Auto-parsing uses pandas->pyarrow inference and can fail when nested fields (like `metadata`)
7
+ have inconsistent shapes across rows (common in multi-task benchmarks).
8
+ - By providing a proper datasets loading script named after the repo (`herbench.py` for HERBench),
9
+ the Hub will use this builder instead, with an explicit, stable schema.
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import json
15
+ from typing import Any, Dict, Iterator, Optional
16
+
17
+ import datasets
18
+
19
+
20
+ _DESCRIPTION = """\
21
+ HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering.
22
+
23
+ This dataset contains multiple-choice questions grounded in long videos and designed to
24
+ require integration of multiple temporally separated cues (high evidential requirement).
25
+ """
26
+
27
+ _HOMEPAGE = "https://github.com/DanBenAmi/HERBench"
28
+ _LICENSE = "CC-BY-NC-SA-4.0"
29
+
30
+ _CITATION = """\
31
+ @article{herbench2025,
32
+ title={HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering},
33
+ author={Ben-Ami, Dan and Serussi, Gabriele and Cohen, Kobi and Baskin, Chaim},
34
+ journal={arXiv preprint arXiv:XXXX.XXXXX},
35
+ year={2025}
36
+ }
37
+ """
38
+
39
+ _VERSION = "1.0.1"
40
+
41
+
42
class HERBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for HERBench.

    Intentionally adds nothing beyond the base class: ``datasets.BuilderConfig``
    already provides ``name``/``version``/``description``, which is all the
    configs in this script need. The subclass exists as a named extension point
    for future config-specific options.
    """
45
+
46
class HERBench(datasets.GeneratorBasedBuilder):
    """HERBench Dataset: Multi-Evidence Integration in Video QA.

    Exposes two configs ("full" and "lite") over the same flat, Arrow-stable
    schema. All type coercion happens in ``_generate_examples`` so the Dataset
    Viewer never has to infer a column type from heterogeneous JSON.
    """

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        HERBenchConfig(
            name="full",
            version=VERSION,
            description="Full HERBench dataset (all questions; large video collection).",
        ),
        HERBenchConfig(
            name="lite",
            version=VERSION,
            description="HERBench-Lite subset (smaller for quick prototyping and the Dataset Viewer).",
        ),
    ]

    # Make the Hub viewer default to the smaller config (faster and less error-prone).
    DEFAULT_CONFIG_NAME = "lite"

    @staticmethod
    def _coerce_float(value: Any, default: float = 0.0) -> float:
        """Best-effort float conversion; return *default* on bad input.

        Narrow exception tuple: float() only raises TypeError/ValueError.
        """
        try:
            return float(value)
        except (TypeError, ValueError):
            return default

    @staticmethod
    def _coerce_int(value: Any, default: int = 0) -> int:
        """Best-effort int conversion; return *default* on bad input.

        OverflowError covers int(float('inf')); Type/ValueError cover the rest.
        """
        try:
            return int(value)
        except (TypeError, ValueError, OverflowError):
            return default

    def _info(self) -> datasets.DatasetInfo:
        """Declare the fixed schema shared by every row.

        IMPORTANT: Keep features stable across all rows.

        `metadata` in the raw JSON varies by task (different keys / nested lists).
        To keep the schema consistent for Arrow + Dataset Viewer:
        - expose a few common metadata fields as flat columns
        - store the full raw metadata dict as a JSON string in `metadata_json`
        """
        features = datasets.Features(
            {
                "question_id": datasets.Value("string"),
                "video_id": datasets.Value("string"),
                "video_path": datasets.Value("string"),
                "question": datasets.Value("string"),
                "choices": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
                "answer_index": datasets.Value("int32"),
                "answer_text": datasets.Value("string"),
                "task_type": datasets.Value("string"),
                # Common metadata (flat)
                "source_dataset": datasets.Value("string"),
                "duration": datasets.Value("float32"),
                "resolution": datasets.Value("string"),
                "evidence_count": datasets.Value("int32"),
                "difficulty": datasets.Value("string"),
                # Full raw metadata as JSON string (stable column type)
                "metadata_json": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the config-appropriate annotations file; single TEST split."""
        if self.config.name == "lite":
            annotations_file = "data/herbench_annotations_lite.json"
        else:
            annotations_file = "data/herbench_annotations.json"

        data_files = dl_manager.download(
            {
                "annotations": annotations_file,
            }
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"annotations_file": data_files["annotations"]},
            )
        ]

    def _generate_examples(self, annotations_file: str) -> Iterator[tuple[int, Dict[str, Any]]]:
        """Yield ``(index, example)`` pairs from the annotations JSON.

        Every value is coerced to the exact type declared in ``_info`` so a
        single malformed row cannot abort generation of the whole split.
        """
        with open(annotations_file, encoding="utf-8") as f:
            annotations = json.load(f)

        for idx, ann in enumerate(annotations):
            md = ann.get("metadata") or {}
            if not isinstance(md, dict):
                # Very defensive: keep schema stable even if a row has unexpected metadata type.
                md = {"_raw_metadata": md}

            yield idx, {
                "question_id": str(ann.get("question_id", f"HER_{idx:06d}")),
                "video_id": str(ann.get("video_id", "")),
                "video_path": str(ann.get("video_path", "")),
                "question": str(ann.get("question", "")),
                "choices": [str(x) for x in (ann.get("choices") or [])],
                "answer": str(ann.get("answer", "")),
                # Coerced defensively (bug fix): a non-numeric answer_index
                # previously raised ValueError and killed the generator.
                "answer_index": self._coerce_int(ann.get("answer_index", 0) or 0),
                "answer_text": str(ann.get("answer_text", "")),
                "task_type": str(ann.get("task_type", "unknown")),
                "source_dataset": str(md.get("source_dataset", "unknown")),
                "duration": self._coerce_float(md.get("duration", 0.0)),
                "resolution": str(md.get("resolution", "unknown")),
                "evidence_count": self._coerce_int(md.get("evidence_count", 0)),
                "difficulty": str(md.get("difficulty", "unknown")),
                "metadata_json": json.dumps(md, ensure_ascii=False),
            }
167
+
168
+
herbench_loader.py DELETED
@@ -1,333 +0,0 @@
1
- """
2
- HERBench Dataset Loader for Hugging Face
3
-
4
- This module provides a Hugging Face datasets loader for HERBench, a benchmark for
5
- multi-evidence integration in video question answering.
6
-
7
- Usage:
8
- # Option 1: Load via Hugging Face datasets library
9
- from datasets import load_dataset
10
- dataset = load_dataset("DanBenAmi/HERBench")
11
- print(dataset['test'][0])
12
-
13
- # Option 2: Load locally
14
- from datasets import load_dataset
15
- dataset = load_dataset("path/to/HERBench/herbench_loader.py")
16
-
17
- Example:
18
- >>> from datasets import load_dataset
19
- >>> dataset = load_dataset("DanBenAmi/HERBench")
20
- >>> sample = dataset['test'][0]
21
- >>> print(sample['question'])
22
- >>> print(sample['choices'])
23
- >>> print(sample['answer'])
24
-
25
- For more information, visit:
26
- - GitHub: https://github.com/DanBenAmi/HERBench
27
- - Paper: https://arxiv.org/abs/XXXX.XXXXX (coming soon)
28
- - Project Page: https://danbenami.github.io/herbench (coming soon)
29
- """
30
-
31
- import json
32
- from pathlib import Path
33
- from typing import Dict, List, Optional
34
-
35
- import datasets
36
-
37
-
38
- _DESCRIPTION = """\
39
- HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering
40
-
41
- HERBench is a challenging benchmark designed to evaluate vision-language models on
42
- multi-evidence integration in long videos. Unlike existing benchmarks where questions
43
- can often be answered from single frames, HERBench enforces a High Evidential Requirement
44
- (ER) where each question requires aggregating at least k ≥ 3 distinct, temporally
45
- separated visual cues.
46
-
47
- Key Features:
48
- - 27,936 five-way multiple-choice questions (Full) or ~5,600 questions (Lite)
49
- - 335 unique videos (Full) or ~67 videos (Lite)
50
- - Average video length of 395 seconds (6.6 minutes)
51
- - 12 compositional task types covering temporal, spatial, and causal reasoning
52
- - Mean Minimum Required Frame-Set (MRFS) of 5.49
53
- - Questions designed to prevent single-frame shortcuts
54
- - Comprehensive evaluation of multi-evidence reasoning capabilities
55
-
56
- Available in two versions:
57
- - Full: 27,936 questions, 335 videos (~161 GB) - Complete benchmark
58
- - Lite: ~5,600 questions, ~67 videos (~35 GB) - 20% subset for quick prototyping
59
-
60
- The benchmark includes videos from diverse sources:
61
- - WildTrack: Multi-camera pedestrian tracking scenes
62
- - HD-EPIC: First-person egocentric videos of daily activities
63
- - PersonPath22: Person tracking in various environments
64
- - Movie Trailers: Narrative story understanding
65
-
66
- Each question is carefully designed to require:
67
- 1. Multiple pieces of evidence (k ≥ 3 frames)
68
- 2. Temporal separation between evidence frames
69
- 3. Compositional reasoning across evidence
70
- 4. Integration of visual information from different moments
71
- """
72
-
73
- _HOMEPAGE = "https://github.com/DanBenAmi/HERBench"
74
-
75
- _LICENSE = "CC-BY-NC-SA-4.0"
76
-
77
- _CITATION = """\
78
- @article{herbench2025,
79
- title={HERBench: A Benchmark for Multi-Evidence Integration in Video Question Answering},
80
- author={Ben-Ami, Dan and Serussi, Gabriele and Cohen, Kobi and Baskin, Chaim},
81
- journal={arXiv preprint arXiv:XXXX.XXXXX},
82
- year={2025}
83
- }
84
- """
85
-
86
- _VERSION = "1.0.0"
87
-
88
-
89
- class HERBenchConfig(datasets.BuilderConfig):
90
- """BuilderConfig for HERBench."""
91
-
92
- def __init__(self, **kwargs):
93
- """BuilderConfig for HERBench.
94
-
95
- Args:
96
- **kwargs: keyword arguments forwarded to super.
97
- """
98
- super(HERBenchConfig, self).__init__(**kwargs)
99
-
100
-
101
- class HERBench(datasets.GeneratorBasedBuilder):
102
- """HERBench Dataset: Multi-Evidence Integration in Video QA."""
103
-
104
- VERSION = datasets.Version(_VERSION)
105
-
106
- BUILDER_CONFIGS = [
107
- HERBenchConfig(
108
- name="full",
109
- version=VERSION,
110
- description="Full HERBench dataset with all 27,936 questions and 335 videos (~161GB)",
111
- ),
112
- HERBenchConfig(
113
- name="lite",
114
- version=VERSION,
115
- description="HERBench-Lite: 20% subset with ~5,600 questions and ~67 videos (~35GB)",
116
- ),
117
- ]
118
-
119
- DEFAULT_CONFIG_NAME = "full"
120
-
121
- def _info(self):
122
- """Specify the datasets.DatasetInfo object."""
123
- features = datasets.Features({
124
- "question_id": datasets.Value("string"),
125
- "video_id": datasets.Value("string"),
126
- "video_path": datasets.Value("string"),
127
- "question": datasets.Value("string"),
128
- "choices": datasets.Sequence(datasets.Value("string")),
129
- "answer": datasets.Value("string"),
130
- "answer_index": datasets.Value("int32"),
131
- "answer_text": datasets.Value("string"),
132
- "task_type": datasets.Value("string"),
133
- "metadata": datasets.Features({
134
- "source_dataset": datasets.Value("string"),
135
- "duration": datasets.Value("float32"),
136
- "resolution": datasets.Value("string"),
137
- "evidence_count": datasets.Value("int32"),
138
- "difficulty": datasets.Value("string"),
139
- }),
140
- })
141
-
142
- return datasets.DatasetInfo(
143
- description=_DESCRIPTION,
144
- features=features,
145
- homepage=_HOMEPAGE,
146
- license=_LICENSE,
147
- citation=_CITATION,
148
- version=self.VERSION,
149
- )
150
-
151
- def _split_generators(self, dl_manager):
152
- """Return SplitGenerators."""
153
- # Determine which annotation file to use based on config
154
- if self.config.name == "lite":
155
- annotations_file = "data/herbench_annotations_lite.json"
156
- else:
157
- annotations_file = "data/herbench_annotations.json"
158
-
159
- # Download and extract data files
160
- data_files = dl_manager.download({
161
- "annotations": annotations_file,
162
- "task_metadata": "data/task_metadata.json",
163
- "video_metadata": "data/video_metadata.json",
164
- })
165
-
166
- return [
167
- datasets.SplitGenerator(
168
- name=datasets.Split.TEST,
169
- gen_kwargs={
170
- "annotations_file": data_files["annotations"],
171
- "task_metadata_file": data_files["task_metadata"],
172
- "video_metadata_file": data_files["video_metadata"],
173
- },
174
- ),
175
- ]
176
-
177
- def _generate_examples(self, annotations_file, task_metadata_file, video_metadata_file):
178
- """Yield examples as (key, example) tuples."""
179
- # Load annotations
180
- with open(annotations_file, encoding="utf-8") as f:
181
- annotations = json.load(f)
182
-
183
- # Yield each annotation
184
- for idx, annotation in enumerate(annotations):
185
- # Ensure metadata exists
186
- if "metadata" not in annotation:
187
- annotation["metadata"] = {
188
- "source_dataset": "unknown",
189
- "duration": 0.0,
190
- "resolution": "unknown",
191
- "evidence_count": 0,
192
- "difficulty": "unknown"
193
- }
194
- else:
195
- # Ensure required metadata fields exist
196
- metadata = annotation["metadata"]
197
- if "source_dataset" not in metadata:
198
- metadata["source_dataset"] = "unknown"
199
- if "duration" not in metadata:
200
- metadata["duration"] = 0.0
201
- if "resolution" not in metadata:
202
- metadata["resolution"] = "unknown"
203
- if "evidence_count" not in metadata:
204
- metadata["evidence_count"] = 0
205
- if "difficulty" not in metadata:
206
- metadata["difficulty"] = "unknown"
207
-
208
- yield idx, {
209
- "question_id": annotation.get("question_id", f"HER_{idx:06d}"),
210
- "video_id": annotation.get("video_id", ""),
211
- "video_path": annotation.get("video_path", ""),
212
- "question": annotation.get("question", ""),
213
- "choices": annotation.get("choices", []),
214
- "answer": annotation.get("answer", ""),
215
- "answer_index": int(annotation.get("answer_index", 0)),
216
- "answer_text": annotation.get("answer_text", ""),
217
- "task_type": annotation.get("task_type", "unknown"),
218
- "metadata": annotation["metadata"],
219
- }
220
-
221
-
222
- # Example usage and helper functions
223
- def load_herbench(cache_dir: Optional[str] = None) -> datasets.DatasetDict:
224
- """
225
- Load HERBench dataset using Hugging Face datasets library.
226
-
227
- Args:
228
- cache_dir: Optional directory to cache the dataset.
229
-
230
- Returns:
231
- DatasetDict with 'test' split containing all questions.
232
-
233
- Example:
234
- >>> dataset = load_herbench()
235
- >>> print(f"Total questions: {len(dataset['test'])}")
236
- >>> print(dataset['test'][0])
237
- """
238
- return datasets.load_dataset(
239
- "DanBenAmi/HERBench",
240
- cache_dir=cache_dir
241
- )
242
-
243
-
244
- def get_questions_by_task(dataset, task_type: str) -> List[Dict]:
245
- """
246
- Filter questions by task type.
247
-
248
- Args:
249
- dataset: HERBench dataset or test split.
250
- task_type: Task type to filter (e.g., 'temporal_reasoning').
251
-
252
- Returns:
253
- List of questions matching the task type.
254
-
255
- Example:
256
- >>> dataset = load_herbench()
257
- >>> temporal_qs = get_questions_by_task(dataset['test'], 'temporal_reasoning')
258
- >>> print(f"Temporal reasoning questions: {len(temporal_qs)}")
259
- """
260
- if isinstance(dataset, datasets.DatasetDict):
261
- dataset = dataset['test']
262
-
263
- return [q for q in dataset if q['task_type'] == task_type]
264
-
265
-
266
- def get_questions_by_video(dataset, video_id: str) -> List[Dict]:
267
- """
268
- Get all questions for a specific video.
269
-
270
- Args:
271
- dataset: HERBench dataset or test split.
272
- video_id: Video identifier.
273
-
274
- Returns:
275
- List of questions for the specified video.
276
-
277
- Example:
278
- >>> dataset = load_herbench()
279
- >>> video_qs = get_questions_by_video(dataset['test'], 'cam2_segment_4_180s_240s')
280
- >>> print(f"Questions for video: {len(video_qs)}")
281
- """
282
- if isinstance(dataset, datasets.DatasetDict):
283
- dataset = dataset['test']
284
-
285
- return [q for q in dataset if q['video_id'] == video_id]
286
-
287
-
288
- def print_sample(sample: Dict) -> None:
289
- """
290
- Pretty print a sample from the dataset.
291
-
292
- Args:
293
- sample: A single sample from HERBench.
294
-
295
- Example:
296
- >>> dataset = load_herbench()
297
- >>> print_sample(dataset['test'][0])
298
- """
299
- duration = sample['metadata'].get('duration', 0.0)
300
- print(f"Question ID: {sample['question_id']}")
301
- print(f"Video: {sample['video_id']} ({duration:.1f}s)")
302
- print(f"Resolution: {sample['metadata'].get('resolution', 'unknown')}")
303
- print(f"Task: {sample['task_type']}")
304
- print(f"\nQuestion: {sample['question']}")
305
- print(f"\nChoices:")
306
- for i, choice in enumerate(sample['choices']):
307
- marker = "→" if i == sample['answer_index'] else " "
308
- print(f" {marker} {choice}")
309
- print(f"\nCorrect Answer: {sample['answer']} (index: {sample['answer_index']})")
310
- if sample.get('answer_text'):
311
- print(f"Answer Text: {sample['answer_text']}")
312
- print(f"Source: {sample['metadata']['source_dataset']}")
313
- print("-" * 60)
314
-
315
-
316
- if __name__ == "__main__":
317
- # Example usage when run as script
318
- print("Loading HERBench dataset...")
319
- dataset = load_herbench()
320
-
321
- print(f"\nDataset loaded successfully!")
322
- print(f"Total questions: {len(dataset['test'])}")
323
-
324
- print(f"\nFirst sample:")
325
- print_sample(dataset['test'][0])
326
-
327
- # Show task distribution
328
- from collections import Counter
329
- task_counts = Counter(q['task_type'] for q in dataset['test'])
330
-
331
- print(f"\nTask distribution:")
332
- for task, count in task_counts.most_common():
333
- print(f" {task}: {count}")