Datasets:
Tasks:
Text Classification
Modalities:
Text
Formats:
json
Languages:
Vietnamese
Size:
1K - 10K
DOI:
License:
| #!/usr/bin/env python3 | |
| """Generate statistics for the UTS2017_Bank dataset.""" | |
| import json | |
| import statistics as stats | |
| from collections import Counter | |
| from pathlib import Path | |
def load_jsonl(file_path):
    """Load a JSONL file and return its records as a list.

    Args:
        file_path: Path or string to a UTF-8 encoded JSON-Lines file,
            one JSON object per line.

    Returns:
        list: One parsed object per non-empty line.

    Fix: blank lines (e.g. a trailing newline at EOF, common in JSONL
    dumps) previously reached ``json.loads("")`` and raised
    ``JSONDecodeError``; they are now skipped.
    """
    with open(file_path, encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]
def text_stats(items):
    """Summarize per-example word counts over the ``text`` field.

    Args:
        items: Iterable of dicts, each carrying a ``"text"`` string.

    Returns:
        dict: ``avg`` / ``min`` / ``max`` / ``median`` of the
        whitespace-token counts of each item's text.
    """
    counts = []
    for entry in items:
        counts.append(len(entry["text"].split()))
    summary = {
        "avg": stats.mean(counts),
        "min": min(counts),
        "max": max(counts),
        "median": stats.median(counts),
    }
    return summary
def print_subset_stats(subset_name, emoji):
    """Print per-split statistics for one dataset subset.

    Reads ``data/<subset_name>/{train,test}.jsonl`` (paths relative to
    the working directory — assumes the script runs from the repo root;
    TODO confirm) and prints example counts, word-length stats, and a
    subset-specific label/sentiment/aspect breakdown.

    Args:
        subset_name: One of "classification", "sentiment",
            "aspect_sentiment" — selects both the data directory and
            the extra statistics printed.
        emoji: Decorative prefix for the section header.
    """
    header = f"\n{emoji} {subset_name.upper()} SUBSET"
    print(header)
    print("-" * 40)
    for split_name in ("train", "test"):
        records = load_jsonl(Path(f"data/{subset_name}/{split_name}.jsonl"))
        print(f"\n{split_name.capitalize()}: {len(records)} examples")
        # Word-count summary for this split.
        words = text_stats(records)
        print(f" Words: avg={words['avg']:.1f}, range={words['min']}-{words['max']}")
        # Extra stats depend on which subset we are reporting on.
        if subset_name == "classification":
            label_freq = Counter(rec["label"] for rec in records)
            top3 = ", ".join(f"{k}({v})" for k, v in label_freq.most_common(3))
            print(f" Top labels: {top3}")
        elif subset_name == "sentiment":
            sentiment_freq = Counter(rec["sentiment"] for rec in records)
            ranked = ", ".join(f"{k}({v})" for k, v in sentiment_freq.most_common())
            print(f" Sentiments: {ranked}")
        elif subset_name == "aspect_sentiment":
            n_multi = sum(1 for rec in records if len(rec["aspects"]) > 1)
            print(f" Multi-aspect: {n_multi}/{len(records)} examples")
def main():
    """Generate and display dataset statistics for all three subsets."""
    # NOTE(review): the non-ASCII literals below look like mojibake
    # (UTF-8 emoji decoded with the wrong codec); preserved byte-for-byte
    # since they are runtime output — confirm intended glyphs upstream.
    print("๐ UTS2017_Bank Dataset Statistics")
    print("=" * 50)
    # Overall counts come from the classification subset's splits.
    train_items = load_jsonl("data/classification/train.jsonl")
    test_items = load_jsonl("data/classification/test.jsonl")
    total = len(train_items) + len(test_items)
    print(f"\n๐ OVERALL: {total} examples ({len(train_items)} train, {len(test_items)} test)")
    # Per-subset breakdowns, one section each.
    for subset, icon in (
        ("classification", "๐ท๏ธ"),
        ("sentiment", "๐"),
        ("aspect_sentiment", "๐ฏ"),
    ):
        print_subset_stats(subset, icon)
    # Show how to load each configuration via the datasets library.
    print("\n๐ก USAGE:")
    for config in ("classification", "sentiment", "aspect_sentiment"):
        print(f" load_dataset('undertheseanlp/UTS2017_Bank', '{config}')")
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()