kurry committed on
Commit f3ded37 · 1 Parent(s): 71db6d3

Remove Python builder and requirements; keep only Parquet data and README.

Files changed (2)
  1. requirements.txt +0 -2
  2. sp500_earnings_transcripts.py +0 -87
requirements.txt DELETED
@@ -1,2 +0,0 @@
- pandas
- pyarrow
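With requirements.txt gone, pandas and pyarrow are no longer pinned by this repo; they were only build-time dependencies of the removed builder script. A minimal sketch for consumers who still want to read the raw files directly, assuming the Parquet files remain under parquet_files/ in the repository (the path the deleted builder below expects) and that pandas plus a Parquet engine are installed separately:

import pandas as pd

# Read every *.parquet file in the directory into one DataFrame;
# pandas delegates directory handling to the pyarrow engine.
df = pd.read_parquet("parquet_files")
print(df[["symbol", "year", "quarter", "date"]].head())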
 
sp500_earnings_transcripts.py DELETED
@@ -1,87 +0,0 @@
- """
- DatasetBuilder for S&P 500 Earnings Call Transcripts (2005-2025) dataset.
- """
- import os
- from pathlib import Path
-
- import datasets
- import pyarrow.parquet as pq
-
- _CITATION = """
- @dataset{kurry2025sp500earnings,
-   author = {Kurry},
-   title = {S&P 500 Earnings Transcripts Dataset},
-   year = {2025},
-   publisher = {Hugging Face},
-   url = {https://huggingface.co/datasets/kurry/sp500_earnings_transcripts}
- }
- """
-
- _DESCRIPTION = """
- Full earnings call transcripts for S&P 500 companies and US large-cap companies
- from 2005 to 2025, including metadata and structured speaker-by-speaker dialogue.
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/kurry/sp500_earnings_transcripts"
- _LICENSE = "mit"
-
-
- class Sp500EarningsTranscripts(datasets.GeneratorBasedBuilder):
-     """S&P 500 Earnings Call Transcripts Dataset"""
-
-     VERSION = datasets.Version("1.0.0")
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="default", version=VERSION,
-                                description="S&P 500 earnings call transcripts dataset")
-     ]
-     DEFAULT_CONFIG_NAME = "default"
-
-     def _info(self):
-         features = datasets.Features({
-             "symbol": datasets.Value("string"),
-             "company_name": datasets.Value("string"),
-             "company_id": datasets.Value("float64"),
-             "year": datasets.Value("int32"),
-             "quarter": datasets.Value("int32"),
-             "date": datasets.Value("string"),
-             "content": datasets.Value("string"),
-             "structured_content": datasets.Sequence(
-                 datasets.Features({
-                     "speaker": datasets.Value("string"),
-                     "text": datasets.Value("string"),
-                 })
-             ),
-         })
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # data is provided locally in parquet_files directory
-         data_dir = Path(__file__).resolve().parent / "parquet_files"
-         # collect all parquet files
-         filepaths = sorted(data_dir.glob("*.parquet"))
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepaths": filepaths},
-             )
-         ]
-
-     def _generate_examples(self, filepaths):
-         """
-         Generate examples from parquet files.
-         Args:
-             filepaths: list of pathlib.Path to parquet files
-         """
-         idx = 0
-         for path in filepaths:
-             # read parquet file with nested structured_content
-             table = pq.read_table(path)
-             for record in table.to_pylist():
-                 yield idx, record
-                 idx += 1
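With the builder script deleted, loading the dataset no longer runs custom Python. A minimal sketch of the expected replacement path, assuming the Hugging Face Hub's automatic Parquet loader picks up the data files in this repo:

from datasets import load_dataset

# The Hub infers the schema from the Parquet files themselves,
# so no trust_remote_code or custom builder script is needed.
ds = load_dataset("kurry/sp500_earnings_transcripts", split="train")
print(ds[0]["symbol"], ds[0]["date"])

The nested structured_content column (speaker/text pairs) should survive this path, since Parquet preserves the list-of-struct layout the removed builder declared in its features.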