lakshaychhabra committed
Commit d3da0a4 · verified · 1 Parent(s): 5ea087e

Upload folder using huggingface_hub

Files changed (7):
  1. .gitattributes +1 -59
  2. .gitignore +4 -0
  3. LICENSE +23 -0
  4. README.md +83 -3
  5. data/filings.parquet +3 -0
  6. data/ohlcv.parquet +3 -0
  7. scripts/fetch_raw.py +151 -0
.gitattributes CHANGED
@@ -1,59 +1 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mds filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- # Video files - compressed
- *.mp4 filter=lfs diff=lfs merge=lfs -text
- *.webm filter=lfs diff=lfs merge=lfs -text
+ data/*.parquet filter=lfs diff=lfs merge=lfs -text
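After this change, only the repo's Parquet tables ride on Git LFS. A minimal sketch, assuming a standard `git-lfs` install, for materializing just those payloads after a clone:

```bash
# Assumes git-lfs is installed; one-time per-user setup.
git lfs install
# Fetch only the Parquet files covered by the new rule.
git lfs pull --include "data/*.parquet"
```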
.gitignore ADDED
@@ -0,0 +1,4 @@
+ raw/
+ __pycache__/
+ *.pyc
+ .DS_Store
LICENSE ADDED
@@ -0,0 +1,23 @@
+ # LICENSE (MIT)
+
+ MIT License
+
+ Copyright (c) 2025 Lakshay Chhabra
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the “Software”), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
README.md CHANGED
@@ -1,3 +1,83 @@
- ---
- license: mit
- ---
+ # FinLucRAG Corpus (OHLCV + SEC/PR Index)
+
+ One-stop **corpus metadata** for building finance RAG systems.
+ This repo ships **two Parquet tables** and a **fetcher**. It does **not** re-host filings or press releases.
+
+ ## What’s included
+ - `data/ohlcv.parquet` — daily OHLCV for ~30 tickers (2022-01-01 → 2025-09-05).
+ - `data/filings.parquet` — document-level index of SEC filings & earnings press releases:
+   doc IDs/keys, statement dates, stable file-specific EDGAR **doc URLs**, checksums, and basic classification.
+ - `scripts/fetch_raw.py` — polite downloader that saves originals to `raw/{doc_key}`.
+
+ ## What’s *not* included
+ - No raw HTML/PDF (for legal and size reasons). Fetch on demand with the script.
+ - No paragraph splits (chunking is left to the user's own pipeline).
+ - No benchmark Q&A (that will live in a separate benchmark repo).
+
+ ---
+
+ ## File layout
+     data/
+       ohlcv.parquet
+       filings.parquet
+     scripts/
+       fetch_raw.py
+
+ ## Schemas (frozen)
+
+ ### `data/ohlcv.parquet`
+ | column | type | notes |
+ |---|---|---|
+ | `date` | date | trading day (UTC) |
+ | `ticker` | string (UPPER) | e.g., AAPL |
+ | `open, high, low, close` | float64 | |
+ | `volume` | int64 | |
+
+ ### `data/filings.parquet`
+ | column | type | notes |
+ |---|---|---|
+ | `doc_id` | string | `{ticker}_{accession}_{filename}` |
+ | `doc_key` | string | `{ticker}/{accession}/{source}/{filename}` |
+ | `doc_local_hint` | string | `raw/{doc_key}` target path |
+ | `ticker` | string | |
+ | `cik` | string | zero-padded |
+ | `cik_int` | int | numeric CIK |
+ | `accession` | string | with dashes |
+ | `acc_nodash` | string | digits only |
+ | `form` | string | `8-K`, `10-Q`, `10-K`, … |
+ | `source` | enum | `primary`, `press_release`, `exhibits` |
+ | `statement_date` | date | document date (PR/filing) |
+ | `event_date` | date | same as `statement_date` |
+ | `next_trading_hint` | date | `event_date + 1 day` (convenience) |
+ | `event_type` | enum | `earnings` or `other` (heuristic: 8-K Item `2.02` / EX-99.* PR text) |
+ | `item_codes` | string/NA | e.g., `2.02,9.01` for 8-K |
+ | `doc_code` | string/NA | e.g., `EX-99.1` |
+ | `title` | string/NA | if known |
+ | `url` | string | **doc-level** EDGAR URL (ends with `filename`) |
+ | `content_type` | string | `text/html`, `application/pdf`, `text/plain` |
+ | `byte_len` | int | size when indexed |
+ | `sha256` | string | checksum of local copy at index time |
+ | `filename` | string | file name |
+
+ ---
+
+ ## Quick start
+
+ ### Load with pandas
+ ```python
+ import pandas as pd
+ ohlcv = pd.read_parquet("data/ohlcv.parquet")
+ fil = pd.read_parquet("data/filings.parquet")
+ print(len(ohlcv), len(fil))
+ ```
+
+ ### Fetch originals (on your machine)
+ ```bash
+ python scripts/fetch_raw.py \
+   --parquet data/filings.parquet \
+   --out raw \
+   --only event_type=earnings \
+   --sources press_release,primary \
+   --rate 2 --verify-sha
+ # Files land under: raw/{doc_key}
+ ```
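A minimal sketch of how the two tables line up (an illustration under the schemas above, not part of the frozen schema): join earnings rows to OHLCV on `next_trading_hint` to pull the next day's close. The hint is `event_date + 1 day` on the calendar, so a left join leaves `close` as NaN when that day falls on a weekend or holiday.

```python
import pandas as pd

# Pair each earnings document with the close on `next_trading_hint`.
ohlcv = pd.read_parquet("data/ohlcv.parquet")
fil = pd.read_parquet("data/filings.parquet")

earnings = fil[fil["event_type"] == "earnings"].copy()
# Align the merge-key dtypes; both columns become datetime64.
earnings["next_trading_hint"] = pd.to_datetime(earnings["next_trading_hint"])
ohlcv["date"] = pd.to_datetime(ohlcv["date"])

merged = earnings.merge(
    ohlcv[["ticker", "date", "close"]],
    left_on=["ticker", "next_trading_hint"],
    right_on=["ticker", "date"],
    how="left",  # NaN close when the hint is not a trading day
)
print(merged[["doc_id", "event_date", "close"]].head())
```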
data/filings.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86d6a40d5221b4b1affb04b50f7f6ec7d0e42a87927988bc43126e00c94e6757
+ size 273409
data/ohlcv.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be532515f2cff1543ca49410eb73f9785f4bab35641b99ed4f88386b0a3a4e9a
+ size 592117
scripts/fetch_raw.py ADDED
@@ -0,0 +1,151 @@
+ #!/usr/bin/env python3
+ """
+ Fetch raw documents listed in data/filings.parquet to raw/{doc_key}.
+ - Respects SEC fair-access guidance by default: 2 requests/second, retries, a proper User-Agent.
+ - Skips existing files unless --overwrite. Can verify sha256 when present.
+
+ Usage:
+ python scripts/fetch_raw.py --parquet data/filings.parquet --out raw --only event_type=earnings --sources press_release,primary --rate 2 --max 100 --verify-sha
+ """
+
+ import argparse, hashlib, time
+ from pathlib import Path
+ import pandas as pd
+ import requests
+ from requests.adapters import HTTPAdapter, Retry
+
+ def sha256_file(p: Path, chunk=1 << 20):
+     h = hashlib.sha256()
+     with p.open("rb") as f:
+         for b in iter(lambda: f.read(chunk), b""):
+             h.update(b)
+     return h.hexdigest()
+
+ def parse_kv_filters(s: str):
+     # "k1=v1,k2=v2" -> dict
+     out = {}
+     if not s:
+         return out
+     for part in s.split(","):
+         if "=" not in part:
+             continue
+         k, v = part.split("=", 1)
+         out[k.strip()] = v.strip()
+     return out
+
+ def main():
+     ap = argparse.ArgumentParser()
+     ap.add_argument("--parquet", default="data/filings.parquet")
+     ap.add_argument("--out", default="raw")
+     ap.add_argument("--tickers", default="", help="Comma list, e.g. AAPL,MSFT")
+     ap.add_argument("--sources", default="", help="Comma list, e.g. press_release,primary,exhibits")
+     ap.add_argument("--only", default="", help="Comma key=val filters, e.g. event_type=earnings,form=8-K")
+     ap.add_argument("--max", type=int, default=0, help="Cap downloads (0 = no cap)")
+     ap.add_argument("--overwrite", action="store_true", help="Re-download even if file exists")
+     ap.add_argument("--verify-sha", action="store_true", help="Verify against parquet sha256 when available")
+     ap.add_argument("--rate", type=float, default=2.0, help="Requests per second (float). Default 2.0")
+     ap.add_argument("--timeout", type=float, default=40.0, help="HTTP timeout seconds")
+     ap.add_argument("--user-agent", default="FinLucRAG/1.0 (+contact: YOUR_EMAIL@EXAMPLE.COM)",
+                     help="Set a real contact email here before heavy usage.")
+     ap.add_argument("--dry-run", action="store_true")
+     args = ap.parse_args()
+
+     outdir = Path(args.out)
+     outdir.mkdir(parents=True, exist_ok=True)
+
+     df = pd.read_parquet(args.parquet)
+
+     # Basic filtering
+     if args.tickers:
+         tickers = {t.strip().upper() for t in args.tickers.split(",") if t.strip()}
+         df = df[df["ticker"].str.upper().isin(tickers)]
+     if args.sources:
+         srcs = {s.strip() for s in args.sources.split(",") if s.strip()}
+         df = df[df["source"].isin(srcs)]
+     kv = parse_kv_filters(args.only)
+     for k, v in kv.items():
+         if k not in df.columns:
+             print(f"[WARN] filter key '{k}' not in columns; ignoring")
+             continue
+         df = df[df[k].astype(str) == v]
+
+     # Sort by event_date (falling back to statement_date), then ticker and source
+     if "event_date" in df.columns:
+         df = df.sort_values(["event_date", "ticker", "source"])
+     else:
+         df = df.sort_values(["statement_date", "ticker", "source"])
+
+     # Session with retries
+     sess = requests.Session()
+     retries = Retry(total=5, backoff_factor=0.5, status_forcelist=(429, 500, 502, 503, 504))
+     sess.mount("https://", HTTPAdapter(max_retries=retries))
+     sess.headers.update({
+         "User-Agent": args.user_agent,
+         "Accept": "*/*",
+         "Connection": "keep-alive",
+     })
+
+     per_req_delay = 1.0 / max(args.rate, 0.001)
+     done = 0
+
+     for _, row in df.iterrows():
+         url = row["url"]
+         doc_key = row.get("doc_key")
+         filename = row.get("filename")
+         if not doc_key or not filename:
+             print(f"[SKIP] missing doc_key/filename for {row.get('doc_id')}")
+             continue
+
+         dest = outdir / doc_key
+         dest.parent.mkdir(parents=True, exist_ok=True)
+
+         if dest.exists() and not args.overwrite:
+             if args.verify_sha and isinstance(row.get("sha256"), str) and row["sha256"]:
+                 cur = sha256_file(dest)
+                 if cur == row["sha256"]:
+                     print(f"[OK] exists+verified {dest}")
+                     continue
+                 # Hash mismatch: drop the stale copy and fall through to re-download.
+                 print(f"[WARN] hash mismatch for {dest}; redownloading")
+                 dest.unlink(missing_ok=True)
+             else:
+                 print(f"[SKIP] exists {dest}")
+                 continue
+
+         if args.dry_run:
+             print(f"[DRY] would fetch -> {url} -> {dest}")
+             done += 1
+             if args.max and done >= args.max:
+                 break
+             continue
+
+         try:
+             resp = sess.get(url, timeout=args.timeout, stream=True)
+             resp.raise_for_status()
+             # Stream to a temporary .part file, then rename into place.
+             tmp = dest.with_suffix(dest.suffix + ".part")
+             with tmp.open("wb") as f:
+                 for chunk in resp.iter_content(chunk_size=1 << 16):
+                     if chunk:
+                         f.write(chunk)
+             if args.verify_sha and isinstance(row.get("sha256"), str) and row["sha256"]:
+                 cur = sha256_file(tmp)
+                 if cur != row["sha256"]:
+                     tmp.unlink(missing_ok=True)
+                     print(f"[FAIL] sha256 mismatch for {dest.name} (expected {row['sha256']}, got {cur})")
+                     time.sleep(per_req_delay)
+                     continue
+             tmp.rename(dest)
+             print(f"[OK] {url} -> {dest}")
+             done += 1
+             if args.max and done >= args.max:
+                 break
+             time.sleep(per_req_delay)
+         except requests.RequestException as e:
+             print(f"[ERR] {url} -> {e}")
+             time.sleep(per_req_delay)
+             continue
+
+     print(f"[DONE] fetched={done}")
+
+ if __name__ == "__main__":
+     main()
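A cautious workflow, sketched with the flags the script defines above: run a `--dry-run` pass first to see what the filters select, then fetch for real with a cap while testing.

```bash
# Preview what the filters match; nothing is downloaded.
python scripts/fetch_raw.py --parquet data/filings.parquet --out raw \
    --tickers AAPL,MSFT --only form=8-K --dry-run

# Fetch for real: verify checksums and cap at 50 documents while testing.
python scripts/fetch_raw.py --parquet data/filings.parquet --out raw \
    --tickers AAPL,MSFT --only form=8-K --max 50 --verify-sha
```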