#!/usr/bin/env python3
"""
scrape_glossary.py — Fetch the RocketReviews.com glossary index and scrape
each detail page, saving structured JSON to source/glossary/.

Output
------
source/glossary/index.jsonl                        one record per term (raw index fields)
source/glossary/detail/{first-letter}/{slug}.json  full parsed detail per term

Usage
-----
python scripts/glossary/01_scrape.py
python scripts/glossary/01_scrape.py --delay 1.0 --limit 10
python scripts/glossary/01_scrape.py --force          # re-scrape existing files
"""
from __future__ import annotations

import argparse
import json
import logging
import re
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

import requests
from bs4 import BeautifulSoup, Tag
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------
BASE_URL = "https://www.rocketreviews.com"
INDEX_URL = f"{BASE_URL}/glossary.html"
USER_AGENT = "RocketReviews-Dataset/1.0"
DEFAULT_DELAY = 1.0

ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "glossary"
DETAIL_DIR = SOURCE_DIR / "detail"

# ---------------------------------------------------------------------------
# Logging
# ---------------------------------------------------------------------------
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# HTTP session
# ---------------------------------------------------------------------------
def _build_session() -> requests.Session:
    s = requests.Session()
    s.headers["User-Agent"] = USER_AGENT
    retry = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    s.mount("https://", HTTPAdapter(max_retries=retry))
    s.mount("http://", HTTPAdapter(max_retries=retry))
    return s


class RateLimiter:
    def __init__(self, delay: float) -> None:
        self.delay = delay
        self._last: float = 0.0

    def wait(self) -> None:
        elapsed = time.monotonic() - self._last
        if elapsed < self.delay:
            time.sleep(self.delay - elapsed)
        self._last = time.monotonic()


# ---------------------------------------------------------------------------
# Index parsing
# ---------------------------------------------------------------------------
def _slug_from_path(path: str) -> str:
    """
    Extract the slug from a URL path.

    e.g. '/1010-extrusion-230702231652.html' -> '1010-extrusion'
    """
    name = path.lstrip("/").removesuffix(".html")
    name = re.sub(r"-\d{10,}$", "", name)
    return name


def _parse_index(html: str) -> list[dict]:
    """
    Parse the glossary.html static page and return one record per term
    with its name, url and short description.
    """
""" soup = BeautifulSoup(html, "lxml") records = [] # Glossary terms are usually in for link in soup.find_all("a", class_="entry"): path = link.get("href") if not path or not path.endswith(".html"): continue name = link.get_text(strip=True) if not name: continue slug = _slug_from_path(path) url = f"{BASE_URL}{path}" if path.startswith("/") else path # Find the short description in the following blockquote description = None parent_p = link.find_parent("p") if parent_p: blockquote = parent_p.find_next_sibling("blockquote") if blockquote: # Remove the [Read More] link text read_more = blockquote.find("a", string=re.compile(r"Read More", re.I)) if read_more: # Also try to remove the surrounding brackets if they exist as text nodes for sib in read_more.previous_siblings: if isinstance(sib, str) and "[" in sib: sib.replace_with(sib.replace("[", "")) for sib in read_more.next_siblings: if isinstance(sib, str) and "]" in sib: sib.replace_with(sib.replace("]", "")) read_more.extract() description = blockquote.get_text(separator=" ", strip=True) or None records.append({ "slug": slug, "url": url, "term": name, "short_description": description, }) return records # --------------------------------------------------------------------------- # Detail page parsing # --------------------------------------------------------------------------- def _parse_detail(html: str, index_rec: dict) -> dict: """Extract description from the detail page.""" soup = BeautifulSoup(html, "lxml") article = soup.find("div", class_="article") description = None if article: description = article.get_text(separator="\n", strip=True) return { **index_rec, "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"), "description": description, } # --------------------------------------------------------------------------- # Fetch helpers # --------------------------------------------------------------------------- def fetch_index(session: requests.Session) -> list[dict]: log.info("Fetching glossary index from %s", INDEX_URL) resp = session.get(INDEX_URL, timeout=30) resp.raise_for_status() records = _parse_index(resp.text) log.info("Index returned %d records.", len(records)) return records def scrape_detail( session: requests.Session, rate: RateLimiter, index_rec: dict, force: bool = False, ) -> Optional[dict]: slug = index_rec["slug"] shard = slug[0].lower() if slug else "_" shard_dir = DETAIL_DIR / shard dest = shard_dir / f"{slug}.json" if dest.exists() and not force: log.debug("Already scraped %s, skipping.", slug) return None url = index_rec["url"] rate.wait() try: resp = session.get(url, timeout=30) resp.raise_for_status() except requests.RequestException as exc: log.warning("Failed to fetch glossary term %s: %s", slug, exc) return None return _parse_detail(resp.text, index_rec) # --------------------------------------------------------------------------- # Main # --------------------------------------------------------------------------- def main() -> None: parser = argparse.ArgumentParser(description="Scrape RocketReviews.com glossary.") parser.add_argument( "--delay", type=float, default=DEFAULT_DELAY, help=f"Seconds between requests (default: {DEFAULT_DELAY})", ) parser.add_argument( "--limit", type=int, default=None, help="Stop after scraping this many detail pages (useful for testing)", ) parser.add_argument( "--force", action="store_true", help="Re-scrape terms that already have a saved detail file", ) args = parser.parse_args() SOURCE_DIR.mkdir(parents=True, exist_ok=True) DETAIL_DIR.mkdir(parents=True, 
    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch index and write index.jsonl
    # ------------------------------------------------------------------
    records = fetch_index(session)
    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            f.write(json.dumps({**rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page
    # ------------------------------------------------------------------
    if args.limit:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        slug = rec["slug"]
        shard = slug[0].lower() if slug else "_"
        dest = DETAIL_DIR / shard / f"{slug}.json"

        if dest.exists() and not args.force:
            log.debug("Already scraped %s, skipping.", slug)
            skipped += 1
        else:
            result = scrape_detail(session, rate, rec)
            if result is None:
                failed += 1
            else:
                dest.parent.mkdir(parents=True, exist_ok=True)
                try:
                    dest.write_text(
                        json.dumps(result, indent=2, ensure_ascii=False),
                        encoding="utf-8",
                    )
                    ok += 1
                    log.debug("Saved %s", dest.name)
                except OSError as exc:
                    log.warning("Could not write %s: %s", dest, exc)
                    failed += 1

        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i, total, ok, skipped, failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)


if __name__ == "__main__":
    main()
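
# ---------------------------------------------------------------------------
# Output shape (illustrative)
# ---------------------------------------------------------------------------
# A minimal sketch of what the files look like and how they can be read back.
# Field names come from the code above; the concrete values are hypothetical.
#
# source/glossary/index.jsonl — one JSON object per line:
#   {"slug": "1010-extrusion",
#    "url": "https://www.rocketreviews.com/1010-extrusion-230702231652.html",
#    "term": "...", "short_description": "...", "scraped_at": "..."}
#
# source/glossary/detail/1/1010-extrusion.json — the same fields plus
# "description" (full text of the detail page).
#
# Reading the results back, assuming the layout produced above:
#
#   import json
#   from pathlib import Path
#
#   index = [json.loads(line) for line in
#            Path("source/glossary/index.jsonl").read_text().splitlines()]
#   details = [json.loads(p.read_text())
#              for p in Path("source/glossary/detail").glob("*/*.json")]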