"""
scrape_designs.py — Fetch the RocketReviews.com OpenRocket and RockSim indexes
and scrape each detail page, downloading the binary design file and saving
structured JSON metadata to source/designs/.

Output
------
source/designs/index.jsonl          one record per design (raw index fields)
source/designs/detail/{id}.json     full parsed metadata per design
source/designs/files/ork/{id}.ork   downloaded OpenRocket files
source/designs/files/rkt/{id}.rkt   downloaded RockSim files

Usage
-----
python scripts/designs/01_scrape.py
python scripts/designs/01_scrape.py --delay 1.0 --limit 10
python scripts/designs/01_scrape.py --force   # re-scrape existing files
"""
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import json |
| import logging |
| import re |
| import sys |
| import time |
| from datetime import datetime, timezone |
| from pathlib import Path |
| from typing import Optional, Tuple |
|
|
| import requests |
| from bs4 import BeautifulSoup |
| from requests.adapters import HTTPAdapter |
| from urllib3.util.retry import Retry |
|
|
| |
| |
| |
|
|
# ---------------------------------------------------------------------------
# Scrape targets and on-disk layout
# ---------------------------------------------------------------------------

BASE_URL = "https://www.rocketreviews.com"

# One JSON index endpoint per design-file format; "ext" is the extension
# used when saving the downloaded binary files.
ENDPOINTS = [
    {
        "format": "openrocket",
        "url": f"{BASE_URL}/data/openrocket/openrocket.php?search=&optimized=&type=",
        "ext": "ork",
    },
    {
        "format": "rocksim",
        "url": f"{BASE_URL}/data/rocksim/rocksim.php?search=&optimized=&type=",
        "ext": "rkt",
    },
]

USER_AGENT = "RocketReviews-Dataset/1.0"  # identifies this scraper to the site
DEFAULT_DELAY = 1.0  # polite default pause (seconds) between requests

# Repository root: three levels up from this script (scripts/designs/<file>).
ROOT = Path(__file__).parents[2]
SOURCE_DIR = ROOT / "source" / "designs"
DETAIL_DIR = SOURCE_DIR / "detail"
FILES_DIR = SOURCE_DIR / "files"
|
|
| |
| |
| |
|
|
# Send log output to stdout so progress is visible when piped or run in CI.
_stdout_handler = logging.StreamHandler(sys.stdout)
logging.basicConfig(
    format="%(asctime)s %(levelname)s %(message)s",
    level=logging.INFO,
    handlers=[_stdout_handler],
)
log = logging.getLogger(__name__)
|
|
| |
| |
| |
|
|
|
|
def _build_session() -> requests.Session:
    """Create a requests session with a polite User-Agent and GET retries.

    Retries up to 3 times with exponential backoff on transient HTTP
    errors (429 and common 5xx responses) for both http and https.
    """
    retry_policy = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    adapter = HTTPAdapter(max_retries=retry_policy)
    session = requests.Session()
    session.headers["User-Agent"] = USER_AGENT
    for scheme in ("https://", "http://"):
        session.mount(scheme, adapter)
    return session
|
|
|
|
class RateLimiter:
    """Enforce a minimum interval between successive operations."""

    def __init__(self, delay: float) -> None:
        self.delay = delay        # minimum seconds between wait() returns
        self._last: float = 0.0   # monotonic timestamp of the previous wait()

    def wait(self) -> None:
        """Sleep just long enough so at least `delay` seconds separate calls."""
        remaining = self.delay - (time.monotonic() - self._last)
        if remaining > 0:
            time.sleep(remaining)
        self._last = time.monotonic()
|
|
|
|
| |
| |
| |
|
|
| def _normalize_string(val: Optional[str]) -> Optional[str]: |
| if not val: |
| return None |
| val = val.strip() |
| if val in ("-Unknown-", "-", "", "Unknown"): |
| return None |
| return val |
|
|
| def _parse_measure(text: Optional[str]) -> Tuple[Optional[float], Optional[str]]: |
| """Parse '28.6470 inches from front' -> (28.647, 'Front')""" |
| if not text: |
| return None, None |
| m = re.search(r"([\d\.]+)\s+inches\s+from\s+(front|rear)", text, re.I) |
| if m: |
| return float(m.group(1)), m.group(2).title() |
| return None, None |
|
|
| def _parse_margin(text: Optional[str]) -> Tuple[Optional[float], Optional[str]]: |
| """Parse '4.03 Overstable' -> (4.03, 'Overstable')""" |
| if not text: |
| return None, None |
| m = re.search(r"([\d\.]+)\s+(\w+)", text) |
| if m: |
| return float(m.group(1)), m.group(2).title() |
| return None, None |
|
|
def _extract_person(soup: BeautifulSoup, label: str) -> Optional[dict]:
    """
    Extract a person (Contributor or Designer) and their URL from a detail page.

    Handles two markup shapes:
      <strong>Designer:</strong> Name            (optionally linked)
      Contributed by <a href="...">Name</a>      (plain text inside a <p>/<div>)

    Returns {"name": ..., "url": ...} (either value may be None) or None
    when no matching markup is found.
    """
    # Preferred shape: a <strong> tag whose text matches "<label>" or "<label>:".
    strong = soup.find("strong", string=re.compile(rf"{label}:?", re.I))
    if strong:
        # Linked person: the <a> immediately following the <strong>.
        a_tag = strong.find_next_sibling("a")
        if a_tag and a_tag.get("href"):
            name = a_tag.get_text(strip=True)
            # Absolutize site-relative hrefs.
            url = a_tag["href"] if a_tag["href"].startswith("http") else f"{BASE_URL}{a_tag['href']}"
            return {"name": _normalize_string(name), "url": url}
        elif strong.next_sibling:
            # Unlinked person: next sibling may be a NavigableString (no
            # get_text attribute), hence the hasattr check.
            name = strong.next_sibling.get_text(strip=True) if hasattr(strong.next_sibling, "get_text") else str(strong.next_sibling).strip()
            return {"name": _normalize_string(name), "url": None}

    # Fallback shape: any text node mentioning the label inside a <p> or <div>.
    text_node = soup.find(string=re.compile(rf"{label}", re.I))
    if text_node and text_node.parent.name in ("p", "div"):
        parent = text_node.parent
        a_tag = parent.find("a")
        if a_tag and a_tag.get("href"):
            name = a_tag.get_text(strip=True)
            url = a_tag["href"] if a_tag["href"].startswith("http") else f"{BASE_URL}{a_tag['href']}"
            return {"name": _normalize_string(name), "url": url}

        # No link: strip the label prefix (e.g. "Contributed by:") from the
        # container's text and treat the remainder as the name.
        raw_text = parent.get_text(strip=True)
        name = re.sub(rf"{label}(?: by)?:?\s*", "", raw_text, flags=re.I).strip()
        return {"name": _normalize_string(name), "url": None}

    return None
|
|
| def _slug_from_path(path: str) -> str: |
| """ |
| Extract the slug from a url path. |
| e.g. '/custom-18-aircraft-plywood-trapezoid-fin-set-3163.html' -> |
| 'custom-18-aircraft-plywood-trapezoid-fin-set' |
| """ |
| name = path.lstrip("/").removesuffix(".html") |
| |
| name = re.sub(r"-\d{10,}$", "", name) |
| |
| name = re.sub(r"-\d{4,8}$", "", name) |
| return name |
|
|
|
|
def _parse_detail(html: str, index_rec: dict, format_ext: str) -> dict:
    """Parse one design detail page into a flat metadata dict.

    Combines fields from the index record (`index_rec`) with values
    scraped out of the page HTML: download links, contributor/designer,
    CG/CP/margin measurements, free-text comments, and a parts list.
    """
    soup = BeautifulSoup(html, "lxml")

    # Direct download link: any anchor whose href contains '/file-'.
    dl_link = soup.find('a', href=lambda h: h and ('/file-' in h))
    file_url = None
    if dl_link:
        href = dl_link['href']
        file_url = href if href.startswith("http") else f"{BASE_URL}{href}"

    # External link: an anchor wrapping a download.gif icon that does NOT
    # point at a site-hosted '/file-' URL (i.e. hosted elsewhere).
    external_url = None
    for img in soup.find_all('img', src=lambda s: s and 'download.gif' in s):
        a = img.find_parent('a')
        if a and a.get('href') and '/file-' not in a['href']:
            external_url = a['href']
            break

    # Fallback: some pages only link the file on rocketryforum.com.
    if not external_url:
        ext_link = soup.find('a', href=lambda h: h and 'rocketryforum.com' in h.lower())
        external_url = ext_link['href'] if ext_link else None

    contributor = _extract_person(soup, "Contributed")
    designer = _extract_person(soup, "Designer")

    # CG/CP/margin/comments appear as "<b>Label:</b> value" pairs; the value
    # is either the tag's next text sibling or, failing that, the parent's
    # text with the label removed.
    cg_raw, cp_raw, margin_raw, comments = None, None, None, None
    for b in soup.find_all(['b', 'strong']):
        text = b.get_text(strip=True)
        if "CG:" in text:
            cg_raw = b.next_sibling.strip() if b.next_sibling and isinstance(b.next_sibling, str) else b.parent.get_text(strip=True).replace("CG:", "").strip()
        elif "CP:" in text:
            cp_raw = b.next_sibling.strip() if b.next_sibling and isinstance(b.next_sibling, str) else b.parent.get_text(strip=True).replace("CP:", "").strip()
        elif "Margin:" in text:
            margin_raw = b.next_sibling.strip() if b.next_sibling and isinstance(b.next_sibling, str) else b.parent.get_text(strip=True).replace("Margin:", "").strip()
        elif "Comments:" in text:
            comments = b.next_sibling.strip() if b.next_sibling and isinstance(b.next_sibling, str) else b.parent.get_text(strip=True).replace("Comments:", "").strip()

    cg_loc, cg_from = _parse_measure(cg_raw)
    cp_loc, cp_from = _parse_measure(cp_raw)
    margin_val, margin_status = _parse_margin(margin_raw)

    # Parts breakdown: slugs of every linked part under the
    # "Parts Breakdown" heading's following <ul>.
    parts = []
    parts_h4 = soup.find(['h4', 'h5'], string=re.compile(r'Parts Breakdown', re.I))
    if parts_h4:
        parts_list = parts_h4.find_next_sibling('ul')
        if parts_list:
            for a in parts_list.find_all('a'):
                href = a.get('href')
                if href:
                    parts.append(_slug_from_path(href))

    design_id = int(index_rec["id"])
    # Relative path (under SOURCE_DIR) where the binary file will be saved.
    local_path = f"files/{format_ext}/{design_id:06d}.{format_ext}"

    return {
        "id": design_id,
        "format": index_rec["format"],
        "name": _normalize_string(index_rec.get("name")),
        "title": _normalize_string(index_rec.get("title")),
        "type": _normalize_string(index_rec.get("type")),
        "optimized": _normalize_string(index_rec.get("optimized")),
        "manufacturer": _normalize_string(index_rec.get("manufacturer")),
        "added_date": index_rec.get("added"),
        "url": index_rec.get("url"),
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "contributor": contributor,
        "designer": designer,
        "comments": _normalize_string(comments),
        "cg": {"location_in": cg_loc, "location_from": cg_from} if cg_loc is not None else None,
        "cp": {"location_in": cp_loc, "location_from": cp_from} if cp_loc is not None else None,
        "margin": margin_val,
        "margin_status": margin_status,
        "parts": parts,
        "file_url": file_url,
        "external_url": external_url,
        # local_path only makes sense when there is a file to download.
        "local_path": local_path if file_url else None
    }
|
|
|
|
| |
| |
| |
|
|
def fetch_index(session: requests.Session, endpoint: dict) -> list[dict]:
    """Download one format's JSON index and normalize its records.

    Each record gains the endpoint's format, an absolutized URL, and an
    internal '_ext' field naming the file extension for downloads.
    """
    log.info("Fetching %s index from %s", endpoint["format"], endpoint["url"])
    response = session.get(endpoint["url"], timeout=30)
    response.raise_for_status()

    normalized: list[dict] = []
    for record in response.json().get("records", []):
        path = record.get("url", "")
        absolute = f"{BASE_URL}{path}" if path.startswith("/") else path
        normalized.append({
            **record,
            "format": endpoint["format"],
            "url": absolute,
            "_ext": endpoint["ext"],
        })

    log.info("Index returned %d records for %s.", len(normalized), endpoint["format"])
    return normalized
|
|
def download_file(session: requests.Session, url: str, dest: Path) -> bool:
    """Download `url` to `dest`, streaming to disk.

    Returns True on success.  On failure the partially written file (if
    any) is removed and False is returned, so a later run retries cleanly.
    """
    try:
        with session.get(url, stream=True, timeout=30) as r:
            r.raise_for_status()
            with dest.open("wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
        return True
    # BUGFIX: also catch OSError — previously a disk error (full disk,
    # permissions) during open/write escaped this handler, crashed the
    # scrape loop, and left a truncated file behind.
    except (requests.RequestException, OSError) as exc:
        log.warning("Failed to download file %s: %s", url, exc)
        # Remove any partial download so the cached-skip logic elsewhere
        # does not mistake it for a complete file.
        if dest.exists():
            dest.unlink()
        return False
|
|
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """Scrape one design's detail page and download its binary file.

    Returns the parsed metadata dict on success, or None when the design
    was skipped (already scraped), the page fetch failed, or the binary
    download failed.  `force=True` bypasses the already-scraped check.
    """
    design_id = int(index_rec["id"])
    # Detail JSON is sharded into directories of 1000 ids each.
    shard = f"{design_id // 1000:03d}"
    shard_dir = DETAIL_DIR / shard
    dest_json = shard_dir / f"{design_id:06d}.json"
    format_ext = index_rec["_ext"]
    dest_file = FILES_DIR / format_ext / f"{design_id:06d}.{format_ext}"

    # Skip designs already scraped — but only if either no file was
    # expected (local_path is None) or the file actually exists on disk.
    if dest_json.exists() and not force:
        try:
            with dest_json.open("r", encoding="utf-8") as f:
                cached = json.load(f)
            if cached.get("local_path") is None or dest_file.exists():
                log.debug("Already scraped %s, skipping.", design_id)
                return None
        except (OSError, json.JSONDecodeError):
            # Unreadable/corrupt cache: fall through and re-scrape.
            pass

    url = index_rec["url"]
    rate.wait()  # throttle before the page request

    try:
        resp = session.get(url, timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch design %s: %s", design_id, exc)
        return None

    metadata = _parse_detail(resp.text, index_rec, format_ext)

    # Download the binary design file when the page links one directly;
    # a failed download discards the metadata so the design is retried.
    file_url = metadata.get("file_url")
    if file_url:
        rate.wait()  # throttle before the file request too
        success = download_file(session, file_url, dest_file)
        if not success:
            log.warning("Could not download binary file for %s, skipping metadata.", design_id)
            return None
    elif metadata.get("external_url"):
        log.info("No direct file for %s, but external link found. Saving metadata.", design_id)
    else:
        log.warning("No file download link or external link found for %s on page %s", design_id, url)

    return metadata
|
|
|
|
| |
| |
| |
|
|
def main() -> None:
    """Entry point: fetch both format indexes, then scrape every detail page.

    Writes index.jsonl first so the raw index survives even if detail
    scraping is interrupted, then iterates detail pages with rate limiting
    and prints progress every 25 designs.
    """
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com designs (OpenRocket & RockSim).")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape designs that already have saved files",
    )
    args = parser.parse_args()

    # Create the whole output tree up front so later writes never fail on
    # a missing directory.
    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)
    (FILES_DIR / "ork").mkdir(parents=True, exist_ok=True)
    (FILES_DIR / "rkt").mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # Merge both format indexes into one flat work list.
    all_records = []
    for endpoint in ENDPOINTS:
        records = fetch_index(session, endpoint)
        all_records.extend(records)

    # Persist the raw index (dropping the internal '_ext' helper field).
    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in all_records:
            out_rec = {k: v for k, v in rec.items() if k != "_ext"}
            f.write(json.dumps({**out_rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d total index records to %s", len(all_records), index_path)

    # BUGFIX: compare against None so `--limit 0` truncates to zero records
    # instead of being treated as "no limit" (0 is falsy).
    if args.limit is not None:
        all_records = all_records[: args.limit]

    ok = skipped = failed = 0
    total = len(all_records)

    for i, rec in enumerate(all_records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        # None means skipped (cached) or failed inside scrape_detail.
        if result is None:
            skipped += 1
            continue

        # Shard the detail JSON into directories of 1000 ids each.
        design_id = int(rec["id"])
        shard = f"{design_id // 1000:03d}"
        shard_dir = DETAIL_DIR / shard
        shard_dir.mkdir(parents=True, exist_ok=True)
        dest = shard_dir / f"{design_id:06d}.json"
        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved metadata %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        # Progress heartbeat every 25 designs and at the very end.
        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)


if __name__ == "__main__":
    main()
|
|