#!/usr/bin/env python3
"""
Playlists build script — Singular Particular Space
spaces.exopraxist.org

Data sources:
  MusicBrainz (no key) — recording link + artist URL (official site, Bandcamp, etc.)
  Odesli / song.link   — YouTube video ID for embeds (10 req/min without key)

First run: ~3 hours (cached). Subsequent runs: seconds.
Script resumes from cache if interrupted — safe to run overnight and re-run.

Usage:
  python3 build.py                 # full build
  python3 build.py --hub           # regenerate hub only (instant)
  python3 build.py --playlist      # single playlist test
  python3 build.py --force-odesli  # re-fetch Odesli data only
  python3 build.py --force-mb      # re-fetch MusicBrainz data only

Optional env vars (no keys required — just speeds things up):
  ODESLI_API_KEY — higher rate limit from Odesli (email developers@song.link)
"""

import csv
import json
import os
import re
import sys
import time
import urllib.error  # explicit: http_get catches urllib.error.HTTPError
import urllib.parse
import urllib.request
from pathlib import Path

# ─── Config ───────────────────────────────────────────────────────────────────

SCRIPT_DIR = Path(__file__).parent
CACHE_FILE = SCRIPT_DIR / "cache.json"
ODESLI_KEY = os.environ.get("ODESLI_API_KEY", "")

FORCE_ODESLI = "--force-odesli" in sys.argv
FORCE_MB = "--force-mb" in sys.argv
HUB_ONLY = "--hub" in sys.argv

# Optional positional value following --playlist (slug or CSV stem).
_playlist_arg = None
if "--playlist" in sys.argv:
    i = sys.argv.index("--playlist")
    if i + 1 < len(sys.argv):
        _playlist_arg = sys.argv[i + 1]

# ─── Rate limiters ────────────────────────────────────────────────────────────

_last_mb_call = 0.0
_last_odesli_call = 0.0
MB_INTERVAL = 1.1       # 1 req/sec free tier
ODESLI_INTERVAL = 8.0   # 10 req/min without key — 8s gives safe margin

# Sentinel: call failed with rate limit or error — do not cache
FETCH_FAILED = object()


def _wait(last: float, interval: float) -> float:
    """Sleep until at least `interval` seconds have passed since `last`.

    Returns the new timestamp to store as the last-call time.
    """
    elapsed = time.time() - last
    if elapsed < interval:
        time.sleep(interval - elapsed)
    return time.time()


# ─── HTTP helpers ─────────────────────────────────────────────────────────────

MB_HEADERS = {"User-Agent": "SingularParticularSpace/1.0 (spaces.exopraxist.org)"}


def http_get(url: str, headers: dict = None):
    """
    Returns parsed JSON dict on success or 404.
    Returns FETCH_FAILED sentinel on 429 / 5xx / network error (do not cache).
    """
    try:
        req = urllib.request.Request(url, headers=headers or {})
        with urllib.request.urlopen(req, timeout=15) as resp:
            return json.loads(resp.read().decode("utf-8"))
    except urllib.error.HTTPError as e:
        if e.code == 404:
            return {}  # Not found — cache as empty, won't change
        if e.code == 429:
            print("  429 rate limit — backing off 30s", file=sys.stderr)
            time.sleep(30)
            return FETCH_FAILED
        print(f"  HTTP {e.code}: {url}", file=sys.stderr)
        return FETCH_FAILED
    except Exception as e:
        # Network errors, timeouts, bad JSON — transient; never cached.
        print(f"  error: {e}", file=sys.stderr)
        return FETCH_FAILED


def mb_get(url: str):
    """Rate-limited GET against MusicBrainz (1 req/sec free tier)."""
    global _last_mb_call
    _last_mb_call = _wait(_last_mb_call, MB_INTERVAL)
    return http_get(url, MB_HEADERS)


def odesli_get(url: str):
    """Rate-limited GET against Odesli (10 req/min without key)."""
    global _last_odesli_call
    _last_odesli_call = _wait(_last_odesli_call, ODESLI_INTERVAL)
    return http_get(url)


# ─── Cache ────────────────────────────────────────────────────────────────────
# Flat dict with namespaced keys:
#   "mb:isrc:{ISRC}"      → { mb_recording_url, mb_artist_id }
#   "mb:artist:{MB_ID}"   → { artist_url, artist_url_type }
#   "odesli:{SPOTIFY_ID}" → { youtube_video_id, odesli_page_url }


def load_cache() -> dict:
    """Load the JSON cache file; a missing or corrupt file yields {}."""
    if CACHE_FILE.exists():
        try:
            return json.loads(CACHE_FILE.read_text("utf-8"))
        except Exception:
            return {}
    return {}


def save_cache(cache: dict):
    """Write the cache to disk (pretty-printed, UTF-8)."""
    CACHE_FILE.write_text(json.dumps(cache, indent=2, ensure_ascii=False), "utf-8")


# ─── MusicBrainz ──────────────────────────────────────────────────────────────


def mb_isrc_lookup(isrc: str):
    """ISRC → { mb_recording_url, mb_artist_id } or FETCH_FAILED"""
    url = f"https://musicbrainz.org/ws/2/isrc/{isrc}?inc=artist-credits&fmt=json"
    data = mb_get(url)
    if data is FETCH_FAILED:
        return FETCH_FAILED
    result = {"mb_recording_url": "", "mb_artist_id": ""}
    recs = data.get("recordings", [])
    if not recs:
        return result
    rec = recs[0]
    rec_id = rec.get("id", "")
    if rec_id:
        result["mb_recording_url"] = f"https://musicbrainz.org/recording/{rec_id}"
    # First credited artist only — enough to resolve one artist URL later.
    credits = rec.get("artist-credit", [])
    for credit in credits:
        if isinstance(credit, dict) and "artist" in credit:
            result["mb_artist_id"] = credit["artist"].get("id", "")
            break
    return result


# Artist URL type priority — ordered best to worst
ARTIST_URL_PRIORITY = [
    "official homepage", "bandcamp", "soundcloud", "patreon", "linktree",
    "youtube", "myspace", "instagram", "twitter", "facebook",
    "last.fm", "discogs", "wikidata", "wikipedia",
]


def mb_artist_url_lookup(mb_artist_id: str):
    """MB artist ID → { artist_url, artist_url_type } or FETCH_FAILED"""
    url = f"https://musicbrainz.org/ws/2/artist/{mb_artist_id}?inc=url-rels&fmt=json"
    data = mb_get(url)
    if data is FETCH_FAILED:
        return FETCH_FAILED
    result = {"artist_url": "", "artist_url_type": ""}
    best_rank = len(ARTIST_URL_PRIORITY) + 1
    for rel in data.get("relations", []):
        rel_type = rel.get("type", "").lower()
        href = rel.get("url", {}).get("resource", "")
        if not href:
            continue
        # Priority match against either the relation type or the URL itself
        # (catches e.g. a generic "social network" rel pointing at bandcamp.com).
        for i, ptype in enumerate(ARTIST_URL_PRIORITY):
            if ptype in rel_type or ptype in href:
                if i < best_rank:
                    best_rank = i
                    result["artist_url"] = href
                    result["artist_url_type"] = rel_type
                break
    return result


# ─── Odesli ───────────────────────────────────────────────────────────────────


def odesli_lookup(spotify_track_id: str):
    """Spotify track ID → { youtube_video_id, odesli_page_url } or FETCH_FAILED"""
    spotify_uri = f"spotify:track:{spotify_track_id}"
    params = f"url={urllib.parse.quote(spotify_uri)}&platform=spotify&type=song"
    if ODESLI_KEY:
        params += f"&key={ODESLI_KEY}"
    url = f"https://api.song.link/v1-alpha.1/links?{params}"
    data = odesli_get(url)
    if data is FETCH_FAILED:
        return FETCH_FAILED
    result = {"youtube_video_id": "", "odesli_page_url": ""}
    if not data:
        return result
    result["odesli_page_url"] = data.get("pageUrl", "")
    yt_url = data.get("linksByPlatform", {}).get("youtube", {}).get("url", "")
    if yt_url:
        result["youtube_video_id"] = extract_youtube_id(yt_url)
    return result


def extract_youtube_id(url: str) -> str:
    """Pull the 11-character video ID out of a youtu.be or watch?v= URL."""
    m = re.search(r"youtu\.be/([A-Za-z0-9_\-]{11})", url)
    if m:
        return m.group(1)
    m = re.search(r"[?&]v=([A-Za-z0-9_\-]{11})", url)
    if m:
        return m.group(1)
    return ""


# ─── CSV / slug helpers ───────────────────────────────────────────────────────


def parse_csv(path: Path) -> list:
    """Read a playlist export CSV into a list of row dicts."""
    with open(path, newline="", encoding="utf-8") as f:
        return list(csv.DictReader(f))


def make_slug(csv_filename: str) -> str:
    """Filename → URL-safe slug: lowercase, hyphenated, [a-z0-9-] only."""
    name = Path(csv_filename).stem
    name = name.replace("_", "-").lower()
    name = re.sub(r"[^a-z0-9\-]", "", name)
    name = re.sub(r"-{2,}", "-", name)
    return name.strip("-")


def make_display_name(csv_filename: str) -> str:
    """Filename → human-readable title ("_my_mix.csv" → "My Mix")."""
    name = Path(csv_filename).stem.strip("_").replace("_", " ")
    return name.title()


def spotify_track_id(uri: str) -> str:
    """Extract the ID from a "spotify:track:<id>" URI, else ""."""
    parts = uri.split(":")
    return parts[2] if len(parts) == 3 and parts[1] == "track" else ""


def ms_to_mmss(ms) -> str:
    """Milliseconds → "m:ss"; em-dash on unparseable input."""
    try:
        s = int(ms) // 1000
        return f"{s // 60}:{s % 60:02d}"
    except Exception:
        return "—"


def ms_to_hhmmss(ms: int) -> str:
    """Milliseconds → "h:mm:ss" (hour omitted when zero)."""
    s = ms // 1000
    h, m, s = s // 3600, (s % 3600) // 60, s % 60
    return f"{h}:{m:02d}:{s:02d}" if h else f"{m}:{s:02d}"


def get_year(date: str) -> str:
    """First four characters of an ISO date string, or ""."""
    return date[:4] if date else ""


def esc(s: str) -> str:
    """HTML-escape &, <, > and double quotes. Ampersand first so already-
    escaped entities are not produced out of order."""
    return (str(s)
            .replace("&", "&amp;").replace("<", "&lt;")
            .replace(">", "&gt;").replace('"', "&quot;"))


# ─── Fetch pipeline ───────────────────────────────────────────────────────────


def fetch_all(playlists: list, cache: dict):
    """
    playlists: list of (slug, display_name, tracks, csv_path)
    Fills cache in-place. Saves to disk every 50 calls.
    """
    # Collect unique ISRCs and track IDs
    isrc_map = {}      # isrc (upper) → (artist_name, track_name)
    trackid_map = {}   # spotify_track_id → True (ordered set)
    for slug, display, tracks, _ in playlists:
        for t in tracks:
            isrc = t.get("ISRC", "").strip().upper()
            tid = spotify_track_id(t.get("Track URI", ""))
            if isrc and isrc not in isrc_map:
                artist = t.get("Artist Name(s)", "").split(",")[0].strip()
                title = t.get("Track Name", "").strip()
                isrc_map[isrc] = (artist, title)
            if tid:
                trackid_map[tid] = True

    # ── MusicBrainz ISRC lookups ──────────────────────────────────────────────
    uncached_isrcs = [
        i for i in isrc_map if FORCE_MB or f"mb:isrc:{i}" not in cache
    ]
    total = len(uncached_isrcs)
    print(f"MusicBrainz: {len(isrc_map)} ISRCs total, {total} to fetch")
    for n, isrc in enumerate(uncached_isrcs, 1):
        result = mb_isrc_lookup(isrc)
        if result is not FETCH_FAILED:
            cache[f"mb:isrc:{isrc}"] = result
        if n % 50 == 0:
            save_cache(cache)
            print(f"  MB ISRC {n}/{total}")
    save_cache(cache)

    # ── MusicBrainz artist URL lookups ────────────────────────────────────────
    # Collect unique MB artist IDs from ISRC results
    artist_ids = set()
    for isrc in isrc_map:
        mb_data = cache.get(f"mb:isrc:{isrc}", {})
        aid = mb_data.get("mb_artist_id", "")
        if aid:
            artist_ids.add(aid)

    uncached_artists = [
        a for a in artist_ids if FORCE_MB or f"mb:artist:{a}" not in cache
    ]
    total = len(uncached_artists)
    print(f"MusicBrainz: {len(artist_ids)} artists total, {total} to fetch")
    for n, aid in enumerate(uncached_artists, 1):
        result = mb_artist_url_lookup(aid)
        if result is not FETCH_FAILED:
            cache[f"mb:artist:{aid}"] = result
        if n % 50 == 0:
            save_cache(cache)
            print(f"  MB artist {n}/{total}")
    save_cache(cache)

    # ── Odesli track lookups ──────────────────────────────────────────────────
    uncached_tracks = [
        tid for tid in trackid_map if FORCE_ODESLI or f"odesli:{tid}" not in cache
    ]
    total = len(uncached_tracks)
    mins = round(total * ODESLI_INTERVAL / 60)
    print(f"Odesli: {len(trackid_map)} tracks total, {total} to fetch (~{mins} min)")
    for n, tid in enumerate(uncached_tracks, 1):
        result = odesli_lookup(tid)
        if result is not FETCH_FAILED:
            cache[f"odesli:{tid}"] = result
        if n % 20 == 0:
            save_cache(cache)
            print(f"  Odesli {n}/{total}")
    save_cache(cache)
    print("Fetch complete.")


# ─── HTML ─────────────────────────────────────────────────────────────────────
# NOTE(review): the HTML markup in the templates below was lost in a text
# extraction of this file (tags were stripped, leaving only text content).
# The structure here is a minimal reconstruction carrying the same data —
# restore the original markup from version control if available.

GOOGLE_FONTS = (
    '<link rel="preconnect" href="https://fonts.googleapis.com">'
    '<link href="https://fonts.googleapis.com/css2?family=Rambla&display=swap"'
    ' rel="stylesheet">'
)  # NOTE(review): reconstructed — SHARED_CSS uses the 'Rambla' family

SHARED_CSS = """
:root {
  --bg-void: #04060b;
  --text-warm: #e8d5b8;
  --text-muted: #7a6f5e;
  --ff-primary: #a855f7;
  --ff-bright: #c084fc;
  --ff-deep: #6d28d9;
  --ff-glow: rgba(168, 85, 247, 0.18);
}
* { box-sizing: border-box; margin: 0; padding: 0; }
body {
  background-color: var(--bg-void);
  color: var(--text-warm);
  font-family: 'Rambla', sans-serif;
  line-height: 1.5;
  min-height: 100vh;
}
"""


def build_hub(playlists: list) -> str:
    """playlists: sorted list of {slug, display_name, track_count}"""
    total = sum(p["track_count"] for p in playlists)
    n = len(playlists)
    cards = "\n".join(
        f'<a class="card" href="{p["slug"]}.html">\n'
        f'  <div class="card-name">{esc(p["display_name"])}</div>\n'
        f'  <div class="card-count">{p["track_count"]} tracks</div>\n'
        f'</a>'
        for p in playlists
    )
    return f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>PLAYLISTS | Singular Particular</title>
{GOOGLE_FONTS}
<style>{SHARED_CSS}</style>
</head>
<body>
<a class="back" href="/">← Space</a>
<h1>PLAYLISTS</h1>
<p class="meta">{n} playlists • {total:,} tracks</p>
<div class="grid">
{cards}
</div>
</body>
</html>
"""


def build_track_card(track: dict, idx: int, cache: dict) -> str:
    """Render one track as an HTML card, pulling enrichment from the cache."""
    num = f"{idx:02d}"
    name = esc(track.get("Track Name", ""))
    artists_raw = track.get("Artist Name(s)", "")
    artists = esc(artists_raw)
    album = esc(track.get("Album Name", ""))
    year = esc(get_year(track.get("Album Release Date", "")))
    duration = ms_to_mmss(track.get("Track Duration (ms)", 0))
    art_url = esc(track.get("Album Image URL", ""))
    isrc = track.get("ISRC", "").strip().upper()
    tid = spotify_track_id(track.get("Track URI", ""))

    # Pull cached data
    mb_data = cache.get(f"mb:isrc:{isrc}", {})
    mb_rec_url = esc(mb_data.get("mb_recording_url", ""))
    mb_art_id = mb_data.get("mb_artist_id", "")
    art_data = cache.get(f"mb:artist:{mb_art_id}", {}) if mb_art_id else {}
    artist_url = esc(art_data.get("artist_url", ""))
    od_data = cache.get(f"odesli:{tid}", {}) if tid else {}
    yt_id = od_data.get("youtube_video_id", "")
    spotify_url = esc(f"https://open.spotify.com/track/{tid}") if tid else ""

    # Spotify link (disabled span when no track ID)
    spotify_link = (
        f'<a class="tlink" href="{spotify_url}" target="_blank" rel="noopener">[SPOTIFY]</a>'
        if spotify_url else
        '<span class="tlink off">[SPOTIFY]</span>'
    )
    # MusicBrainz link
    mb_link = (
        f'<a class="tlink" href="{mb_rec_url}" target="_blank" rel="noopener">[MUSICBRAINZ]</a>'
        if mb_rec_url else
        '<span class="tlink off">[MUSICBRAINZ]</span>'
    )
    # YouTube — embed toggle or search link
    yt_search = (
        "https://www.youtube.com/results?search_query="
        + urllib.parse.quote(artists_raw + " " + track.get("Track Name", ""))
    )
    if yt_id:
        yt_link = f'<button class="tlink yt-toggle" data-yt="yt-{idx}">[YOUTUBE]</button>'
        embed_html = f"""<div class="yt-embed" id="yt-{idx}" hidden>
<iframe src="https://www.youtube.com/embed/{yt_id}" loading="lazy"
        allow="encrypted-media; picture-in-picture" allowfullscreen></iframe>
</div>"""
    else:
        yt_link = f'<a class="tlink" href="{yt_search}" target="_blank" rel="noopener">[YOUTUBE]</a>'
        embed_html = ""
    # Artist link
    artist_link = (
        f'<a class="tlink" href="{artist_url}" target="_blank" rel="noopener">[ARTIST]</a>'
        if artist_url else
        '<span class="tlink off">[ARTIST]</span>'
    )
    return f"""<div class="track">
  <img class="art" src="{art_url}" alt="" loading="lazy">
  <div class="track-head"><span class="num">{num}</span> <span class="dur">{duration}</span></div>
  <div class="track-name">{name}</div>
  <div class="track-artists">{artists}</div>
  <div class="track-album">{album} • {year}</div>
  <div class="track-links">{spotify_link} {mb_link} {yt_link} {artist_link}</div>
  {embed_html}
</div>
"""


def build_playlist_page(display: str, slug: str, tracks: list, cache: dict) -> str:
    """Render the full HTML page for one playlist."""
    total_ms = sum(int(t.get("Track Duration (ms)", 0) or 0) for t in tracks)
    total_time = ms_to_hhmmss(total_ms)
    n = len(tracks)
    cards = "".join(build_track_card(t, i + 1, cache) for i, t in enumerate(tracks))
    return f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>{esc(display)} | PLAYLISTS</title>
{GOOGLE_FONTS}
<style>{SHARED_CSS}</style>
</head>
<body>
<a class="back" href="playlists.html">← PLAYLISTS</a>
<h1>{esc(display)}</h1>
<p class="meta">{n} TRACKS • {total_time}</p>
<div class="tracks">
{cards}
</div>
</body>
</html>
"""


# ─── Main ─────────────────────────────────────────────────────────────────────


def main():
    csv_files = sorted(SCRIPT_DIR.glob("*.csv"))
    if not csv_files:
        print("No CSV files found.", file=sys.stderr)
        sys.exit(1)

    # Parse all CSVs
    all_playlists = []
    for csv_path in csv_files:
        slug = make_slug(csv_path.name)
        display = make_display_name(csv_path.name)
        tracks = parse_csv(csv_path)
        all_playlists.append((slug, display, tracks, csv_path))

    playlists_meta = sorted([
        {"slug": slug, "display_name": display, "track_count": len(tracks)}
        for slug, display, tracks, _ in all_playlists
    ], key=lambda p: p["display_name"].lower())

    # Single playlist test mode
    if _playlist_arg:
        match = next(
            ((s, d, t, p) for s, d, t, p in all_playlists
             if s == _playlist_arg or p.stem == _playlist_arg),
            None
        )
        if not match:
            slugs = ", ".join(s for s, *_ in all_playlists)
            print(f"Not found: '{_playlist_arg}'\nAvailable: {slugs}", file=sys.stderr)
            sys.exit(1)
        slug, display, tracks, _ = match
        print(f"Test: '{display}' ({len(tracks)} tracks)")
        cache = load_cache()
        fetch_all([(slug, display, tracks, None)], cache)
        out = SCRIPT_DIR / f"{slug}.html"
        out.write_text(build_playlist_page(display, slug, tracks, cache), "utf-8")
        print(f"Written → {slug}.html")
        return

    # Hub only
    if HUB_ONLY:
        hub = build_hub(playlists_meta)
        (SCRIPT_DIR / "playlists.html").write_text(hub, "utf-8")
        print("Hub written → playlists.html")
        return

    # Full build
    cache = load_cache()
    fetch_all(all_playlists, cache)
    (SCRIPT_DIR / "playlists.html").write_text(build_hub(playlists_meta), "utf-8")
    print("Hub written → playlists.html")
    for slug, display, tracks, _ in all_playlists:
        out = SCRIPT_DIR / f"{slug}.html"
        out.write_text(build_playlist_page(display, slug, tracks, cache), "utf-8")
        print(f"  → {slug}.html ({len(tracks)} tracks)")
    total = sum(p["track_count"] for p in playlists_meta)
    print(f"\nDone. {len(playlists_meta)} playlists, {total:,} tracks.")


if __name__ == "__main__":
    main()