#!/usr/bin/env python3
|
|
"""
|
|
PLAYLIST PIRATE v2.0
|
|
CSV → resolve → search → build → download
|
|
|
|
Pipeline:
|
|
resolve Parse CSV(s) into *-playlist.md tracking files
|
|
search Find YouTube URLs via yt-dlp (resumable, no API key)
|
|
build Generate static HTML pages with embedded players
|
|
download Download tracks as MP3 (opt-in)
|
|
|
|
Each step is discrete. Nothing runs automatically.
|
|
"""
|
|
|
|
import argparse
import csv
import json
import os
import random
import re
import sys
import time
import urllib.error
import urllib.parse
import urllib.request
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Tuple
|
|
|
|
# ─── Dependency Check ─────────────────────────────────────────────────────────
|
|
|
|
missing = []
|
|
try:
|
|
import yt_dlp
|
|
except ImportError:
|
|
missing.append("yt-dlp")
|
|
try:
|
|
from rich.console import Console
|
|
from rich.theme import Theme
|
|
except ImportError:
|
|
missing.append("rich")
|
|
try:
|
|
from mutagen.id3 import ID3, TIT2, TPE1, TSRC, TALB, ID3NoHeaderError
|
|
except ImportError:
|
|
missing.append("mutagen")
|
|
|
|
if missing:
|
|
print(f"[FATAL] Missing: {', '.join(missing)}")
|
|
print("Install: pip install yt-dlp rich mutagen")
|
|
sys.exit(1)
|
|
|
|
|
|
# ─── Terminal ─────────────────────────────────────────────────────────────────
|
|
|
|
console = Console(
|
|
theme=Theme({
|
|
"ok": "green",
|
|
"accent": "bold bright_green",
|
|
"dim": "dim green",
|
|
"warn": "yellow",
|
|
"err": "bold red",
|
|
}),
|
|
style="green on black",
|
|
highlight=False,
|
|
)
|
|
|
|
LOGO = """\
|
|
██████╗ ██╗ █████╗ ██╗ ██╗██╗ ██╗███████╗████████╗
|
|
██╔══██╗██║ ██╔══██╗╚██╗ ██╔╝██║ ██║██╔════╝╚══██╔══╝
|
|
██████╔╝██║ ███████║ ╚████╔╝ ██║ ██║███████╗ ██║
|
|
██╔═══╝ ██║ ██╔══██║ ╚██╔╝ ██║ ██║╚════██║ ██║
|
|
██║ ███████╗██║ ██║ ██║ ███████╗██║███████║ ██║
|
|
╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝╚══════╝ ╚═╝
|
|
██████╗ ██╗██████╗ █████╗ ████████╗███████╗
|
|
██╔══██╗██║██╔══██╗██╔══██╗╚══██╔══╝██╔════╝
|
|
██████╔╝██║██████╔╝███████║ ██║ █████╗
|
|
██╔═══╝ ██║██╔══██╗██╔══██║ ██║ ██╔══╝
|
|
██║ ██║██║ ██║██║ ██║ ██║ ███████╗
|
|
╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚══════╝"""
|
|
|
|
DIVIDER = "─" * 60
|
|
|
|
def boot(module: str):
|
|
console.print(f"\n[accent]{LOGO}[/accent]")
|
|
console.print(f"[dim]PLAYLIST PIRATE v2.0 // {module.upper()} MODULE[/dim]")
|
|
console.print(f"[dim]{DIVIDER}[/dim]\n")
|
|
time.sleep(0.2)
|
|
|
|
def out(msg: str, style: str = "ok"):
|
|
console.print(msg, style=style)
|
|
|
|
|
|
# ─── Fire Spectrum ─────────────────────────────────────────────────────────────
|
|
# Each playlist gets a unique fire accent. Assigned by sorted alphabetical index.
|
|
|
|
FIRE_SPECTRUM = [
|
|
("#ff3300", "#ff6633", "#cc2200", "rgba(255,51,0,0.18)"), # fire red
|
|
("#ff6600", "#ff8833", "#cc4400", "rgba(255,102,0,0.18)"), # orange
|
|
("#ff9900", "#ffbb44", "#cc7700", "rgba(255,153,0,0.18)"), # amber-orange
|
|
("#ffcc00", "#ffdd55", "#cc9900", "rgba(255,204,0,0.18)"), # gold
|
|
("#e8943a", "#f0ad60", "#b86820", "rgba(232,148,58,0.18)"), # fire amber
|
|
("#d4654a", "#e07d64", "#a04030", "rgba(212,101,74,0.18)"), # coral
|
|
("#cc3333", "#dd5555", "#992222", "rgba(204,51,51,0.18)"), # crimson
|
|
("#ff4d6d", "#ff7090", "#cc2244", "rgba(255,77,109,0.18)"), # hot pink-red
|
|
("#f472b6", "#f79ed0", "#c04080", "rgba(244,114,182,0.18)"), # fairy pink
|
|
("#c558d9", "#d880e8", "#8830a0", "rgba(197,88,217,0.18)"), # orchid
|
|
("#a855f7", "#c084fc", "#6d28d9", "rgba(168,85,247,0.18)"), # violet
|
|
("#7c3aed", "#a06af0", "#4c1d95", "rgba(124,58,237,0.18)"), # indigo-violet
|
|
("#3fbfaf", "#66d0c4", "#288070", "rgba(63,191,175,0.18)"), # waterfall
|
|
("#2ac4b3", "#55d4c6", "#1a8077", "rgba(42,196,179,0.18)"), # teal
|
|
("#00b4d8", "#33c8e8", "#007a99", "rgba(0,180,216,0.18)"), # sky blue
|
|
("#32dc8c", "#66e8aa", "#1a9955", "rgba(50,220,140,0.18)"), # neon green
|
|
("#00ff41", "#55ff77", "#00aa22", "rgba(0,255,65,0.18)"), # phosphor
|
|
("#ff7f3f", "#ffa066", "#cc5500", "rgba(255,127,63,0.18)"), # paradise
|
|
("#ffcf40", "#ffdd77", "#cc9900", "rgba(255,207,64,0.18)"), # toucan
|
|
("#8b2020", "#bb4444", "#5a0f0f", "rgba(139,32,32,0.18)"), # deep red
|
|
("#ff5500", "#ff7733", "#cc3300", "rgba(255,85,0,0.18)"), # orange-red
|
|
]
|
|
|
|
def get_fire(idx: int) -> dict:
|
|
p, b, d, g = FIRE_SPECTRUM[idx % len(FIRE_SPECTRUM)]
|
|
return {"primary": p, "bright": b, "deep": d, "glow": g}
|
|
|
|
|
|
# ─── Data Model ───────────────────────────────────────────────────────────────
|
|
|
|
LINE_RE = re.compile(
|
|
r'^- \[( |x|-)\] (.+?) \| (.+?) \| ISRC:([A-Z0-9\-]{3,15}|-) \| SP:([A-Za-z0-9]+|-) \| (.+)$'
|
|
)
|
|
LINE_RE_LEGACY = re.compile(
|
|
r'^- \[( |x|-)\] (.+?) \| (.+?) \| ISRC:([A-Z0-9\-]{3,15}|-) \| (.+)$'
|
|
)
|
|
PENDING = " "
|
|
DONE = "x"
|
|
NOT_FOUND = "-"
|
|
|
|
|
|
class Track:
|
|
def __init__(self, status, title, artists, isrc, url, album="", spotify_id="-"):
|
|
self.status = status
|
|
self.title = title.strip()
|
|
self.artists = artists.strip()
|
|
self.isrc = isrc.strip() if isrc else "-"
|
|
self.url = url.strip()
|
|
self.album = album.strip()
|
|
self.spotify_id = spotify_id.strip() if spotify_id else "-"
|
|
|
|
@property
|
|
def needs_search(self):
|
|
return self.url == "?" and self.status == PENDING
|
|
|
|
@property
|
|
def needs_download(self):
|
|
return self.url not in ("?", "NOT_FOUND") and self.status == PENDING
|
|
|
|
@property
|
|
def youtube_id(self):
|
|
if not self.url or self.url in ("?", "NOT_FOUND"):
|
|
return ""
|
|
m = re.search(r"youtu\.be/([A-Za-z0-9_\-]{11})", self.url)
|
|
if m: return m.group(1)
|
|
m = re.search(r"[?&]v=([A-Za-z0-9_\-]{11})", self.url)
|
|
if m: return m.group(1)
|
|
return ""
|
|
|
|
@property
|
|
def search_query(self):
|
|
parts = [a.strip() for a in self.artists.split(",")][:2]
|
|
return f"{self.title} {', '.join(parts)}"
|
|
|
|
def to_md(self):
|
|
return (
|
|
f"- [{self.status}] {self.title} | {self.artists} "
|
|
f"| ISRC:{self.isrc} | SP:{self.spotify_id} | {self.url}"
|
|
)
|
|
|
|
|
|
class Playlist:
|
|
def __init__(self, name, source, tracks, slug=""):
|
|
self.name = name
|
|
self.source = source
|
|
self.tracks = tracks
|
|
self.slug = slug or _make_slug(name)
|
|
|
|
@classmethod
|
|
def from_md(cls, path: Path):
|
|
text = path.read_text(encoding="utf-8")
|
|
lines = text.splitlines()
|
|
name = path.stem.replace("-playlist", "").replace("-", " ").replace("_", " ").title()
|
|
if lines and lines[0].startswith("#"):
|
|
name = lines[0].lstrip("#").strip()
|
|
source = ""
|
|
if len(lines) > 1:
|
|
m = re.search(r"source:\s*([^|]+)", lines[1])
|
|
if m: source = m.group(1).strip()
|
|
tracks = []
|
|
for line in lines:
|
|
m = LINE_RE.match(line.strip())
|
|
if m:
|
|
tracks.append(Track(m.group(1), m.group(2), m.group(3), m.group(4), m.group(6), spotify_id=m.group(5)))
|
|
else:
|
|
m = LINE_RE_LEGACY.match(line.strip())
|
|
if m:
|
|
tracks.append(Track(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)))
|
|
slug = _make_slug(path.stem.replace("-playlist", ""))
|
|
return cls(name, source, tracks, slug)
|
|
|
|
def to_md(self):
|
|
ts = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
|
|
body = [f"# {self.name}", f"<!-- source: {self.source} | updated: {ts} -->", ""]
|
|
body += [t.to_md() for t in self.tracks]
|
|
return "\n".join(body) + "\n"
|
|
|
|
def save(self, path: Path):
|
|
path.write_text(self.to_md(), encoding="utf-8")
|
|
|
|
|
|
# ─── CSV Parser ───────────────────────────────────────────────────────────────
|
|
|
|
TRACK_KEYS = ["track name", "title", "song name", "song", "name", "track"]
|
|
ARTIST_KEYS = ["artist name(s)", "artist names", "artist name", "artists", "artist"]
|
|
ISRC_KEYS = ["isrc"]
|
|
ALBUM_KEYS = ["album name", "album title", "album"]
|
|
SPOTIFY_KEYS = ["track uri", "spotify uri", "track id", "spotify id"]
|
|
|
|
def _find_col(headers, keys):
|
|
lower = {h.lower(): h for h in headers}
|
|
return next((lower[k] for k in keys if k in lower), None)
|
|
|
|
def _make_slug(name: str) -> str:
|
|
s = name.lower().replace(" ", "-")
|
|
s = re.sub(r"[^a-z0-9\-]", "", s)
|
|
s = re.sub(r"-{2,}", "-", s)
|
|
return s.strip("-")
|
|
|
|
def _clean_artists(raw: str) -> str:
|
|
parts = [a.strip() for a in raw.split(",") if a.strip()]
|
|
return ", ".join(parts[:2])
|
|
|
|
def parse_csv(path: Path) -> Playlist:
|
|
with path.open(encoding="utf-8-sig", newline="") as f:
|
|
reader = csv.DictReader(f)
|
|
headers = list(reader.fieldnames or [])
|
|
tc = _find_col(headers, TRACK_KEYS)
|
|
ac = _find_col(headers, ARTIST_KEYS)
|
|
ic = _find_col(headers, ISRC_KEYS)
|
|
lc = _find_col(headers, ALBUM_KEYS)
|
|
sc = _find_col(headers, SPOTIFY_KEYS)
|
|
if not tc or not ac:
|
|
raise ValueError(f"Cannot find track/artist columns.\nHeaders: {headers}")
|
|
tracks = []
|
|
for row in reader:
|
|
title = row[tc].strip()
|
|
artists = _clean_artists(row[ac])
|
|
isrc = row.get(ic, "").strip().upper() if ic else "-"
|
|
album = row.get(lc, "").strip() if lc else ""
|
|
spotify_id = "-"
|
|
if sc:
|
|
raw = row.get(sc, "").strip()
|
|
# Accept full URI (spotify:track:ID) or bare ID
|
|
m = re.match(r"spotify:track:([A-Za-z0-9]+)", raw)
|
|
spotify_id = m.group(1) if m else (raw if re.match(r"^[A-Za-z0-9]{10,}$", raw) else "-")
|
|
if not isrc: isrc = "-"
|
|
if title:
|
|
t = Track(PENDING, title, artists, isrc, "?", album=album, spotify_id=spotify_id)
|
|
tracks.append(t)
|
|
name = path.stem.replace("-", " ").replace("_", " ").title()
|
|
return Playlist(name, path.name, tracks, _make_slug(path.stem))
|
|
|
|
|
|
# ─── Batch helpers ────────────────────────────────────────────────────────────
|
|
|
|
def resolve_inputs(inputs: List[str], suffix: str) -> List[Path]:
|
|
"""Expand inputs (files or directories) to a list of matching Path objects."""
|
|
paths = []
|
|
for inp in inputs:
|
|
p = Path(inp)
|
|
if p.is_dir():
|
|
paths.extend(sorted(p.glob(f"*{suffix}")))
|
|
elif p.exists():
|
|
paths.append(p)
|
|
else:
|
|
out(f"> [WARN] Not found: {inp}", "warn")
|
|
return paths
|
|
|
|
|
|
# ─── MusicBrainz ──────────────────────────────────────────────────────────────
|
|
|
|
MB_HEADERS = {"User-Agent": "PlaylistPirate/2.0 (spaces.exopraxist.org)"}
|
|
MB_INTERVAL = 1.2
|
|
_last_mb = 0.0
|
|
FETCH_FAILED = object()
|
|
|
|
ARTIST_URL_PRIORITY = [
|
|
"official homepage", "bandcamp", "soundcloud", "patreon",
|
|
"linktree", "youtube", "instagram", "twitter", "facebook",
|
|
"last.fm", "discogs", "wikidata", "wikipedia",
|
|
]
|
|
|
|
def _mb_get(url: str):
|
|
global _last_mb
|
|
elapsed = time.time() - _last_mb
|
|
if elapsed < MB_INTERVAL:
|
|
time.sleep(MB_INTERVAL - elapsed)
|
|
_last_mb = time.time()
|
|
try:
|
|
req = urllib.request.Request(url, headers=MB_HEADERS)
|
|
with urllib.request.urlopen(req, timeout=15) as r:
|
|
return json.loads(r.read().decode("utf-8"))
|
|
except urllib.error.HTTPError as e:
|
|
if e.code == 404: return {}
|
|
if e.code == 429:
|
|
out(" MB rate limit — waiting 30s", "warn")
|
|
time.sleep(30)
|
|
return FETCH_FAILED
|
|
except Exception:
|
|
return FETCH_FAILED
|
|
|
|
def mb_isrc_lookup(isrc: str):
|
|
url = f"https://musicbrainz.org/ws/2/isrc/{isrc}?inc=artist-credits&fmt=json"
|
|
data = _mb_get(url)
|
|
if data is FETCH_FAILED: return FETCH_FAILED
|
|
result = {"mb_recording_url": "", "mb_artist_id": ""}
|
|
recs = data.get("recordings", [])
|
|
if not recs: return result
|
|
rec = recs[0]
|
|
if rec.get("id"):
|
|
result["mb_recording_url"] = f"https://musicbrainz.org/recording/{rec['id']}"
|
|
for credit in rec.get("artist-credit", []):
|
|
if isinstance(credit, dict) and "artist" in credit:
|
|
result["mb_artist_id"] = credit["artist"].get("id", "")
|
|
break
|
|
return result
|
|
|
|
def mb_artist_lookup(mb_artist_id: str):
|
|
url = f"https://musicbrainz.org/ws/2/artist/{mb_artist_id}?inc=url-rels&fmt=json"
|
|
data = _mb_get(url)
|
|
if data is FETCH_FAILED: return FETCH_FAILED
|
|
result = {"artist_url": "", "artist_url_type": ""}
|
|
best_rank = len(ARTIST_URL_PRIORITY) + 1
|
|
for rel in data.get("relations", []):
|
|
rel_type = rel.get("type", "").lower()
|
|
href = rel.get("url", {}).get("resource", "")
|
|
if not href: continue
|
|
for i, ptype in enumerate(ARTIST_URL_PRIORITY):
|
|
if ptype in rel_type or ptype in href:
|
|
if i < best_rank:
|
|
best_rank = i
|
|
result["artist_url"] = href
|
|
result["artist_url_type"] = rel_type
|
|
break
|
|
return result
|
|
|
|
|
|
# ─── Build cache ──────────────────────────────────────────────────────────────
|
|
|
|
def load_build_cache(cache_path: Path) -> dict:
|
|
if cache_path.exists():
|
|
try: return json.loads(cache_path.read_text("utf-8"))
|
|
except Exception: pass
|
|
return {}
|
|
|
|
def save_build_cache(cache: dict, cache_path: Path):
|
|
cache_path.write_text(json.dumps(cache, indent=2, ensure_ascii=False), "utf-8")
|
|
|
|
|
|
# ─── HTML helpers ─────────────────────────────────────────────────────────────
|
|
|
|
GOOGLE_FONTS = (
|
|
'<link href="https://fonts.googleapis.com/css2?family=Faculty+Glyphic'
|
|
'&family=Rambla:wght@400;700'
|
|
'&family=Share+Tech+Mono&display=swap" rel="stylesheet">'
|
|
)
|
|
|
|
def esc(s) -> str:
|
|
return (str(s)
|
|
.replace("&", "&").replace("<", "<")
|
|
.replace(">", ">").replace('"', """))
|
|
|
|
def ms_to_mmss(ms) -> str:
|
|
try:
|
|
s = int(ms) // 1000
|
|
return f"{s // 60}:{s % 60:02d}"
|
|
except Exception: return ""
|
|
|
|
def ms_to_hhmmss(total_ms: int) -> str:
|
|
s = total_ms // 1000
|
|
h, m, s = s // 3600, (s % 3600) // 60, s % 60
|
|
return f"{h}:{m:02d}:{s:02d}" if h else f"{m}:{s:02d}"
|
|
|
|
SHARED_CSS = """
|
|
:root {{
|
|
--bg-void: #04060b;
|
|
--text-warm: #e8d5b8;
|
|
--text-muted: #7a6f5e;
|
|
--fp: {primary};
|
|
--fb: {bright};
|
|
--fd: {deep};
|
|
--fg: {glow};
|
|
}}
|
|
* {{ box-sizing: border-box; margin: 0; padding: 0; }}
|
|
body {{
|
|
background: var(--bg-void);
|
|
color: var(--text-warm);
|
|
font-family: 'Rambla', sans-serif;
|
|
line-height: 1.5; min-height: 100vh;
|
|
}}
|
|
"""
|
|
|
|
|
|
def build_hub_html(playlists_meta: list) -> str:
|
|
"""playlists_meta: list of {slug, name, track_count, fire_idx}"""
|
|
total = sum(p["track_count"] for p in playlists_meta)
|
|
n = len(playlists_meta)
|
|
|
|
cards = []
|
|
for p in playlists_meta:
|
|
f = get_fire(p["fire_idx"])
|
|
cards.append(
|
|
f' <a href="{p["slug"]}.html" class="playlist-card" '
|
|
f'style="--fp:{f["primary"]};--fb:{f["bright"]};--fd:{f["deep"]};--fg:{f["glow"]}">\n'
|
|
f' <div class="playlist-name">{esc(p["name"])}</div>\n'
|
|
f' <div class="track-count">{p["track_count"]} tracks</div>\n'
|
|
f' </a>'
|
|
)
|
|
|
|
fire = get_fire(0)
|
|
css = SHARED_CSS.format(**fire)
|
|
|
|
return f"""<!DOCTYPE html>
|
|
<html lang="en">
|
|
<head>
|
|
<meta charset="UTF-8">
|
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
<title>PLAYLISTS | Singular Particular</title>
|
|
{GOOGLE_FONTS}
|
|
<style>
|
|
{css}
|
|
body {{ padding: 2rem; }}
|
|
h1 {{
|
|
font-family: 'Faculty Glyphic', sans-serif;
|
|
color: var(--fp); font-weight: normal;
|
|
font-size: clamp(3rem, 10vw, 6rem);
|
|
letter-spacing: -0.02em; margin-bottom: 0.5rem;
|
|
}}
|
|
.subtitle {{
|
|
font-family: 'Rambla', sans-serif;
|
|
color: var(--text-muted); font-size: 1.1rem; margin-bottom: 3rem;
|
|
}}
|
|
.back-link {{
|
|
display: inline-block;
|
|
font-family: 'Share Tech Mono', monospace;
|
|
color: var(--fp); text-decoration: none;
|
|
margin-bottom: 2rem; font-size: 1.1rem;
|
|
transition: color 100ms ease;
|
|
}}
|
|
.back-link:hover {{ color: var(--fb); }}
|
|
.playlist-grid {{
|
|
display: grid; grid-template-columns: 1fr;
|
|
gap: 1.5rem; max-width: 1400px;
|
|
}}
|
|
@media (min-width: 640px) {{ .playlist-grid {{ grid-template-columns: repeat(2,1fr); }} }}
|
|
@media (min-width: 1024px) {{ .playlist-grid {{ grid-template-columns: repeat(3,1fr); }} }}
|
|
.playlist-card {{
|
|
display: block; text-decoration: none; color: inherit;
|
|
padding: 1.5rem; border-left: 2px solid var(--fd);
|
|
transition: background 100ms ease, border-color 100ms ease;
|
|
}}
|
|
.playlist-card:hover {{ background: var(--fg); border-color: var(--fp); }}
|
|
.playlist-name {{
|
|
font-family: 'Faculty Glyphic', sans-serif;
|
|
font-size: 1.5rem; color: var(--text-warm);
|
|
line-height: 1.2; margin-bottom: 0.5rem;
|
|
}}
|
|
.track-count {{
|
|
font-family: 'Share Tech Mono', monospace;
|
|
color: var(--text-muted); font-size: 0.9rem; text-transform: uppercase;
|
|
}}
|
|
footer {{
|
|
margin-top: 5rem; border-top: 1px solid var(--fd);
|
|
padding-top: 2rem; font-family: 'Share Tech Mono', monospace;
|
|
color: var(--text-muted); font-size: 0.8rem; text-align: center;
|
|
}}
|
|
</style>
|
|
</head>
|
|
<body>
|
|
<a href="../index.html" class="back-link">← Space</a>
|
|
<header>
|
|
<h1>PLAYLISTS</h1>
|
|
<p class="subtitle">{n} playlists • {total:,} tracks</p>
|
|
</header>
|
|
<main class="playlist-grid">
|
|
{chr(10).join(cards)}
|
|
</main>
|
|
<footer>
|
|
Music data via <a href="https://musicbrainz.org" style="color:var(--fp)" target="_blank" rel="noopener">MusicBrainz</a>
|
|
• — SINGULAR PARTICULAR SPACE —
|
|
</footer>
|
|
</body>
|
|
</html>
|
|
"""
|
|
|
|
|
|
def build_track_html(track: Track, idx: int, mb_data: dict, artist_url: str) -> str:
|
|
num = f"{idx:02d}"
|
|
name = esc(track.title)
|
|
artists = esc(track.artists)
|
|
album = esc(track.album)
|
|
yt_id = track.youtube_id
|
|
yt_search = f"https://www.youtube.com/results?search_query={urllib.parse.quote(track.artists + ' ' + track.title)}"
|
|
mb_rec_url = esc(mb_data.get("mb_recording_url", ""))
|
|
art_url = esc(artist_url)
|
|
|
|
mb_link = (
|
|
f'<a href="{mb_rec_url}" class="tl" target="_blank" rel="noopener">[MUSICBRAINZ]</a>'
|
|
if mb_rec_url else '<span class="tl na">[MUSICBRAINZ]</span>'
|
|
)
|
|
artist_link = (
|
|
f'<a href="{art_url}" class="tl" target="_blank" rel="noopener">[ARTIST]</a>'
|
|
if art_url else '<span class="tl na">[ARTIST]</span>'
|
|
)
|
|
spotify_link = (
|
|
f'<a href="https://open.spotify.com/track/{esc(track.spotify_id)}" class="tl sp" target="_blank" rel="noopener">[SPOTIFY]</a>'
|
|
if track.spotify_id and track.spotify_id != "-" else ""
|
|
)
|
|
|
|
if yt_id:
|
|
yt_link = f'<button class="tl yt-toggle" data-vid="{esc(yt_id)}">[YOUTUBE]</button>'
|
|
embed_html = f"""
|
|
<div class="embed-wrap">
|
|
<div class="embed-ratio">
|
|
<iframe src="" data-src="https://www.youtube.com/embed/{esc(yt_id)}"
|
|
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
|
|
allowfullscreen></iframe>
|
|
</div>
|
|
</div>"""
|
|
else:
|
|
yt_link = f'<a href="{esc(yt_search)}" class="tl" target="_blank" rel="noopener">[YOUTUBE]</a>'
|
|
embed_html = ""
|
|
|
|
return f"""
|
|
<article class="track-card">
|
|
<div class="track-row">
|
|
<div class="zone-l">
|
|
<span class="track-num">{num}</span>
|
|
</div>
|
|
<div class="zone-r">
|
|
<div class="track-name">{name}</div>
|
|
<div class="artist-name">{artists}</div>
|
|
<div class="album-meta">{album}</div>
|
|
<div class="links-row">
|
|
{mb_link}
|
|
{yt_link}
|
|
{artist_link}
|
|
{spotify_link}
|
|
</div>
|
|
</div>
|
|
</div>{embed_html}
|
|
</article>"""
|
|
|
|
|
|
def build_playlist_html(playlist: Playlist, fire: dict, cache: dict) -> str:
|
|
css = SHARED_CSS.format(**fire)
|
|
n = len(playlist.tracks)
|
|
cards = "".join(
|
|
build_track_html(t, i + 1,
|
|
cache.get(f"mb:isrc:{t.isrc}", {}),
|
|
cache.get(f"mb:artist:{cache.get(f'mb:isrc:{t.isrc}', {}).get('mb_artist_id','')}", {}).get("artist_url","")
|
|
)
|
|
for i, t in enumerate(playlist.tracks)
|
|
)
|
|
has_yt = sum(1 for t in playlist.tracks if t.youtube_id)
|
|
|
|
return f"""<!DOCTYPE html>
|
|
<html lang="en">
|
|
<head>
|
|
<meta charset="UTF-8">
|
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
<title>{esc(playlist.name)} | PLAYLISTS</title>
|
|
{GOOGLE_FONTS}
|
|
<style>
|
|
{css}
|
|
header {{
|
|
position: sticky; top: 0; z-index: 100;
|
|
background: var(--bg-void);
|
|
border-bottom: 1px solid var(--fd);
|
|
padding: 1rem 2rem;
|
|
display: flex; align-items: center;
|
|
justify-content: space-between; gap: 2rem;
|
|
}}
|
|
.hd-main {{ display: flex; align-items: baseline; gap: 1.5rem; flex-grow: 1; min-width: 0; }}
|
|
header h1 {{
|
|
font-family: 'Faculty Glyphic', sans-serif; color: var(--fp);
|
|
font-size: 2rem; font-weight: normal;
|
|
white-space: nowrap; overflow: hidden; text-overflow: ellipsis;
|
|
}}
|
|
.hd-meta {{
|
|
font-family: 'Share Tech Mono', monospace;
|
|
color: var(--text-muted); font-size: 0.9rem;
|
|
text-transform: uppercase; white-space: nowrap;
|
|
}}
|
|
.nav-links {{ display: flex; gap: 1.5rem; }}
|
|
.back-link {{
|
|
font-family: 'Share Tech Mono', monospace;
|
|
color: var(--fp); text-decoration: none;
|
|
font-size: 1rem; white-space: nowrap;
|
|
transition: color 100ms ease;
|
|
}}
|
|
.back-link:hover {{ color: var(--fb); }}
|
|
.menu-toggle {{
|
|
display: none; background: none;
|
|
border: 1px solid var(--fp); color: var(--fp);
|
|
padding: 0.5rem; font-family: 'Share Tech Mono', monospace;
|
|
cursor: pointer; font-size: 1.2rem;
|
|
}}
|
|
@media (max-width: 640px) {{
|
|
header {{ padding: 1rem; }}
|
|
header h1 {{ font-size: 1.5rem; }}
|
|
.menu-toggle {{ display: block; }}
|
|
.nav-links {{
|
|
display: none; position: absolute;
|
|
top: 100%; left: 0; right: 0;
|
|
background: var(--bg-void);
|
|
flex-direction: column; padding: 1rem;
|
|
border-bottom: 1px solid var(--fd);
|
|
}}
|
|
.nav-links.open {{ display: flex; }}
|
|
}}
|
|
.track-list {{ max-width: 1000px; margin: 2rem auto; padding: 0 1rem 5rem; }}
|
|
.track-card {{
|
|
display: flex; flex-direction: column;
|
|
border-left: 1px solid var(--fd);
|
|
margin-bottom: 0.5rem; position: relative;
|
|
transition: background 100ms ease, border-color 100ms ease;
|
|
}}
|
|
.track-card::after {{
|
|
content: ''; position: absolute;
|
|
bottom: -0.25rem; left: 0; right: 0;
|
|
height: 1px; background: var(--fd); opacity: 0.1;
|
|
}}
|
|
.track-card:hover {{ background: var(--fg); border-color: var(--fp); }}
|
|
.track-card.active {{ border-color: var(--fp); }}
|
|
.track-row {{ display: flex; padding: 1.25rem 1.5rem; gap: 1.5rem; }}
|
|
.zone-l {{
|
|
width: 2.5rem; flex-shrink: 0;
|
|
display: flex; align-items: flex-start; padding-top: 0.2rem;
|
|
}}
|
|
.track-num {{
|
|
font-family: 'Share Tech Mono', monospace;
|
|
color: var(--text-muted); font-size: 0.95rem;
|
|
}}
|
|
.zone-r {{ flex-grow: 1; min-width: 0; }}
|
|
.track-name {{
|
|
font-family: 'Rambla', sans-serif; font-weight: 700;
|
|
font-size: 1.1rem; color: var(--text-warm); margin-bottom: 0.2rem;
|
|
}}
|
|
.artist-name {{
|
|
font-family: 'Rambla', sans-serif; font-size: 0.95rem;
|
|
color: var(--fp); margin-bottom: 0.25rem;
|
|
}}
|
|
.album-meta {{
|
|
font-family: 'Share Tech Mono', monospace;
|
|
font-size: 0.7rem; color: var(--text-muted);
|
|
text-transform: uppercase; letter-spacing: 0.05em; margin-bottom: 0.75rem;
|
|
}}
|
|
.links-row {{ display: flex; flex-wrap: wrap; gap: 1rem; }}
|
|
.tl {{
|
|
font-family: 'Share Tech Mono', monospace; font-size: 0.75rem;
|
|
color: var(--fp); text-decoration: none;
|
|
transition: color 100ms ease; cursor: pointer;
|
|
background: none; border: none; padding: 0;
|
|
}}
|
|
.tl:hover {{ color: var(--fb); }}
|
|
.tl.na {{ color: var(--text-muted); cursor: default; pointer-events: none; }}
|
|
.tl.sp {{ color: #1db954; }}
|
|
.tl.sp:hover {{ color: #1ed760; }}
|
|
.embed-wrap {{
|
|
display: none; width: 100%;
|
|
background: var(--fg); border-top: 1px solid var(--fd); padding: 1rem;
|
|
}}
|
|
.track-card.active .embed-wrap {{ display: block; }}
|
|
.embed-ratio {{ position: relative; padding-bottom: 56.25%; height: 0; overflow: hidden; }}
|
|
.embed-ratio iframe {{
|
|
position: absolute; top: 0; left: 0;
|
|
width: 100%; height: 100%; border: 0;
|
|
}}
|
|
@media (max-width: 480px) {{
|
|
.track-row {{ padding: 1rem; gap: 1rem; }}
|
|
}}
|
|
footer {{
|
|
margin-top: 5rem; padding: 2rem;
|
|
border-top: 1px solid var(--fd);
|
|
font-family: 'Share Tech Mono', monospace;
|
|
color: var(--text-muted); font-size: 0.8rem; text-align: center;
|
|
}}
|
|
footer a {{ color: var(--fp); text-decoration: none; }}
|
|
footer a:hover {{ color: var(--fb); }}
|
|
</style>
|
|
</head>
|
|
<body>
|
|
<header>
|
|
<div class="hd-main">
|
|
<h1>{esc(playlist.name)}</h1>
|
|
<span class="hd-meta">{n} TRACKS • {has_yt} EMBEDDED</span>
|
|
</div>
|
|
<button class="menu-toggle" id="mt">☰</button>
|
|
<nav class="nav-links" id="nl">
|
|
<a href="playlists.html" class="back-link">← Playlists</a>
|
|
</nav>
|
|
</header>
|
|
<main class="track-list">
|
|
{cards}
|
|
</main>
|
|
<footer>
|
|
Playlists from <a href="https://open.spotify.com" target="_blank" rel="noopener">Spotify</a>
|
|
• Recording data via <a href="https://musicbrainz.org" target="_blank" rel="noopener">MusicBrainz</a>
|
|
• — SINGULAR PARTICULAR SPACE —
|
|
</footer>
|
|
<script>
|
|
document.addEventListener('DOMContentLoaded', () => {{
|
|
const mt = document.getElementById('mt');
|
|
const nl = document.getElementById('nl');
|
|
mt.addEventListener('click', () => {{
|
|
nl.classList.toggle('open');
|
|
mt.textContent = nl.classList.contains('open') ? '\\u2715' : '\\u2630';
|
|
}});
|
|
document.querySelectorAll('.yt-toggle').forEach(btn => {{
|
|
btn.addEventListener('click', () => {{
|
|
const card = btn.closest('.track-card');
|
|
const iframe = card.querySelector('iframe');
|
|
card.classList.toggle('active');
|
|
if (card.classList.contains('active') && iframe) {{
|
|
const src = iframe.getAttribute('data-src');
|
|
if (src && iframe.getAttribute('src') !== src)
|
|
iframe.setAttribute('src', src);
|
|
}}
|
|
}});
|
|
}});
|
|
}});
|
|
</script>
|
|
</body>
|
|
</html>
|
|
"""
|
|
|
|
|
|
# ─── Commands ─────────────────────────────────────────────────────────────────
|
|
|
|
def cmd_resolve(args):
|
|
boot("resolve")
|
|
paths = resolve_inputs(args.input, ".csv")
|
|
if not paths:
|
|
out("> [ERR] No CSV files found.", "err"); sys.exit(1)
|
|
|
|
for src in paths:
|
|
out(f"> PARSING: {src.name}")
|
|
try:
|
|
playlist = parse_csv(src)
|
|
except Exception as e:
|
|
out(f" [ERR] {e}", "err"); continue
|
|
|
|
out_path = src.with_name(src.stem + "-playlist.md")
|
|
playlist.save(out_path)
|
|
out(f" → {out_path.name} ({len(playlist.tracks)} tracks)", "accent")
|
|
|
|
out(f"\n[dim]{DIVIDER}[/dim]")
|
|
out("> NEXT: playlist search <file(s) or dir>", "dim")
|
|
|
|
|
|
def cmd_search(args):
|
|
boot("search")
|
|
paths = resolve_inputs(args.input, "-playlist.md")
|
|
if not paths:
|
|
out("> [ERR] No playlist.md files found.", "err"); sys.exit(1)
|
|
|
|
delay_min = float(args.delay_min)
|
|
delay_max = float(args.delay_max)
|
|
ydl_opts = {"quiet": True, "no_warnings": True, "extract_flat": "in_playlist"}
|
|
|
|
for src in paths:
|
|
out(f"\n> PLAYLIST: {src.name}", "accent")
|
|
playlist = Playlist.from_md(src)
|
|
pending = [t for t in playlist.tracks if t.needs_search]
|
|
|
|
out(f" TO SEARCH: {len(pending)} / {len(playlist.tracks)}")
|
|
if not pending:
|
|
out(" Nothing to search — all tracks have URLs.", "warn"); continue
|
|
|
|
found = not_found = 0
|
|
for i, track in enumerate(pending):
|
|
out(f" [{i+1}/{len(pending)}] {track.search_query}")
|
|
try:
|
|
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
|
|
info = ydl.extract_info(f"ytsearch1:{track.search_query}", download=False)
|
|
entries = (info or {}).get("entries", [])
|
|
entry = entries[0] if entries else None
|
|
if entry:
|
|
vid_id = entry.get("id") or entry.get("url", "").split("v=")[-1]
|
|
track.url = f"https://www.youtube.com/watch?v={vid_id}"
|
|
out(f" ✓ {track.url}", "dim")
|
|
found += 1
|
|
else:
|
|
track.url = track.status = NOT_FOUND
|
|
out(" NOT FOUND", "warn"); not_found += 1
|
|
except Exception as e:
|
|
track.url = track.status = NOT_FOUND
|
|
out(f" ERROR: {e}", "err"); not_found += 1
|
|
|
|
playlist.save(src)
|
|
if i < len(pending) - 1:
|
|
d = random.uniform(delay_min, delay_max)
|
|
out(f" [dim]sleep {d:.1f}s[/dim]", "dim")
|
|
time.sleep(d)
|
|
|
|
out(f" FOUND: {found} NOT FOUND: {not_found}", "accent")
|
|
|
|
out(f"\n[dim]{DIVIDER}[/dim]")
|
|
out("> NEXT: playlist build <file(s) or dir> --out <output_dir>", "dim")
|
|
|
|
|
|
def cmd_build(args):
|
|
boot("build")
|
|
paths = resolve_inputs(args.input, "-playlist.md")
|
|
if not paths:
|
|
out("> [ERR] No playlist.md files found.", "err"); sys.exit(1)
|
|
|
|
out_dir = Path(args.out) if args.out else paths[0].parent
|
|
out_dir.mkdir(parents=True, exist_ok=True)
|
|
|
|
cache_path = out_dir / ".build-cache.json"
|
|
cache = load_build_cache(cache_path)
|
|
|
|
# Load all playlists
|
|
playlists = []
|
|
for p in paths:
|
|
pl = Playlist.from_md(p)
|
|
playlists.append(pl)
|
|
out(f"> LOADED: {pl.name} ({len(pl.tracks)} tracks)")
|
|
|
|
# Sort for consistent fire color assignment
|
|
playlists.sort(key=lambda p: p.name.lower())
|
|
|
|
# ── MusicBrainz pass ──────────────────────────────────────────────────────
|
|
out(f"\n[dim]{DIVIDER}[/dim]")
|
|
out("> MUSICBRAINZ: fetching recording + artist data...")
|
|
|
|
all_isrcs = {
|
|
t.isrc: t for pl in playlists for t in pl.tracks
|
|
if t.isrc and t.isrc != "-"
|
|
}
|
|
uncached_isrcs = [i for i in all_isrcs if f"mb:isrc:{i}" not in cache]
|
|
out(f" ISRCs: {len(all_isrcs)} total | {len(uncached_isrcs)} to fetch")
|
|
|
|
for n, isrc in enumerate(uncached_isrcs, 1):
|
|
result = mb_isrc_lookup(isrc)
|
|
if result is not FETCH_FAILED:
|
|
cache[f"mb:isrc:{isrc}"] = result
|
|
if n % 50 == 0:
|
|
save_build_cache(cache, cache_path)
|
|
out(f" ISRC {n}/{len(uncached_isrcs)}", "dim")
|
|
save_build_cache(cache, cache_path)
|
|
|
|
artist_ids = {
|
|
cache[f"mb:isrc:{i}"]["mb_artist_id"]
|
|
for i in all_isrcs
|
|
if f"mb:isrc:{i}" in cache and cache[f"mb:isrc:{i}"].get("mb_artist_id")
|
|
}
|
|
uncached_artists = [a for a in artist_ids if f"mb:artist:{a}" not in cache]
|
|
out(f" Artists: {len(artist_ids)} total | {len(uncached_artists)} to fetch")
|
|
|
|
for n, aid in enumerate(uncached_artists, 1):
|
|
result = mb_artist_lookup(aid)
|
|
if result is not FETCH_FAILED:
|
|
cache[f"mb:artist:{aid}"] = result
|
|
if n % 50 == 0:
|
|
save_build_cache(cache, cache_path)
|
|
out(f" Artist {n}/{len(uncached_artists)}", "dim")
|
|
save_build_cache(cache, cache_path)
|
|
out(" MusicBrainz complete.", "accent")
|
|
|
|
# ── Generate HTML ─────────────────────────────────────────────────────────
|
|
out(f"\n[dim]{DIVIDER}[/dim]")
|
|
out("> BUILDING HTML...")
|
|
|
|
playlists_meta = []
|
|
for idx, pl in enumerate(playlists):
|
|
fire = get_fire(idx)
|
|
html = build_playlist_html(pl, fire, cache)
|
|
out_path = out_dir / f"{pl.slug}.html"
|
|
out_path.write_text(html, "utf-8")
|
|
has_yt = sum(1 for t in pl.tracks if t.youtube_id)
|
|
out(f" → {pl.slug}.html ({len(pl.tracks)} tracks, {has_yt} embeds)", "accent")
|
|
playlists_meta.append({
|
|
"slug": pl.slug, "name": pl.name,
|
|
"track_count": len(pl.tracks), "fire_idx": idx,
|
|
})
|
|
|
|
# Hub page (only if multiple playlists)
|
|
if len(playlists) > 1:
|
|
hub_path = out_dir / "playlists.html"
|
|
hub_path.write_text(build_hub_html(playlists_meta), "utf-8")
|
|
out(f" → playlists.html (hub, {len(playlists)} playlists)", "accent")
|
|
|
|
total = sum(p["track_count"] for p in playlists_meta)
|
|
embeds = sum(1 for pl in playlists for t in pl.tracks if t.youtube_id)
|
|
out(f"\n> BUILD COMPLETE — {len(playlists)} playlists, {total:,} tracks, {embeds} embeds", "accent")
|
|
out(f"[dim]{DIVIDER}[/dim]")
|
|
out(f"> OUTPUT: {out_dir}", "dim")
|
|
out("> NEXT (opt-in): playlist download <file(s) or dir>", "dim")
|
|
|
|
|
|
def cmd_download(args):
    """Download pending tracks from each *-playlist.md file as tagged MP3s.

    For every tracking file resolved from ``args.input``: download each track
    still marked as needing download via yt-dlp, convert to 192k MP3, embed
    ID3 tags, and persist progress back to the tracking file after each
    successful track so an interrupted run can resume where it left off
    (mirrors the resumable behavior of the search step).
    """
    boot("download")
    paths = resolve_inputs(args.input, "-playlist.md")
    if not paths:
        out("> [ERR] No playlist.md files found.", "err"); sys.exit(1)

    for src in paths:
        out(f"\n> PLAYLIST: {src.name}", "accent")
        playlist = Playlist.from_md(src)
        pending = [t for t in playlist.tracks if t.needs_download]

        # Default output dir: sibling folder named after the playlist
        # (e.g. "mix-playlist.md" → "mix/"), unless --output was given.
        out_dir = (
            Path(args.output) if args.output
            else src.parent / src.stem.replace("-playlist", "")
        )
        out_dir.mkdir(parents=True, exist_ok=True)

        out(f" TO DOWNLOAD: {len(pending)} / {len(playlist.tracks)}")
        out(f" OUTPUT DIR: {out_dir}", "dim")

        if not pending:
            out(" Nothing to download.", "warn"); continue

        for i, track in enumerate(pending):
            safe = _safe_filename(track.title, track.artists)
            target = out_dir / f"{safe}.mp3"
            out(f" [{i+1}/{len(pending)}] {track.title}")
            out(f" {track.url}", "dim")

            ydl_opts = {
                "format": "bestaudio/best",
                # yt-dlp fills in the source extension; FFmpeg then
                # transcodes to .mp3, which is what `target` expects.
                "outtmpl": str(out_dir / f"{safe}.%(ext)s"),
                "quiet": True, "no_warnings": True,
                "postprocessors": [{
                    "key": "FFmpegExtractAudio",
                    "preferredcodec": "mp3",
                    "preferredquality": "192",
                }],
            }
            try:
                with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                    ydl.download([track.url])
                if target.exists():
                    _embed_tags(target, track)
                    out(f" ✓ {target.name}", "accent")
                    track.status = DONE
                    # Persist immediately so a crash or Ctrl-C does not
                    # lose the DONE statuses of already-downloaded tracks.
                    playlist.save(src)
                else:
                    out(" WARN: not at expected path", "warn")
            except Exception as e:
                # Best-effort: log the failure and continue with the rest.
                out(f" ERROR: {e}", "err")

        playlist.save(src)

        done = sum(1 for t in playlist.tracks if t.status == DONE)
        out(f" {done}/{len(playlist.tracks)} tracks acquired.", "accent")

    out(f"\n[dim]{DIVIDER}[/dim]")
|
|
|
|
|
|
# ─── Helpers ──────────────────────────────────────────────────────────────────
|
|
|
|
def _safe_filename(title: str, artists: str) -> str:
|
|
raw = f"{artists.split(',')[0].strip()} - {title}"
|
|
safe = re.sub(r'[<>:"/\\|?*\x00-\x1f]', "", raw)
|
|
safe = re.sub(r'\s+', " ", safe).strip()
|
|
return safe[:180]
|
|
|
|
def _embed_tags(path: Path, track: Track):
    """Write ID3v2.3 tags onto the MP3 at *path* from *track* metadata.

    Always sets title (TIT2) and artist (TPE1); adds ISRC (TSRC) and
    album (TALB) only when present ("-" is the ISRC placeholder value).
    Best-effort: any failure is logged as a warning, never raised.
    """
    try:
        # Load existing tags, or start a fresh frame set for a bare MP3.
        try:
            tags = ID3(str(path))
        except ID3NoHeaderError:
            tags = ID3()

        frames = {
            "TIT2": TIT2(encoding=3, text=track.title),
            "TPE1": TPE1(encoding=3, text=track.artists),
        }
        if track.isrc and track.isrc != "-":
            frames["TSRC"] = TSRC(encoding=3, text=track.isrc)
        if track.album:
            frames["TALB"] = TALB(encoding=3, text=track.album)
        for frame_id, frame in frames.items():
            tags[frame_id] = frame

        # v2.3 for broadest player compatibility.
        tags.save(str(path), v2_version=3)
    except Exception as e:
        out(f" WARN: tag write failed: {e}", "warn")
|
|
|
|
|
|
# ─── Entry Point ──────────────────────────────────────────────────────────────
|
|
|
|
def main():
    """CLI entry point: define the four subcommands and dispatch to one."""
    parser = argparse.ArgumentParser(
        prog="playlist",
        description="PLAYLIST PIRATE v2.0 — CSV to embedded web player to MP3",
    )
    sub = parser.add_subparsers(dest="command", required=True)

    # Step 1: CSV → tracking markdown.
    p_resolve = sub.add_parser("resolve", help="Parse CSV(s) → *-playlist.md")
    p_resolve.add_argument("input", nargs="+", help="CSV file(s) or directory")

    # Step 2: locate YouTube URLs (resumable, throttled).
    p_search = sub.add_parser("search", help="Find YouTube URLs via yt-dlp (resumable)")
    p_search.add_argument("input", nargs="+", help="*-playlist.md file(s) or directory")
    p_search.add_argument("--delay-min", type=float, default=3.0, metavar="SEC")
    p_search.add_argument("--delay-max", type=float, default=7.0, metavar="SEC")

    # Step 3: static HTML player pages.
    p_build = sub.add_parser("build", help="Generate HTML pages with embedded YouTube players")
    p_build.add_argument("input", nargs="+", help="*-playlist.md file(s) or directory")
    p_build.add_argument("--out", metavar="DIR", help="Output directory for HTML (default: same as input)")

    # Step 4: opt-in MP3 download.
    p_download = sub.add_parser("download", help="Download tracks as MP3 (opt-in)")
    p_download.add_argument("input", nargs="+", help="*-playlist.md file(s) or directory")
    p_download.add_argument("--output", "-o", metavar="DIR", help="Output directory for MP3s")

    args = parser.parse_args()

    handlers = {
        "resolve": cmd_resolve,
        "search": cmd_search,
        "build": cmd_build,
        "download": cmd_download,
    }
    handlers[args.command](args)
|
|
|
|
|
|
if __name__ == "__main__":
|
|
main()
|