#!/usr/bin/env python3
"""Build a static site from quips, quotes, and works supplied on the command
line, and bundle the rendered pages into a zip archive.

Invocation, as the argv handling below expects: the first argument is the
output zip file, and every remaining argument is the path to an input data
file.
"""

from dataclasses import dataclass
import base58
import datetime
import os
import markdown
import pystache
import shutil
import sys
import tempfile
from typing import Optional
import yaml


def short_hash(s: str) -> str:
    """Turn a UUID-like hex string into a short base58 identifier."""
    return base58.b58encode(bytes.fromhex(s.replace('-', '')))[:16].decode('utf-8')


class Datum:
    """Mixin for dataclasses that can be built from a YAML mapping."""

    @classmethod
    def from_yaml(cls, data):
        return cls(**data)

    @classmethod
    def from_file(cls, path):
        with open(path) as f:
            data = yaml.safe_load(f)
        return cls.from_yaml(data)


@dataclass
class Quote(Datum):
    id: str
    content: str
    author: str

    def short_hash(self) -> str:
        return "qt_" + short_hash(self.id)


@dataclass
class Quip(Datum):
    id: str
    content: str

    def short_hash(self) -> str:
        return "qp_" + short_hash(self.id)


@dataclass
class Work:
    slug: str
    category: str
    title: str
    date: str
    contents: str
    description: Optional[str] = None


class Path:
    """Maps logical paths onto the input files named on the command line,
    and onto a temporary output directory for the rendered site."""

    OUTDIR = tempfile.TemporaryDirectory()

    @classmethod
    def data(cls, *paths):
        # the input files are passed as argv[2:]; find the one whose
        # path ends with the requested components
        tgt = os.path.join(*paths)
        for path in sys.argv[2:]:
            if path.endswith(tgt):
                return path
        raise Exception(f"Could not find {tgt}")

    @classmethod
    def out(cls, *paths):
        return os.path.join(cls.OUTDIR.name, *paths)

    @classmethod
    def write(cls, *paths):
        if len(paths) > 1:
            os.makedirs(cls.out(*paths[:-1]), exist_ok=True)
        return open(cls.out(*paths), 'w')

    @classmethod
    def read(cls, *paths):
        with open(cls.data(*paths)) as f:
            return f.read()

    @classmethod
    def list(cls, *paths):
        # collect the distinct path components that appear immediately
        # after the given prefix among the input files
        stuff = set()
        tgt = f'{os.path.join(*paths)}/'
        for path in sys.argv[2:]:
            if tgt in path:
                chunks = path.split('/')
                idx = chunks.index(paths[-1])
                stuff.add(chunks[idx + 1])
        return stuff


class Template:
    renderer = pystache.Renderer(search_dirs="templates")

    # this helper runs at class-definition time, so each template is
    # parsed exactly once
    def load_template(name):
        with open(f"templates/{name}.mustache") as f:
            parsed = pystache.parse(f.read())
        return lambda stuff: Template.renderer.render(parsed, stuff)

    main = load_template("main")
    quote = load_template("quote")
    list = load_template("list")


def main():
    out_file = sys.argv[1]
    year = datetime.datetime.now().year
    std_copy = f'©{year} Getty Ritter'
    no_copy = 'all rights reversed'

    # gather the quips and make their individual pages
    quips = []
    for uuid in Path.list('quips'):
        q = Quip.from_file(Path.data('quips', uuid))
        q.content = markdown.markdown(q.content)
        quips.append(q)
        h = q.short_hash()
        html = Template.main({
            'title': "Quip",
            'contents': Template.quote({'quotelist': [q]}),
            'copy': no_copy,
            'opengraph': {
                'title': f'quip:{h}',
                'url': f'/quip/{h}/',
                'description': q.content,
            },
        })
        # each quip is reachable both by its UUID and by its short hash
        with Path.write('quips', uuid, 'index.html') as f:
            f.write(html)
        with Path.write('quips', h, 'index.html') as f:
            f.write(html)

    # sort 'em and make the combined page
    quips.sort(key=lambda q: q.id)
    with Path.write('quips', 'index.html') as f:
        f.write(Template.main({
            'title': "Quips",
            'contents': Template.quote({'quotelist': quips}),
            'copy': no_copy,
        }))

    # gather the quotes and make their individual pages
    quotes = []
    for uuid in Path.list('quotes'):
        q = Quote.from_file(Path.data('quotes', uuid))
        q.content = markdown.markdown(q.content)
        q.author = markdown.markdown(q.author)
        quotes.append(q)
        contents = Template.quote({'quotelist': [q]})
        h = q.short_hash()
        html = Template.main({
            'title': "Quote",
            'contents': contents,
            'copy': no_copy,
            'opengraph': {
                'title': f'quote:{h}',
                'url': f'/quote/{h}/',
                'description': f'{q.content}\n—{q.author}',
            },
        })
        # each quote is reachable both by its UUID and by its short hash
        with Path.write('quotes', uuid, 'index.html') as f:
            f.write(html)
        with Path.write('quotes', h, 'index.html') as f:
            f.write(html)

    # sort 'em and make their combined page
    quotes.sort(key=lambda q: q.id)
    with Path.write('quotes', 'index.html') as f:
        f.write(Template.main({
            'title': "Quotes",
            'contents': Template.quote({'quotelist': quotes}),
            'copy': no_copy,
        }))

    # figure out what categories we've got (works.json is valid YAML,
    # so yaml.safe_load can parse it)
    with open(Path.data('works.json')) as f:
        categories = yaml.safe_load(f)
    category_lookup = {c['slug']: c for c in categories}

    # make an index page that lists every category
    with Path.write('category', 'index.html') as f:
        f.write(Template.main({
            'title': 'Categories',
            'contents': Template.list({
                'works': [
                    {'slug': f'category/{c["slug"]}', 'title': c['category']}
                    for c in categories
                ],
            }),
            'copy': std_copy,
        }))

    # create each category page
    for slug in Path.list('works'):
        # we need to know what works exist in the category
        works = []
        for work in Path.list('works', slug):
            # grab the metadata and rendered text for this work
            with open(Path.data('works', slug, work, 'metadata.yaml')) as f:
                meta = yaml.safe_load(f)
            with open(Path.data('works', slug, work, 'text')) as f:
                text = markdown.markdown(f.read(), extensions=['footnotes'])
            w = Work(
                slug=meta.get('slug', work),
                category=meta.get('category', slug),
                title=meta['name'],
                date=meta['date'],
                contents=text,
                # optional per-work description, if the metadata provides one
                description=meta.get('description'),
            )
            if slug == 'pages':
                # always keep index/about up-to-date
                copy = std_copy
            else:
                # report other works in their own year
                copy = f'© Getty Ritter {w.date}'
            if w.description is not None:
                description = w.description
            elif slug in category_lookup:
                singular = category_lookup[slug]['singular']
                description = f'{w.title}: a {singular}'
            else:
                description = '...'
            with Path.write(w.slug, 'index.html') as f:
                f.write(Template.main({
                    'title': w.title,
                    'contents': text,
                    'copy': copy,
                    'opengraph': {
                        'title': w.title,
                        'url': f'/{w.slug}/',
                        'description': description,
                    },
                }))
            works.append(w)
        works.sort(key=lambda w: w.slug)

        # not every on-disk category should be shown: we should find
        # it in the categories list first
        category_metadata = [c for c in categories if c['slug'] == slug]
        if not category_metadata:
            continue
        with Path.write('category', slug, 'index.html') as f:
            f.write(Template.main({
                'title': category_metadata[0]['category'],
                'contents': Template.list({
                    'works': works,
                }),
                'copy': std_copy,
            }))

    # the 'index' page becomes the site root, and static assets get copied in
    shutil.copy(Path.out('index', 'index.html'), Path.out('index.html'))
    os.makedirs(Path.out('static'), exist_ok=True)
    shutil.copy('static/main.css', Path.out('static', 'main.css'))
    shutil.copy('static/icon.png', Path.out('static', 'icon.png'))

    # bundle everything up as the requested zip file and clean up
    shutil.make_archive('output', 'zip', Path.OUTDIR.name)
    shutil.move('output.zip', out_file)
    Path.OUTDIR.cleanup()


if __name__ == '__main__':
    main()