nasg/nasg.py

2323 lines
69 KiB
Python
Raw Normal View History

2017-05-23 11:14:47 +01:00
#!/usr/bin/env python3
2017-12-17 17:37:32 +00:00
__author__ = "Peter Molnar"
__copyright__ = "Copyright 2017-2019, Peter Molnar"
__license__ = "apache-2.0"
2017-12-17 17:37:32 +00:00
__maintainer__ = "Peter Molnar"
2018-04-30 20:44:04 +01:00
__email__ = "mail@petermolnar.net"
2017-05-23 11:14:47 +01:00
2018-07-20 16:45:42 +01:00
import glob
2017-05-23 11:14:47 +01:00
import os
2018-07-20 16:45:42 +01:00
import time
2017-05-23 11:14:47 +01:00
import re
import asyncio
2018-07-22 11:33:59 +01:00
import sqlite3
import json
import queue
import base64
2018-07-20 16:45:42 +01:00
from shutil import copy2 as cp
from shutil import rmtree
from math import ceil
2018-07-20 16:45:42 +01:00
from urllib.parse import urlparse
from collections import OrderedDict, namedtuple
import logging
import csv
import arrow
2017-05-23 11:14:47 +01:00
import langdetect
import wand.image
import filetype
2018-07-20 16:45:42 +01:00
import jinja2
import yaml
# python-frontmatter
2019-01-15 21:28:58 +00:00
import frontmatter
from feedgen.feed import FeedGenerator
# unicode-slugify
from slugify import slugify
import requests
2019-03-22 15:49:24 +00:00
from pandoc import PandocMD2HTML, PandocMD2TXT, PandocHTML2TXT
from meta import Exif
2018-07-20 16:45:42 +01:00
import settings
from settings import struct
import keys
2018-07-20 16:45:42 +01:00
2019-06-25 22:48:04 +01:00
logger = logging.getLogger("NASG")

# field names match the named groups of RE_MDIMG below
MarkdownImage = namedtuple("MarkdownImage", ["match", "alt", "fname", "title", "css"])

# markdown image syntax: ![alt](fname "title"){css}
RE_MDIMG = re.compile(
    r"(?P<match>!\[(?P<alt>[^\]]+)?\]\((?P<fname>[^\s\]]+)"
    r"(?:\s[\'\"](?P<title>[^\"\']+)[\'\"])?\)(?:{(?P<css>[^\}]+)\})?)",
    re.IGNORECASE,
)

# fenced code block markers (``` or ~~~, 3 or 4 chars) at line start
RE_CODE = re.compile(
    r'^(?:[~`]{3,4}).+$',
    re.MULTILINE
)

# pandoc's highlighted-code output; rewritten in MarkdownDoc.html_content
RE_PRECODE = re.compile(
    r'<pre class="([^"]+)"><code>'
)

# own-site URLs, either standalone (full line) or double-quoted;
# consumed by relurl()
RE_MYURL = re.compile(
    r'(^(%s[^"]+)$|"(%s[^"]+)")' % (settings.site.url, settings.site.url)
)
2018-07-20 16:45:42 +01:00
def mtime(path):
    """ return seconds level mtime or 0 (chomp microsecs) """
    if not os.path.exists(path):
        return 0
    return int(os.path.getmtime(path))
def utfyamldump(data):
    """ dump YAML with actual UTF-8 chars """
    opts = {
        "default_flow_style": False,
        "indent": 4,
        "allow_unicode": True,
    }
    return yaml.dump(data, **opts)
2018-11-19 16:16:52 +00:00
def url2slug(url, limit=200):
    """ convert URL to max 200 char ASCII string """
    # drop scheme and optional www before slugifying
    stripped = re.sub(r"^https?://(?:www)?", "", url)
    slug = slugify(stripped, only_ascii=True, lower=True)
    return slug[:limit]
def rfc3339todt(rfc3339):
    """ nice dates for humans

    :param rfc3339: any date representation arrow.get() accepts
    :returns: "YYYY-MM-DD HH:mm ZZZ" formatted string
    """
    # arrow's .format() already returns a str; the original wrapped it in
    # a redundant "%s" % (t)
    return arrow.get(rfc3339).format("YYYY-MM-DD HH:mm ZZZ")
2019-03-22 15:49:24 +00:00
def extractlicense(url):
    """ extract license name from a link to a licence file

    e.g. "https://spdx.org/licenses/apache-2.0.html" -> "APACHE-2.0"
    """
    # the extension is irrelevant; only the basename carries the licence id
    name, _ = os.path.splitext(os.path.basename(url))
    return name.upper()
def relurl(text, baseurl=None):
    """Rewrite own-site absolute URLs inside `text` to relative ones.

    Both standalone URLs and double-quoted attribute values are handled;
    directory URLs get the html filename appended so relative links stay
    valid on disk.
    """
    baseurl = baseurl or settings.site.url
    for match, standalone, href in RE_MYURL.findall(text):
        # the regex yields either a quoted href or a bare standalone URL
        if len(href):
            url, needsquotes = href, True
        else:
            url, needsquotes = standalone, False
        r = os.path.relpath(url, baseurl)
        if url.endswith("/") and not r.endswith("/"):
            r = "%s/%s" % (r, settings.filenames.html)
        if needsquotes:
            r = '"%s"' % r
        logger.debug("RELURL: %s => %s (base: %s)", match, r, baseurl)
        text = text.replace(match, r)
    return text
def writepath(fpath, content, mtime=0):
    """ f.write with extras: creates parent directories and picks
    text vs binary mode based on the content type.

    :param fpath: destination file path
    :param content: str (written in "wt" mode) or bytes ("wb" mode)
    :param mtime: unused; kept for backward compatibility with callers
    """
    d = os.path.dirname(fpath)
    if not os.path.isdir(d):
        logger.debug("creating directory tree %s", d)
        # exist_ok=True avoids a crash if the directory appears between
        # the isdir() check above and this call
        os.makedirs(d, exist_ok=True)
    if isinstance(content, str):
        mode = "wt"
    else:
        mode = "wb"
    with open(fpath, mode) as f:
        logger.info("writing file %s", fpath)
        f.write(content)
#def maybe_copy(source, target):
#""" copy only if target mtime is smaller, than source mtime """
#if os.path.exists(target) and mtime(source) <= mtime(target):
#return
#logger.info("copying '%s' to '%s'", source, target)
#cp(source, target)
def extractdomain(url):
    """Return the network location (hostname[:port]) part of a URL."""
    return urlparse(url).netloc
# shared template environment; block trimming keeps the rendered HTML
# free of jinja2 whitespace noise, and the helper functions above are
# exposed as filters for the templates
J2 = jinja2.Environment(
    loader=jinja2.FileSystemLoader(searchpath=settings.paths.get("tmpl")),
    lstrip_blocks=True,
    trim_blocks=True,
)
J2.filters["relurl"] = relurl
J2.filters["url2slug"] = url2slug
J2.filters["printdate"] = rfc3339todt
J2.filters["extractlicense"] = extractlicense
J2.filters["extractdomain"] = extractdomain
class cached_property(object):
    """Minimal caching descriptor.

    On first access the wrapped method runs once and its result is stored
    on the instance under the same name, shadowing this descriptor; every
    later access therefore hits a plain instance attribute.
    """

    def __init__(self, method, name=None):
        self.method = method
        self.name = name if name else method.__name__

    def __get__(self, inst, cls):
        # class-level access returns the descriptor itself
        if inst is None:
            return self
        value = self.method(inst)
        setattr(inst, self.name, value)
        return value
class AQ:
    """ Async queue which starts execution right on population """

    def __init__(self):
        # get_event_loop() without a running loop is deprecated/removed on
        # newer Pythons; fall back to creating a fresh loop
        try:
            self.loop = asyncio.get_event_loop()
        except RuntimeError:
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)
        # NOTE: the loop= kwarg of asyncio.Queue was removed in Python 3.10
        self.queue = asyncio.Queue()

    def put(self, task):
        """ schedule `task` immediately and track its future in the queue

        bugfix: Queue.put() is a coroutine; the original called it without
        awaiting, so nothing was ever enqueued. put_nowait() actually
        stores the future (the queue is unbounded, so it never blocks).
        """
        self.queue.put_nowait(asyncio.ensure_future(task, loop=self.loop))

    async def consume(self):
        """ drain the queue, awaiting every scheduled future so run()
        only returns once all submitted tasks have finished """
        while not self.queue.empty():
            item = await self.queue.get()
            await item
            self.queue.task_done()

    def run(self):
        consumer = asyncio.ensure_future(self.consume(), loop=self.loop)
        self.loop.run_until_complete(consumer)
class Webmention(object):
    """ outgoing webmention class

    Represents one webmention from `source` (own post URL) to `target`.
    Send status is persisted as a .ping file inside `dpath`, so a mention
    is only re-sent when the post changed after the last successful send.
    """

    def __init__(self, source, target, dpath, mtime=0):
        self.source = source
        self.target = target
        self.dpath = dpath
        # default to "now" so a fresh object always looks newer than
        # any existing ping file
        if not mtime:
            mtime = arrow.utcnow().timestamp
        self.mtime = mtime

    @property
    def fpath(self):
        # status file named after the slugified target URL
        return os.path.join(self.dpath, "%s.ping" % (url2slug(self.target, 200)))

    def check_syndication(self):
        """ this is very specific to webmention.io and brid.gy publish """
        # fed.brid.gy is the fediverse bridge - it doesn't produce
        # syndication URLs, only the brid.gy publish endpoints do
        if "fed.brid.gy" in self.target:
            return
        if "brid.gy" not in self.target:
            return
        if not self.exists:
            return
        with open(self.fpath) as f:
            txt = f.read()
        # only telegraph.p3k.io responses contain a pollable status URL
        if "telegraph.p3k.io" not in txt:
            return
        try:
            maybe = json.loads(txt)
            if "location" not in maybe:
                return
            # no http_body cached yet: poll the telegraph status URL and
            # overwrite the ping file with the full response
            if "http_body" not in maybe:
                logger.debug(
                    "trying to re-fetch %s for %s", maybe["location"], self.fpath
                )
                wio = requests.get(maybe["location"])
                if wio.status_code != requests.codes.ok:
                    return
                maybe = wio.json()
                logger.debug("response: %s", maybe)
                with open(self.fpath, "wt") as update:
                    update.write(json.dumps(maybe, sort_keys=True, indent=4))
            # http_body is a JSON string; a "url" key in it is the
            # syndicated copy's URL, saved as a .copy file
            if "url" in maybe["http_body"]:
                data = json.loads(maybe["http_body"])
                url = data["url"]
                sp = os.path.join(self.dpath, "%s.copy" % url2slug(url, 200))
                if os.path.exists(sp):
                    return
                with open(sp, "wt") as f:
                    logger.info("writing syndication copy %s to %s", url, sp)
                    f.write(url)
        except Exception as e:
            # best effort: a failed poll must not break the build
            logger.error("failed to fetch syndication URL for %s: %s", self.dpath, e)
            pass

    @property
    def exists(self):
        # True only when the ping file exists AND is newer than the post,
        # i.e. this mention was already sent after the last edit
        if not os.path.isfile(self.fpath):
            return False
        elif mtime(self.fpath) > self.mtime:
            return True
        else:
            return False

    def save(self, content):
        # persist the send result into the .ping file
        writepath(self.fpath, content)

    async def send(self):
        # already sent: just poll for syndication results
        if self.exists:
            self.check_syndication()
            return
        # --noping: record a placeholder instead of hitting the network
        elif settings.args.get("noping"):
            self.save("noping entry at %s" % arrow.now())
            return
        # delegate the actual sending to telegraph.p3k.io
        telegraph_url = "https://telegraph.p3k.io/webmention"
        telegraph_params = {
            "token": "%s" % (keys.telegraph.get("token")),
            "source": "%s" % (self.source),
            "target": "%s" % (self.target),
        }
        r = requests.post(telegraph_url, data=telegraph_params)
        logger.info(
            "sent webmention to telegraph from %s to %s", self.source, self.target
        )
        if r.status_code not in [200, 201, 202]:
            logger.error("sending failed: %s %s", r.status_code, r.text)
        else:
            self.save(r.text)
2019-06-25 22:48:04 +01:00
2018-07-20 16:45:42 +01:00
class MarkdownDoc(object):
    """ Base class for anything that is stored as .md

    Subclasses must provide `self.fpath` pointing at a YAML-frontmatter
    markdown file.
    """

    @property
    def mtime(self):
        # filesystem mtime of the markdown file, seconds resolution
        return mtime(self.fpath)

    @property
    def dt(self):
        """ this returns a timestamp, not an arrow object """
        # newest of: file mtime, 'published' and 'date' frontmatter keys
        maybe = self.mtime
        for key in ["published", "date"]:
            t = self.meta.get(key, None)
            # "null" guards against YAML serialising None as a string
            if t and "null" != t:
                try:
                    t = arrow.get(t)
                    if t.timestamp > maybe:
                        maybe = t.timestamp
                except Exception as e:
                    logger.error(
                        "failed to parse date: %s for key %s in %s", t, key, self.fpath
                    )
        return maybe

    @cached_property
    def _parsed(self):
        # (frontmatter dict, markdown body) tuple; the file is read once
        # per instance thanks to cached_property
        with open(self.fpath, mode="rt") as f:
            logger.debug("parsing YAML+MD file %s", self.fpath)
            meta, txt = frontmatter.parse(f.read())
        return (meta, txt)

    @cached_property
    def meta(self):
        # YAML frontmatter as a dict
        return self._parsed[0]

    @cached_property
    def content(self):
        # raw markdown body without the frontmatter
        return self._parsed[1]

    @cached_property
    def html_content(self):
        c = "%s" % (self.content)
        if not len(c):
            return c
        # replace markdown image references with their rendered
        # (str(WebImage)) markup before handing off to pandoc
        if hasattr(self, "images") and len(self.images):
            for match, img in self.images.items():
                c = c.replace(match, str(img))
        c = str(PandocMD2HTML(c))
        # pandoc emits <pre class="lang"><code>; rewrite to
        # language- prefixed classes for client side highlighters
        c = RE_PRECODE.sub('<pre><code lang="\g<1>" class="language-\g<1>">', c)
        return c
2018-07-20 16:45:42 +01:00
class Comment(MarkdownDoc):
    """A received webmention/comment, stored as a .md file next to the
    post it belongs to."""

    def __init__(self, fpath):
        self.fpath = fpath

    @property
    def dt(self):
        # NOTE: unlike MarkdownDoc.dt this returns an arrow object,
        # not an epoch timestamp
        maybe = self.meta.get("date")
        if maybe and "null" != maybe:
            dt = arrow.get(maybe)
        else:
            dt = arrow.get(mtime(self.fpath))
        return dt

    @property
    def targetname(self):
        # slug of the entry this comment targets, derived from the
        # target URL's path
        t = urlparse(self.meta.get("target"))
        return os.path.split(t.path.lstrip("/"))[0]
        # t = urlparse(self.meta.get('target'))
        # return t.path.rstrip('/').strip('/').split('/')[-1]

    @property
    def source(self):
        # URL of the page that mentioned us
        return self.meta.get("source")

    @property
    def author(self):
        # schema.org Person; falls back to the source hostname when the
        # webmention carried no author metadata
        r = {
            "@context": "http://schema.org",
            "@type": "Person",
            "name": urlparse(self.source).hostname,
            "url": self.source,
        }
        author = self.meta.get("author")
        if not author:
            return r
        if "name" in author:
            r.update({"name": self.meta.get("author").get("name")})
        elif "url" in author:
            r.update({"name": urlparse(self.meta.get("author").get("url")).hostname})
        return r

    @property
    def type(self):
        # webmention kind: like, repost, reply... defaults to plain mention
        return self.meta.get("type", "webmention")
        # if len(self.content):
        # maybe = clean(self.content, strip=True)
        # if maybe in UNICODE_EMOJI:
        # return maybe

    @cached_property
    def jsonld(self):
        # schema.org Comment representation, embedded into the parent
        # post's jsonld
        r = {
            "@context": "http://schema.org",
            "@type": "Comment",
            "author": self.author,
            "url": self.source,
            "discussionUrl": self.meta.get("target"),
            "datePublished": str(self.dt),
            "disambiguatingDescription": self.type,
        }
        return r
2018-07-20 16:45:42 +01:00
class Gone(object):
    """A deleted entry: knows its slug and removes any stale rendered
    output from the build tree."""

    def __init__(self, fpath):
        self.fpath = fpath
        self.mtime = mtime(fpath)

    @property
    def source(self):
        """Slug of the gone entry: the filename without its extension."""
        base = os.path.basename(self.fpath)
        return os.path.splitext(base)[0]

    @property
    def renderdir(self):
        """Directory inside the build tree belonging to this slug."""
        return os.path.join(settings.paths.get("build"), self.source)

    @property
    def renderfile(self):
        """Path of the (previously) rendered HTML file."""
        return os.path.join(self.renderdir, settings.filenames.html)

    @property
    def template(self):
        """Jinja2 template name, derived from the class name."""
        return "%s.j2.html" % (self.__class__.__name__)

    @property
    def tmplvars(self):
        """Template variables."""
        return {"source": self.source}

    async def render(self):
        """Delete any previously rendered output for this entry."""
        if os.path.exists(self.renderfile):
            rmtree(os.path.dirname(self.renderfile))
2018-04-30 20:44:04 +01:00
class Redirect(Gone):
    """
    Redirect object for entries that moved
    """

    @cached_property
    def target(self):
        """Redirect destination: the text content of the source file."""
        with open(self.fpath, "rt") as f:
            destination = f.read().strip()
        return destination

    @property
    def tmplvars(self):
        """Template variables: both ends of the redirect."""
        return {"source": self.source, "target": self.target}
2018-07-20 16:45:42 +01:00
class Singular(MarkdownDoc):
    """
    A Singular object: a complete representation of a post, including
    all it's comments, files, images, etc
    """

    def __init__(self, fpath):
        self.fpath = fpath
        # the post's slug is its directory name; the category is the
        # parent directory's name
        n = os.path.dirname(fpath)
        self.name = os.path.basename(n)
        self.category = os.path.basename(os.path.dirname(n))

    @cached_property
    def files(self):
        """
        An array of files present at the same directory level as
        the Singular object, excluding hidden (starting with .) and markdown
        (ending with .md) files
        """
        return [
            k
            for k in glob.glob(os.path.join(os.path.dirname(self.fpath), "*.*"))
            if not k.startswith(".")
        ]

    @property
    def updated(self):
        # newest activity: the post itself or any of its comments
        maybe = self.dt
        if len(self.comments):
            for c in self.comments.values():
                if c.dt > maybe:
                    maybe = c.dt
        return maybe

    @property
    def dt(self):
        # epoch timestamp: the later of the post's own dt and the newest
        # comment timestamp (comments are keyed by their epoch)
        dt = int(MarkdownDoc.dt.fget(self))
        for maybe in self.comments.keys():
            if int(dt) < int(maybe):
                dt = int(maybe)
        return dt

    @property
    def sameas(self):
        # URLs of syndicated copies, one per *.copy file next to the post
        r = {}
        for k in glob.glob(os.path.join(os.path.dirname(self.fpath), "*.copy")):
            with open(k, "rt") as f:
                r.update({f.read(): True})
        return list(r.keys())

    @cached_property
    def comments(self):
        """
        An dict of Comment objects keyed with their path, populated from the
        same directory level as the Singular objects
        """
        # NOTE(review): keys are actually the comments' epoch timestamps,
        # not their paths - see comments[c.dt.timestamp] below
        comments = {}
        files = [
            k
            for k in glob.glob(os.path.join(os.path.dirname(self.fpath), "*.md"))
            if os.path.basename(k) != settings.filenames.md
        ]
        for f in files:
            c = Comment(f)
            comments[c.dt.timestamp] = c
        return comments

    @cached_property
    def images(self):
        """
        A dict of WebImage objects, populated by:
        - images that are present in the Markdown content
        - and have an actual image file at the same directory level as
        the Singular object
        """
        images = {}
        for match, alt, fname, title, css in RE_MDIMG.findall(self.content):
            mdimg = MarkdownImage(match, alt, fname, title, css)
            imgpath = os.path.join(os.path.dirname(self.fpath), fname)
            if imgpath in self.files:
                # only keep files whose magic bytes say they're images
                kind = filetype.guess(imgpath)
                if kind and "image" in kind.mime.lower():
                    images.update({match: WebImage(imgpath, mdimg, self)})
            else:
                logger.error("Missing image: %s, referenced in %s", imgpath, self.fpath)
        return images

    @property
    def is_page(self):
        # categories starting with _ are standalone pages, not posts
        if self.category.startswith("_"):
            return True
        return False

    @property
    def is_front(self):
        """
        Returns if the post should be displayed on the front
        """
        if self.category in settings.notinfeed:
            return False
        return True

    @property
    def is_photo(self):
        """
        This is true if there is a file, with the same name as the entry's
        directory - so, it's slug -, and that that image believes it's a a
        photo.
        """
        if len(self.images) != 1:
            return False
        photo = next(iter(self.images.values()))
        maybe = self.fpath.replace(settings.filenames.md, "%s.jpg" % (self.name))
        if photo.fpath == maybe:
            return True
        return False

    @property
    def photo(self):
        # the single WebImage of a photo post; None otherwise
        if not self.is_photo:
            return None
        return next(iter(self.images.values()))

    @property
    def summary(self):
        return self.meta.get("summary", "")

    @cached_property
    def html_summary(self):
        c = "%s" % (self.summary)
        return PandocMD2HTML(c)

    @cached_property
    def txt_summary(self):
        return PandocMD2TXT(self.summary)

    @cached_property
    def txt_content(self):
        return PandocMD2TXT(self.content)

    @property
    def title(self):
        # replies are titled after what they reply to; otherwise fall
        # back to the formatted publish date when no title was set
        if self.is_reply:
            return "RE: %s" % self.is_reply
        return self.meta.get("title", self.published.format(settings.displaydate))

    @property
    def tags(self):
        return self.meta.get("tags", [])

    @property
    def syndicate(self):
        # syndication targets; fed.brid.gy is always added, flickr only
        # for photo posts
        urls = self.meta.get("syndicate", [])
        urls.append("https://fed.brid.gy/")
        if self.is_photo:
            urls.append("https://brid.gy/publish/flickr")
        return urls

    def baseN(self, num, b=36, numerals="0123456789abcdefghijklmnopqrstuvwxyz"):
        """
        Creates short, lowercase slug for a number (an epoch) passed
        """
        num = int(num)
        return ((num == 0) and numerals[0]) or (
            self.baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b]
        )

    @property
    def shortslug(self):
        # base36 representation of the publish timestamp
        return self.baseN(self.published.timestamp)

    @property
    def published(self):
        # ok, so here's a hack: because I have no idea when my older photos
        # were actually published, any photo from before 2014 will have
        # the EXIF createdate as publish date
        pub = arrow.get(self.meta.get("published"))
        if self.is_photo:
            maybe = arrow.get(self.photo.exif.get("CreateDate"))
            if maybe.year < settings.photo.earlyyears:
                pub = maybe
        return pub

    @property
    def is_reply(self):
        # the in-reply-to URL, or False when this isn't a reply
        return self.meta.get("in-reply-to", False)

    @property
    def is_future(self):
        # True for posts scheduled after "now"
        if self.published.timestamp > arrow.utcnow().timestamp:
            return True
        return False

    @property
    def to_ping(self):
        # outgoing Webmention objects this post needs to send
        urls = []
        # public posts get federated via fed.brid.gy
        if not self.is_page and self.is_front:
            w = Webmention(
                self.url, "https://fed.brid.gy/", os.path.dirname(self.fpath), self.dt
            )
            urls.append(w)
        if self.is_reply:
            w = Webmention(
                self.url, self.is_reply, os.path.dirname(self.fpath), self.dt
            )
            urls.append(w)
        elif self.is_photo:
            w = Webmention(
                self.url,
                "https://brid.gy/publish/flickr",
                os.path.dirname(self.fpath),
                self.dt,
            )
            urls.append(w)
        return urls

    @property
    def licence(self):
        # per-category licence, with a _default fallback
        k = "_default"
        if self.category in settings.licence:
            k = self.category
        return settings.licence[k]

    @property
    def lang(self):
        # detected language of title+content; falls back to English
        lang = "en"
        try:
            lang = langdetect.detect(
                "\n".join([self.meta.get("title", ""), self.content])
            )
        except BaseException:
            pass
        return lang

    @property
    def url(self):
        return "%s/%s/" % (settings.site.get("url"), self.name)

    @property
    def has_code(self):
        # True when the markdown contains fenced code blocks
        if RE_CODE.search(self.content):
            return True
        else:
            return False

    @cached_property
    def review(self):
        # schema.org Review dict, or False when this isn't a review post
        if "review" not in self.meta:
            return False
        review = self.meta.get("review")
        # rating is stored as "rated/outof", e.g. "4/5"
        rated, outof = review.get("rating").split("/")
        r = {
            "@context": "https://schema.org/",
            "@type": "Review",
            "reviewRating": {
                "@type": "Rating",
                "@context": "http://schema.org",
                "ratingValue": rated,
                "bestRating": outof,
                "worstRating": 1,
            },
            "name": review.get("title"),
            "text": review.get("summary"),
            "url": review.get("url"),
            "author": settings.author,
        }
        return r

    @cached_property
    def event(self):
        # schema.org Event dict, or False when this isn't an event post
        if "event" not in self.meta:
            return False
        event = self.meta.get("event", {})
        r = {
            "@context": "http://schema.org",
            "@type": "Event",
            "endDate": str(arrow.get(event.get("end"))),
            "startDate": str(arrow.get(event.get("start"))),
            "location": {
                "@context": "http://schema.org",
                "@type": "Place",
                "address": event.get("location"),
                "name": event.get("location"),
            },
            "name": self.title,
        }
        return r

    @cached_property
    def jsonld(self):
        # the complete schema.org representation of the post; this is
        # what gets handed to the templates and the search index
        r = {
            "@context": "http://schema.org",
            "@type": "Article",
            "@id": self.url,
            "inLanguage": self.lang,
            "headline": self.title,
            "url": self.url,
            "genre": self.category,
            "mainEntityOfPage": "%s#article" % (self.url),
            "dateModified": str(arrow.get(self.dt)),
            "datePublished": str(self.published),
            "copyrightYear": str(self.published.format("YYYY")),
            "license": "https://spdx.org/licenses/%s.html" % (self.licence),
            "image": settings.site.image,
            "author": settings.author,
            "sameAs": self.sameas,
            "publisher": settings.site.publisher,
            "name": self.name,
            "text": self.html_content,
            "description": self.html_summary,
            "potentialAction": [],
            "comment": [],
            "commentCount": len(self.comments.keys()),
            "keywords": self.tags,
        }
        # refine @type for special post kinds
        if self.is_photo:
            r.update(
                {
                    "@type": "Photograph",
                    # "image": self.photo.jsonld,
                }
            )
        elif self.has_code:
            r.update({"@type": "TechArticle"})
        elif self.is_page:
            r.update({"@type": "WebPage"})
        if len(self.images):
            r["image"] = []
            for img in list(self.images.values()):
                r["image"].append(img.jsonld)
        # if not self.is_photo and len(self.images):
        # img = list(self.images.values())[0]
        # r.update({
        # "image": img.jsonld,
        # })
        if self.is_reply:
            r.update(
                {
                    "mentions": {
                        "@context": "http://schema.org",
                        "@type": "Thing",
                        "url": self.is_reply,
                    }
                }
            )
        if self.review:
            r.update({"review": self.review})
        if self.event:
            r.update({"subjectOf": self.event})
        # for donation in settings.donateActions:
        # r["potentialAction"].append(donation)
        for url in list(set(self.syndicate)):
            r["potentialAction"].append(
                {"@context": "http://schema.org", "@type": "InteractAction", "url": url}
            )
        for mtime in sorted(self.comments.keys()):
            r["comment"].append(self.comments[mtime].jsonld)
        return struct(r)

    @property
    def template(self):
        return "%s.j2.html" % (self.__class__.__name__)

    @property
    def gophertemplate(self):
        return "%s.j2.txt" % (self.__class__.__name__)

    @property
    def renderdir(self):
        return os.path.join(settings.paths.get("build"), self.name)

    @property
    def renderfile(self):
        return os.path.join(self.renderdir, settings.filenames.html)

    @property
    def mementofile(self):
        # saved archive copy, stored next to the source markdown
        return os.path.join(os.path.dirname(self.fpath), settings.filenames.memento)

    @property
    def has_memento(self):
        # a zero-byte memento file counts as "no memento"
        if os.path.exists(self.mementofile):
            if os.path.getsize(self.mementofile) > 0:
                return True
        return False

    @property
    def gopherfile(self):
        return os.path.join(self.renderdir, settings.filenames.txt)

    @property
    def exists(self):
        # despite the name: True means "rendered output is up to date",
        # False means render() needs to run
        if settings.args.get("force"):
            logger.debug("rendering required: force mode on")
            return False
        elif not os.path.exists(self.renderfile):
            logger.debug("rendering required: no html yet")
            return False
        elif self.dt > mtime(self.renderfile):
            logger.debug("rendering required: self.dt > html mtime")
            return False
        else:
            logger.debug("rendering not required")
            return True

    @property
    def corpus(self):
        # plain text blob used for search indexing
        return "\n".join([self.title, self.name, self.summary, self.content])

    async def copy_files(self):
        # copy attachments into the build dir, skipping source/generated
        # extensions and anything already up to date
        exclude = [
            ".md",
            ".jpg",
            ".png",
            ".gif",
            ".ping",
            ".url",
            ".del",
            ".copy",
            ".cache",
        ]
        files = glob.glob(os.path.join(os.path.dirname(self.fpath), "*.*"))
        for f in files:
            fname, fext = os.path.splitext(f)
            if fext.lower() in exclude:
                continue
            t = os.path.join(
                settings.paths.get("build"), self.name, os.path.basename(f)
            )
            if os.path.exists(t) and mtime(f) <= mtime(t):
                continue
            logger.info("copying '%s' to '%s'", f, t)
            cp(f, t)

    async def save_memento(self):
        # snapshot our own rendered output as the memento
        cp(self.renderfile, self.mementofile)
        return

    async def save_to_archiveorg(self):
        # fire-and-forget request to the wayback machine save endpoint
        requests.get("http://web.archive.org/save/%s" % (self.url))

    def try_memento(self, url):
        """Query the wayback machine for the snapshot of `url` closest
        to the publish date; returns its HTML text or None."""
        try:
            params = {
                "url": url,
                "timestamp": "%s" % (self.published.format("YYYY-MM-DD")),
            }
            waybackmachine = "http://archive.org/wayback/available"
            snapshots = requests.get(waybackmachine, params=params).json()
            # no archived version...
            if not len(snapshots.get("archived_snapshots", None)):
                logger.warning("no snapshot found for %s", url)
                return None
            else:
                logger.info("snapshot FOUND for %s", url)
                snapshot = snapshots.get("archived_snapshots").get("closest")
                logger.info("getting %s", snapshot["url"])
                original = requests.get(snapshot["url"])
                return original.text
        except Exception as e:
            logger.warning("wayback memento failed for %s: %s", url, e)
            return None

    def maybe_fetch_memento(self):
        if self.has_memento:
            return
        # this commented out part is extremely specific to my old site
        # but it helps anyone who had multiple domains and/or taxonomy
        # structures
        # formerdomains = [
        # 'cadeyrn.webporfolio.hu',
        # 'blog.petermolnar.eu',
        # 'petermolnar.eu',
        # 'petermolnar.net',
        # ]
        # formercategories = [
        # 'linux-tech-coding',
        # 'diy-do-it-yourself',
        # 'photoblog',
        # 'it',
        # 'sysadmin-blog',
        # 'sysadmin',
        # 'fotography',
        # 'blips',
        # 'blog',
        # 'r'
        # ]
        # for domain in formerdomains:
        # maybe = None
        # url = url = 'http://%s/%s/' % (domain, self.name)
        # maybe = self.try_memento(url)
        # if maybe:
        # break
        # for formercategory in formercategories:
        # url = 'http://%s/%s/%s/' % (domain, formercategory, self.name)
        # maybe = self.try_memento(url)
        # if maybe:
        # break
        # if maybe:
        # break
        maybe = self.try_memento(self.url)
        if maybe:
            with open(self.mementofile, "wt") as f:
                logger.info("saving memento for %s to %s", self.name, self.mementofile)
                f.write(maybe)

    async def render(self):
        if settings.args.get("memento"):
            self.maybe_fetch_memento()
            # NOTE(review): nesting reconstructed - the self-saved memento
            # fallback below is assumed to be gated by --memento as well;
            # confirm against upstream history
            if self.exists and not self.has_memento:
                if self.published.timestamp >= settings.mementostartime:
                    cp(self.renderfile, self.mementofile)
        # nothing to do when the rendered output is up to date
        if self.exists:
            return
        memento = False
        if self.has_memento:
            memento = "%s%s" % (self.url, settings.filenames.memento)
        logger.info("rendering %s", self.name)
        # HTML output
        v = {
            "baseurl": self.url,
            "post": self.jsonld,
            "site": settings.site,
            "menu": settings.menu,
            "meta": settings.meta,
            "fnames": settings.filenames,
            "memento": memento,
        }
        writepath(self.renderfile, J2.get_template(self.template).render(v))
        del v
        # gopher (plain text) output
        g = {
            "post": self.jsonld,
            "summary": self.txt_summary,
            "content": self.txt_content,
        }
        writepath(self.gopherfile, J2.get_template(self.gophertemplate).render(g))
        del g
        # JSON output
        j = settings.site.copy()
        j.update({"mainEntity": self.jsonld})
        writepath(
            os.path.join(self.renderdir, settings.filenames.json),
            json.dumps(j, indent=4, ensure_ascii=False),
        )
        del j
        # oembed
        # writepath(
        # os.path.join(self.renderdir, settings.filenames.oembed_json),
        # json.dumps(self.oembed_json, indent=4, ensure_ascii=False)
        # )
        # writepath(
        # os.path.join(self.renderdir, settings.filenames.oembed_xml),
        # self.oembed_xml
        # )
2019-01-15 21:28:58 +00:00
class Home(Singular):
    """The front page: a Singular plus the latest post of every category."""

    def __init__(self, fpath):
        super().__init__(fpath)
        # (category tmplvars, post jsonld) tuples, filled via add()
        self.posts = []

    def add(self, category, post):
        self.posts.append((category.ctmplvars, post.jsonld))

    @property
    def renderdir(self):
        # the front page lives in the build root
        return settings.paths.get("build")

    @property
    def renderfile(self):
        return os.path.join(settings.paths.get("build"), settings.filenames.html)

    @property
    def dt(self):
        # newest of the page's own dt and all listed posts
        maybe = super().dt
        for cat, post in self.posts:
            pts = arrow.get(post["dateModified"]).timestamp
            if pts > maybe:
                maybe = pts
        return maybe

    async def render_gopher(self):
        # gophermap for the root of the gopherhole: one type-1 selector
        # per category
        lines = [
            "%s's gopherhole - phlog, if you prefer" % (settings.site.name),
            "",
            "",
        ]
        for category, post in self.posts:
            line = "1%s\t/%s/%s\t%s\t70" % (
                category["name"],
                settings.paths.category,
                category["name"],
                settings.site.name,
            )
            lines.append(line)
        # NOTE(review): indentation reconstructed - trailing blank line
        # assumed to be appended once, after the loop
        lines.append("")
        writepath(
            self.renderfile.replace(settings.filenames.html, settings.filenames.gopher),
            "\r\n".join(lines),
        )

    async def render(self):
        if self.exists:
            return
        logger.info("rendering %s", self.name)
        r = J2.get_template(self.template).render(
            {
                "baseurl": settings.site.get("url"),
                "post": self.jsonld,
                "site": settings.site,
                "menu": settings.menu,
                "meta": settings.meta,
                "posts": self.posts,
                "fnames": settings.filenames,
            }
        )
        writepath(self.renderfile, r)
        await self.render_gopher()
2019-01-15 21:28:58 +00:00
2018-07-20 16:45:42 +01:00
class WebImage(object):
    """An image referenced from a post's markdown body.

    Couples the original file on disk with the markdown image tag that
    referenced it (`mdimg`, a MarkdownImage namedtuple) and owns the set of
    downsized variants (nested `Resized`) for the configured photo sizes.
    """

    def __init__(self, fpath, mdimg, parent):
        logger.debug("loading image: %s", fpath)
        self.mdimg = mdimg  # MarkdownImage namedtuple parsed from the post
        self.fpath = fpath
        self.parent = parent  # the post object owning this image
        self.mtime = mtime(self.fpath)
        self.fname, self.fext = os.path.splitext(os.path.basename(fpath))
        # one Resized per configured size that is smaller than the original
        self.resized_images = [
            (k, self.Resized(self, k))
            for k in settings.photo.get("sizes").keys()
            if k < max(self.width, self.height)
        ]
        # tiny originals: keep at least one variant, at native size
        if not len(self.resized_images):
            self.resized_images.append(
                (
                    max(self.width, self.height),
                    self.Resized(self, max(self.width, self.height)),
                )
            )

    @property
    def is_mainimg(self):
        # an image named like its parent post is that post's main image
        if self.fname == self.parent.name:
            return True
        return False

    @property
    def jsonld(self):
        """schema.org ImageObject representation of this image."""
        r = {
            "@context": "http://schema.org",
            "@type": "ImageObject",
            "url": self.href,
            "image": self.href,
            "thumbnail": struct(
                {
                    "@context": "http://schema.org",
                    "@type": "ImageObject",
                    "url": self.src,
                    "width": self.displayed.width,
                    "height": self.displayed.height,
                }
            ),
            "name": os.path.basename(self.fpath),
            "encodingFormat": self.mime_type,
            "contentSize": self.mime_size,
            "width": self.linked.width,
            "height": self.linked.height,
            "dateCreated": self.exif.get("CreateDate"),
            "exifData": [],
            "caption": self.caption,
            "headline": self.title,
            "representativeOfPage": False,
        }
        for k, v in self.exif.items():
            r["exifData"].append({"@type": "PropertyValue", "name": k, "value": v})
        # authorship / licence only make sense for own photos
        if self.is_photo:
            r.update(
                {
                    "creator": settings.author,
                    "copyrightHolder": settings.author,
                    "license": settings.licence["_default"],
                }
            )
        if self.is_mainimg:
            r.update({"representativeOfPage": True})
        # attach GPS coordinates when present (0 is the "missing" marker)
        if self.exif["GPSLatitude"] != 0 and self.exif["GPSLongitude"] != 0:
            r.update(
                {
                    "locationCreated": struct(
                        {
                            "@context": "http://schema.org",
                            "@type": "Place",
                            "geo": struct(
                                {
                                    "@context": "http://schema.org",
                                    "@type": "GeoCoordinates",
                                    "latitude": self.exif["GPSLatitude"],
                                    "longitude": self.exif["GPSLongitude"],
                                }
                            ),
                        }
                    )
                }
            )
        return struct(r)

    def __str__(self):
        # an explicit CSS block in the markdown tag means: leave it as-is
        if len(self.mdimg.css):
            return self.mdimg.match
        tmpl = J2.get_template("%s.j2.html" % (self.__class__.__name__))
        return tmpl.render(self.jsonld)

    @cached_property
    def meta(self):
        # full EXIF/metadata dump of the original file; cached, it's expensive
        return Exif(self.fpath)

    @property
    def caption(self):
        # alt text in markdown wins over the embedded Description
        if len(self.mdimg.alt):
            return self.mdimg.alt
        else:
            return self.meta.get("Description", "")

    @property
    def title(self):
        # title in markdown wins over the embedded Headline
        if len(self.mdimg.title):
            return self.mdimg.title
        else:
            return self.meta.get("Headline", self.fname)

    @property
    def tags(self):
        # deduplicated list of embedded Subject keywords
        return list(set(self.meta.get("Subject", [])))

    @property
    def published(self):
        return arrow.get(self.meta.get("ReleaseDate", self.meta.get("ModifyDate")))

    @property
    def width(self):
        return int(self.meta.get("ImageWidth"))

    @property
    def height(self):
        return int(self.meta.get("ImageHeight"))

    @property
    def mime_type(self):
        return str(self.meta.get("MIMEType", "image/jpeg"))

    @property
    def mime_size(self):
        # size in bytes of the largest rendered variant; falls back to the
        # metadata FileSize when the variant hasn't been generated yet
        try:
            size = os.path.getsize(self.linked.fpath)
        except Exception as e:
            logger.error("Failed to get mime size of %s", self.linked.fpath)
            size = self.meta.get("FileSize", 0)
        return size

    @property
    def displayed(self):
        """The Resized variant shown inline (the configured default size)."""
        ret = self.resized_images[0][1]
        for size, r in self.resized_images:
            if size == settings.photo.get("default"):
                ret = r
        return ret

    @property
    def linked(self):
        """The largest Resized variant; used as the click-through target."""
        m = 0
        ret = self.resized_images[0][1]
        for size, r in self.resized_images:
            if size > m:
                m = size
                ret = r
        return ret

    @property
    def src(self):
        return self.displayed.url

    @property
    def href(self):
        return self.linked.url

    @property
    def is_photo(self):
        """True when the EXIF author fields match the configured author regex."""
        r = settings.photo.get("re_author", None)
        if not r:
            return False
        cpr = self.meta.get("Copyright", "")
        art = self.meta.get("Artist", "")
        # both Artist and Copyright missing from EXIF
        if not cpr and not art:
            return False
        # we have regex, Artist and Copyright, try matching them
        if r.search(cpr) or r.search(art):
            return True
        return False

    @property
    def exif(self):
        """Normalized EXIF subset; empty/zero defaults for non-photos."""
        exif = {
            "Model": "",
            "FNumber": "",
            "ExposureTime": "",
            "FocalLength": "",
            "ISO": "",
            "LensID": "",
            "CreateDate": str(arrow.get(self.mtime)),
            "GPSLatitude": 0,
            "GPSLongitude": 0,
        }
        if not self.is_photo:
            return exif

        # first matching candidate tag wins for each normalized key
        mapping = {
            "Model": ["Model"],
            "FNumber": ["FNumber", "Aperture"],
            "ExposureTime": ["ExposureTime"],
            "FocalLength": ["FocalLength"],  # ['FocalLengthIn35mmFormat'],
            "ISO": ["ISO"],
            "LensID": ["LensID", "LensSpec", "Lens"],
            "CreateDate": ["CreateDate", "DateTimeOriginal"],
            "GPSLatitude": ["GPSLatitude"],
            "GPSLongitude": ["GPSLongitude"],
        }
        for ekey, candidates in mapping.items():
            for candidate in candidates:
                maybe = self.meta.get(candidate, None)
                if not maybe:
                    continue
                else:
                    exif[ekey] = maybe
                break
        return struct(exif)

    def _maybe_watermark(self, img):
        """Composite the configured watermark onto `img` (photos only)."""
        if not self.is_photo:
            return img

        wmarkfile = settings.paths.get("watermark")
        if not os.path.exists(wmarkfile):
            return img

        with wand.image.Image(filename=wmarkfile) as wmark:
            # watermark is scaled to 20% of the image height and placed in
            # the bottom-right corner with a 1% margin
            w = self.height * 0.2
            h = wmark.height * (w / wmark.width)
            if self.width > self.height:
                x = self.width - w - (self.width * 0.01)
                y = self.height - h - (self.height * 0.01)
            else:
                x = self.width - h - (self.width * 0.01)
                y = self.height - w - (self.height * 0.01)

            w = round(w)
            h = round(h)
            x = round(x)
            y = round(y)

            wmark.resize(w, h)
            # portrait images get the watermark rotated along the edge
            if self.width <= self.height:
                wmark.rotate(-90)
            img.composite(image=wmark, left=x, top=y)
        return img

    async def downsize(self):
        """Generate every missing (or force-regenerated) resized variant."""
        need = False
        for size, resized in self.resized_images:
            if not resized.exists or settings.args.get("regenerate"):
                need = True
                break
        if not need:
            return

        with wand.image.Image(filename=self.fpath) as img:
            img.auto_orient()
            img = self._maybe_watermark(img)
            for size, resized in self.resized_images:
                if not resized.exists or settings.args.get("regenerate"):
                    logger.info(
                        "resizing image: %s to size %d",
                        os.path.basename(self.fpath),
                        size,
                    )
                    await resized.make(img)

    class Resized:
        """One downsized rendition of the parent WebImage at a given size."""

        def __init__(self, parent, size, crop=False):
            self.parent = parent
            self.size = size
            self.crop = crop

        @property
        def data(self):
            """The rendered file as a base64 data: URI."""
            with open(self.fpath, "rb") as f:
                encoded = base64.b64encode(f.read())
            return "data:%s;base64,%s" % (
                self.parent.mime_type,
                encoded.decode("utf-8"),
            )

        @property
        def suffix(self):
            # filename suffix configured for this size, e.g. "_b"
            return settings.photo.get("sizes").get(self.size, "")

        @property
        def fname(self):
            return "%s%s%s" % (self.parent.fname, self.suffix, self.parent.fext)

        @property
        def fpath(self):
            return os.path.join(self.parent.parent.renderdir, self.fname)

        @property
        def url(self):
            return "%s/%s/%s" % (
                settings.site.get("url"),
                self.parent.parent.name,
                "%s%s%s" % (self.parent.fname, self.suffix, self.parent.fext),
            )

        @property
        def relpath(self):
            return "%s/%s" % (
                self.parent.parent.renderdir.replace(settings.paths.get("build"), ""),
                self.fname,
            )

        @property
        def exists(self):
            # up to date only if the rendition is newer than the original
            if os.path.isfile(self.fpath):
                if mtime(self.fpath) >= self.parent.mtime:
                    return True
            return False

        @property
        def width(self):
            return self.dimensions[0]

        @property
        def height(self):
            return self.dimensions[1]

        @property
        def dimensions(self):
            """(width, height) of this rendition, limited by self.size."""
            width = self.parent.width
            height = self.parent.height
            size = self.size
            ratio = max(width, height) / min(width, height)
            horizontal = True if (width / height) >= 1 else False
            # panorama: reverse "horizontal" because the limit should be on
            # the shorter side, not the longer, and make it a bit smaller, than
            # the actual limit
            # 2.39 is the wide angle cinematic view: anything wider, than that
            # is panorama land
            if ratio > 2.4 and not self.crop:
                size = int(size * 0.6)
                horizontal = not horizontal
            if (horizontal and not self.crop) or (not horizontal and self.crop):
                w = size
                h = int(float(size / width) * height)
            else:
                h = size
                w = int(float(size / height) * width)
            return (w, h)

        async def make(self, original):
            """Render this size from the already-opened `original` wand image."""
            if not os.path.isdir(os.path.dirname(self.fpath)):
                os.makedirs(os.path.dirname(self.fpath))

            with original.clone() as thumb:
                thumb.resize(self.width, self.height)

                if self.crop:
                    thumb.liquid_rescale(self.size, self.size, 1, 1)

                if self.parent.meta.get("FileType", "jpeg").lower() == "jpeg":
                    thumb.compression_quality = 88
                    thumb.unsharp_mask(radius=1, sigma=0.5, amount=0.7, threshold=0.5)
                    thumb.format = "pjpeg"

                # this is to make sure pjpeg happens
                with open(self.fpath, "wb") as f:
                    logger.info("writing %s", self.fpath)
                    thumb.save(file=f)

                # n, e = os.path.splitext(os.path.basename(self.fpath))
                # webppath = self.fpath.replace(e, '.webp')
                # with open(webppath, 'wb') as f:
                # logger.info("writing %s", webppath)
                # thumb.format = 'webp'
                # thumb.compression_quality = 88
                # thumb.save(file=f)
2017-10-27 15:56:05 +01:00
class PHPFile(object):
    """Base class for generated PHP endpoints rendered from Jinja2 templates.

    Subclasses provide `renderfile` / `templatefile` (and `_render`).
    """

    @property
    def exists(self):
        """True when the rendered file is present and newer than its template."""
        if settings.args.get("force"):
            return False
        target = self.renderfile
        if not os.path.exists(target):
            return False
        return self.mtime <= mtime(target)

    @property
    def mtime(self):
        # template modification time drives regeneration
        return mtime(os.path.join(settings.paths.get("tmpl"), self.templatefile))

    @property
    def renderfile(self):
        raise ValueError("Not implemented")

    @property
    def templatefile(self):
        raise ValueError("Not implemented")

    async def render(self):
        # if self.exists:
        # return
        await self._render()
class Search(PHPFile):
    """SQLite FTS4 full-text index of all posts plus the PHP search endpoint."""

    def __init__(self):
        self.fpath = os.path.join(settings.paths.get("build"), "search.sqlite")
        self.db = sqlite3.connect(self.fpath)
        for pragma in (
            "PRAGMA auto_vacuum = INCREMENTAL;",
            "PRAGMA journal_mode = MEMORY;",
            "PRAGMA temp_store = MEMORY;",
            "PRAGMA locking_mode = NORMAL;",
            "PRAGMA synchronous = FULL;",
            'PRAGMA encoding = "UTF-8";',
        ):
            self.db.execute(pragma)
        self.db.execute(
            """
        CREATE VIRTUAL TABLE IF NOT EXISTS data USING fts4(
            url,
            mtime,
            name,
            title,
            category,
            content,
            notindexed=category,
            notindexed=url,
            notindexed=mtime,
            tokenize=porter
        )"""
        )
        # set once anything was inserted; gates the commit in __exit__
        self.is_changed = False

    def __exit__(self):
        if self.is_changed:
            self.db.commit()
            self.db.execute("PRAGMA auto_vacuum;")
        self.db.close()

    def check(self, name):
        """Return the stored mtime for `name`, or 0 when it isn't indexed."""
        row = self.db.execute(
            """
            SELECT
                mtime
            FROM
                data
            WHERE
                name = ?
            """,
            (name,),
        ).fetchone()
        return int(row[0]) if row else 0

    def append(self, post):
        """Insert `post` into the index, replacing any stale entry."""
        stamp = int(post.published.timestamp)
        known = self.check(post.name)
        if known and known < stamp:
            # outdated entry: drop it so the insert below refreshes it
            self.db.execute(
                """
            DELETE
            FROM
                data
            WHERE
                name=?""",
                (post.name,),
            )
            known = False
        if not known:
            self.db.execute(
                """
            INSERT INTO
                data
                (url, mtime, name, title, category, content)
            VALUES
                (?,?,?,?,?,?);
            """,
                (post.url, stamp, post.name, post.title, post.category, post.content),
            )
            self.is_changed = True

    @property
    def templates(self):
        return ["Search.j2.php", "OpenSearch.j2.xml"]

    async def _render(self):
        context = {
            "post": {},
            "site": settings.site,
            "menu": settings.menu,
            "meta": settings.meta,
        }
        for template in self.templates:
            rendered = J2.get_template(template).render(context)
            target = os.path.join(
                settings.paths.get("build"), template.replace(".j2", "").lower()
            )
            writepath(target, rendered)
class IndexPHP(PHPFile):
    """Builds index.php: the 404 handler carrying gone/redirect tables."""

    def __init__(self):
        self.gone = {}  # URIs that should answer HTTP 410
        self.redirect = {}  # source URI -> absolute target URL

    def add_gone(self, uri):
        self.gone[uri] = True

    def add_redirect(self, source, target):
        """Register a redirect; redirecting to a gone target marks source gone."""
        if target in self.gone:
            self.add_gone(source)
            return
        # bare slugs become absolute URLs under the site root
        if "://" not in target:
            target = "%s/%s" % (settings.site.get("url"), target)
        self.redirect[source] = target

    @property
    def renderfile(self):
        return os.path.join(settings.paths.get("build"), "index.php")

    @property
    def templatefile(self):
        return "404.j2.php"

    async def _render(self):
        context = {
            "post": {},
            "site": settings.site,
            "menu": settings.menu,
            "gones": self.gone,
            "redirects": self.redirect,
            "rewrites": settings.rewrites,
            "gone_re": settings.gones,
        }
        writepath(self.renderfile, J2.get_template(self.templatefile).render(context))
2017-10-27 15:56:05 +01:00
class WebhookPHP(PHPFile):
    """Builds webhook.php, the endpoint for webmention.io notifications."""

    @property
    def renderfile(self):
        return os.path.join(settings.paths.get("build"), "webhook.php")

    @property
    def templatefile(self):
        return "Webhook.j2.php"

    async def _render(self):
        context = {"author": settings.author, "webmentionio": keys.webmentionio}
        writepath(self.renderfile, J2.get_template(self.templatefile).render(context))
class MicropubPHP(PHPFile):
    """Builds micropub.php, the micropub endpoint."""

    @property
    def renderfile(self):
        return os.path.join(settings.paths.get("build"), "micropub.php")

    @property
    def templatefile(self):
        return "Micropub.j2.php"

    async def _render(self):
        context = {"site": settings.site, "menu": settings.menu, "paths": settings.paths}
        writepath(self.renderfile, J2.get_template(self.templatefile).render(context))
2018-07-20 16:45:42 +01:00
class Category(dict):
    """A category of posts, keyed by published-timestamp.

    Renders the category's HTML archives (flat or paginated by year), its
    RSS/Atom/JSON feeds, and a gophermap. Posts are stored as
    `{published_timestamp: post}`, so sorting the keys sorts by date.
    """

    def __init__(self, name=""):
        self.name = name
        # arrow format string of the pagination granularity (by year)
        self.trange = "YYYY"

    def __setitem__(self, key, value):
        # timestamps are the keys: two posts published in the same second
        # would silently overwrite each other, so fail loudly instead
        if key in self:
            raise LookupError(
                "key '%s' already exists, colliding posts are: %s vs %s"
                % (key, self[key].fpath, value.fpath)
            )
        dict.__setitem__(self, key, value)

    @property
    def sortedkeys(self):
        """Published timestamps, newest first."""
        return list(sorted(self.keys(), reverse=True))

    @property
    def is_photos(self):
        """True only when every post in the category is a photo."""
        r = True
        for i in self.values():
            r = r & i.is_photo
        return r

    @property
    def is_paginated(self):
        # categories listed in settings.flat get a single archive page
        if self.name in settings.flat:
            return False
        return True

    @property
    def title(self):
        if len(self.name):
            return "%s - %s" % (self.name, settings.site.name)
        else:
            return settings.site.headline

    @property
    def url(self):
        if len(self.name):
            url = "%s/%s/%s/" % (settings.site.url, settings.paths.category, self.name)
        else:
            url = "%s/" % (settings.site.url)
        return url

    @property
    def feedurl(self):
        return "%sfeed/" % (self.url)

    @property
    def template(self):
        return "%s.j2.html" % (self.__class__.__name__)

    @property
    def dpath(self):
        """Directory this category renders into."""
        if len(self.name):
            return os.path.join(
                settings.paths.build, settings.paths.category, self.name
            )
        else:
            return settings.paths.build

    @property
    def newest_year(self):
        return int(self[self.sortedkeys[0]].published.format(self.trange))

    @property
    def years(self):
        """Mapping of year -> archive URL; the newest year lives at the root."""
        years = {}
        for k in self.sortedkeys:
            y = int(self[k].published.format(self.trange))
            if y not in years:
                if y == self.newest_year:
                    url = self.url
                else:
                    url = "%s%d/" % (self.url, y)
                years.update({y: url})
        return years

    @property
    def mtime(self):
        """Published timestamp of the newest post, or 0 when empty."""
        if len(self.sortedkeys) > 0:
            return self[self.sortedkeys[0]].published.timestamp
        else:
            return 0

    def feedpath(self, fname):
        return os.path.join(self.dpath, settings.paths.feed, fname)

    def get_posts(self, start=0, end=-1):
        """jsonld representations of posts in the given sortedkeys slice."""
        return [self[k].jsonld for k in self.sortedkeys[start:end]]

    def is_uptodate(self, fpath, ts):
        """True when `fpath` exists and is at least as new as timestamp `ts`."""
        if settings.args.get("force"):
            return False
        if not os.path.exists(fpath):
            return False
        if mtime(fpath) >= ts:
            return True
        return False

    def newest(self, start=0, end=-1):
        """Newest post `dt` in the slice, or 0 when the slice is empty."""
        if start == end:
            end = -1
        s = sorted([self[k].dt for k in self.sortedkeys[start:end]], reverse=True)
        if len(s) > 0:
            return s[0]  # Timestamp in seconds since epoch
        else:
            return 0

    @property
    def ctmplvars(self):
        """Minimal template vars used when listing this category elsewhere."""
        return {
            "name": self.name,
            "url": self.url,
            "feed": self.feedurl,
            "title": self.title,
        }

    def tmplvars(self, posts=None, year=None):
        """Template context for one archive page of this category.

        BUGFIX: the default for `posts` used to be the mutable literal `[]`,
        which is shared between calls; `None` + normalization is equivalent
        for callers but removes the shared-state hazard.
        """
        if posts is None:
            posts = []
        baseurl = self.url
        if year:
            baseurl = "%s%s/" % (baseurl, year)
        return {
            "baseurl": baseurl,
            "site": settings.site,
            "menu": settings.menu,
            "meta": settings.meta,
            "category": {
                "name": self.name,
                "paginated": self.is_paginated,
                "url": self.url,
                "feed": self.feedurl,
                "title": self.title,
                "year": year,
                "years": self.years,
            },
            "posts": posts,
            "fnames": settings.filenames,
        }

    def indexfpath(self, subpath=None, fname=settings.filenames.html):
        """Path of an archive index file, optionally under a year subpath."""
        if subpath:
            return os.path.join(self.dpath, subpath, fname)
        else:
            return os.path.join(self.dpath, fname)

    async def render_feed(self, xmlformat):
        """Render the newest posts as an RSS or Atom feed ('json' delegates)."""
        if "json" == xmlformat:
            await self.render_json()
            return
        logger.info('rendering category "%s" %s feed', self.name, xmlformat)
        start = 0
        end = int(settings.pagination)

        fg = FeedGenerator()
        fg.id(self.feedurl)
        fg.title(self.title)
        fg.author({"name": settings.author.name, "email": settings.author.email})
        fg.logo("%s/favicon.png" % settings.site.url)
        fg.updated(arrow.get(self.mtime).to("utc").datetime)
        fg.description(settings.site.headline)
        # feedgen prepends entries, so iterate oldest-to-newest
        for k in reversed(self.sortedkeys[start:end]):
            post = self[k]
            fe = fg.add_entry()
            fe.id(post.url)
            fe.title(post.title)
            fe.author({"name": settings.author.name, "email": settings.author.email})
            fe.category(
                {
                    "term": post.category,
                    "label": post.category,
                    "scheme": "%s/%s/%s/"
                    % (settings.site.url, settings.paths.category, post.category),
                }
            )
            fe.published(post.published.datetime)
            fe.updated(arrow.get(post.dt).datetime)
            fe.rights(
                "%s %s %s"
                % (
                    post.licence.upper(),
                    settings.author.name,
                    post.published.format("YYYY"),
                )
            )
            if xmlformat == "rss":
                fe.link(href=post.url)
                fe.content(post.html_content, type="CDATA")
                if post.is_photo:
                    fe.enclosure(
                        post.photo.href,
                        "%d" % post.photo.mime_size,
                        post.photo.mime_type,
                    )
            elif xmlformat == "atom":
                fe.link(href=post.url, rel="alternate", type="text/html")
                fe.content(src=post.url, type="text/html")
                fe.summary(post.summary)

        if xmlformat == "rss":
            fg.link(href=self.feedurl)
            writepath(self.feedpath(settings.filenames.rss), fg.rss_str(pretty=True))
        elif xmlformat == "atom":
            fg.link(href=self.feedurl, rel="self")
            fg.link(href=settings.meta.get("hub"), rel="hub")
            writepath(self.feedpath(settings.filenames.atom), fg.atom_str(pretty=True))

    async def render_json(self):
        """Render the newest posts as a JSON Feed (jsonfeed.org version 1)."""
        logger.info('rendering category "%s" JSON feed', self.name)

        js = {
            "version": "https://jsonfeed.org/version/1",
            "title": self.title,
            "home_page_url": settings.site.url,
            "feed_url": "%s%s" % (self.url, settings.filenames.json),
            "author": {
                "name": settings.author.name,
                "url": settings.author.url,
                "avatar": settings.author.image,
            },
            "items": [],
        }

        for k in reversed(self.sortedkeys[0 : int(settings.pagination)]):
            post = self[k]
            pjs = {
                "id": post.url,
                "content_text": post.txt_content,
                "content_html": post.html_content,
                "url": post.url,
                "date_published": str(post.published),
            }
            if len(post.summary):
                pjs.update({"summary": post.txt_summary})
            if post.is_photo:
                pjs.update(
                    {
                        "attachment": {
                            "url": post.photo.href,
                            "mime_type": post.photo.mime_type,
                            "size_in_bytes": "%d" % post.photo.mime_size,
                        }
                    }
                )
            js["items"].append(pjs)
        writepath(
            self.feedpath(settings.filenames.json),
            json.dumps(js, indent=4, ensure_ascii=False),
        )

    async def render_flat(self):
        """Render the whole category as a single archive page."""
        logger.info("rendering flat archive for %s", self.name)
        r = J2.get_template(self.template).render(self.tmplvars(self.get_posts()))
        writepath(self.indexfpath(), r)

    async def render_gopher(self):
        """Render the category's gophermap: one text selector per post."""
        lines = ["%s - %s" % (self.name, settings.site.name), "", ""]
        for post in self.get_posts():
            line = "0%s\t/%s/%s\t%s\t70" % (
                post.headline,
                post.name,
                settings.filenames.txt,
                settings.site.name,
            )
            lines.append(line)
            if len(post.description):
                lines.extend(str(PandocHTML2TXT(post.description)).split("\n"))
            if isinstance(post["image"], list):
                for img in post["image"]:
                    line = "I%s\t/%s/%s\t%s\t70" % (
                        img.headline,
                        post.name,
                        img.name,
                        settings.site.name,
                    )
                    lines.append(line)
        lines.append("")
        writepath(self.indexfpath(fname=settings.filenames.gopher), "\r\n".join(lines))

    async def render_archives(self):
        """Render one archive page per year; the newest year at the root."""
        for year in self.years.keys():
            if year == self.newest_year:
                fpath = self.indexfpath()
                tyear = None
            else:
                fpath = self.indexfpath("%d" % (year))
                tyear = year
            y = arrow.get("%d" % year, self.trange).to("utc")
            tsmin = y.floor("year").timestamp
            tsmax = y.ceil("year").timestamp
            # keys are timestamps sorted newest first: find the slice of
            # sortedkeys that falls inside this year
            start = len(self.sortedkeys)
            end = 0

            for index, value in enumerate(self.sortedkeys):
                if value <= tsmax and index < start:
                    start = index
                if value >= tsmin and index > end:
                    end = index

            if self.is_uptodate(fpath, self[self.sortedkeys[start]].dt):
                logger.info("%s / %d is up to date", self.name, year)
            else:
                logger.info("updating %s / %d", self.name, year)
                logger.info("getting posts from %d to %d", start, end)
                r = J2.get_template(self.template).render(
                    self.tmplvars(
                        # I don't know why end needs the +1, but without that
                        # some posts disappear
                        # TODO figure this out...
                        self.get_posts(start, end + 1),
                        tyear,
                    )
                )
                writepath(fpath, r)

    async def render_feeds(self):
        """Regenerate each feed format that is older than the newest post."""
        m = {
            "rss": self.feedpath(settings.filenames.rss),
            "atom": self.feedpath(settings.filenames.atom),
            "json": self.feedpath(settings.filenames.json),
        }
        for ft, path in m.items():
            if not self.is_uptodate(path, self.newest()):
                logger.info("%s outdated, generating new", ft)
                await self.render_feed(ft)

    async def render(self):
        await self.render_feeds()
        if not self.is_uptodate(self.indexfpath(), self.newest()):
            await self.render_gopher()
        if not self.is_paginated:
            if not self.is_uptodate(self.indexfpath(), self.newest()):
                await self.render_flat()
        else:
            await self.render_archives()
class Sitemap(dict):
    """Collects post URLs (mapped to their mtimes) and writes them as a
    newline-separated plain-text sitemap."""

    @property
    def mtime(self):
        # mtime of the already-written sitemap; 0 when it doesn't exist yet
        target = self.renderfile
        return mtime(target) if os.path.exists(target) else 0

    def append(self, post):
        self[post.url] = post.mtime

    @property
    def renderfile(self):
        return os.path.join(settings.paths.get("build"), settings.filenames.sitemap)

    async def render(self):
        # nothing collected: nothing to write
        if not len(self):
            return
        # skip when the file is newer than the newest collected post
        if self.mtime >= max(self.values()):
            return
        with open(self.renderfile, "wt") as f:
            f.write("\n".join(sorted(self.keys())))
class WebmentionIO(object):
    """Fetches webmentions received via webmention.io and stores each one as
    a markdown comment file next to the post it targets."""

    def __init__(self):
        self.params = {
            "token": "%s" % (keys.webmentionio.get("token")),
            "since": "%s" % str(self.since),
            "domain": "%s" % (keys.webmentionio.get("domain")),
        }
        self.url = "https://webmention.io/api/mentions"

    @property
    def since(self):
        """Arrow of the newest stored comment's epoch + 1 second.

        Used as the API `since` parameter so only new mentions are fetched.
        """
        newest = 0
        content = settings.paths.get("content")
        for e in glob.glob(os.path.join(content, "*", "*", "*.md")):
            # skip the posts themselves; only comment files carry the epoch
            if os.path.basename(e) == settings.filenames.md:
                continue
            # filenames are like [received epoch]-[slugified source url].md
            try:
                mtime = int(os.path.basename(e).split("-")[0])
            except Exception as exc:
                logger.error("int conversation failed: %s, file was: %s", exc, e)
                continue
            if mtime > newest:
                newest = mtime
        return arrow.get(newest + 1)

    def makecomment(self, webmention):
        """Persist one incoming webmention as a comment .md file.

        Skips self-pings, mentions without author info, and mentions whose
        target post can't be located (or matches several posts).
        """
        data = webmention.get("data", {})
        # BUGFIX: `dt` was previously assigned only inside an
        # `if "published_ts" in webmention.get("data")` guard, so payloads
        # without that key crashed with NameError when building fpath below.
        # Always derive `dt`, falling back to the verification date when
        # there is no usable published date.
        maybe = data.get("published")
        if not maybe or maybe == "None":
            dt = arrow.get(webmention.get("verified_date"))
        else:
            dt = arrow.get(maybe)

        slug = os.path.split(urlparse(webmention.get("target")).path.lstrip("/"))[0]
        # ignore selfpings
        if slug == settings.site.get("name"):
            return

        fdir = glob.glob(os.path.join(settings.paths.get("content"), "*", slug))
        if not len(fdir):
            logger.error("couldn't find post for incoming webmention: %s", webmention)
            return
        elif len(fdir) > 1:
            logger.error("multiple posts found for incoming webmention: %s", webmention)
            return

        fdir = fdir.pop()
        fpath = os.path.join(
            fdir, "%d-%s.md" % (dt.timestamp, url2slug(webmention.get("source")))
        )

        author = webmention.get("data", {}).get("author", None)
        if not author:
            logger.error("missing author info on webmention; skipping")
            return

        meta = {
            "author": {
                "name": author.get("name", ""),
                "url": author.get("url", ""),
                "photo": author.get("photo", ""),
            },
            "date": str(dt),
            "source": webmention.get("source"),
            "target": webmention.get("target"),
            "type": webmention.get("activity").get("type", "webmention"),
        }

        try:
            txt = webmention.get("data").get("content", "").strip()
        except Exception as e:
            txt = ""
            pass

        # frontmatter + body, same shape the site's own posts use
        r = "---\n%s\n---\n\n%s\n" % (utfyamldump(meta), txt)
        writepath(fpath, r)

    def run(self):
        """Query the webmention.io API and persist every returned mention."""
        webmentions = requests.get(self.url, params=self.params)
        logger.info("queried webmention.io with: %s", webmentions.url)
        if webmentions.status_code != requests.codes.ok:
            return
        try:
            mentions = webmentions.json()
            for webmention in mentions.get("links"):
                self.makecomment(webmention)
        except ValueError as e:
            logger.error("failed to query webmention.io: %s", e)
            pass
2019-03-22 15:49:24 +00:00
# class GranaryIO(dict):
2019-06-25 22:48:04 +01:00
# granary = 'https://granary.io/url'
# convert_to = ['as2', 'mf2-json', 'jsonfeed']
# def __init__(self, source):
# self.source = source
# def run(self):
# for c in self.convert_to:
# p = {
# 'url': self.source,
# 'input': html,
# 'output': c
# }
# r = requests.get(self.granary, params=p)
# logger.info("queried granary.io for %s for url: %s", c, self.source)
# if r.status_code != requests.codes.ok:
# continue
# try:
# self[c] = webmentions.text
# except ValueError as e:
# logger.error('failed to query granary.io: %s', e)
# pass
2019-03-22 15:49:24 +00:00
def dat():
    """Publish a .well-known/dat pointer when the site lists a dat:// mirror."""
    for url in settings.site.sameAs:
        if "dat://" not in url:
            continue
        wellknown = os.path.join(settings.paths.build, ".well-known")
        if not os.path.isdir(wellknown):
            os.makedirs(wellknown)
        datfile = os.path.join(settings.paths.build, ".well-known", "dat")
        # (re)write only when missing, or when a full rebuild was forced
        if not os.path.exists(datfile) or settings.args.get("force"):
            writepath(datfile, "%s\nTTL=3600" % (url))
2019-03-22 15:49:24 +00:00
def make():
    """Build the whole site.

    Order matters: incoming webmentions are fetched first (they become
    comment files next to the posts), then every post is rendered and
    its assets queued, then archives/feeds/search/sitemap, and finally
    — unless offline — the build is rsynced and outgoing
    webmentions/archival pings are sent.
    """
    start = int(round(time.time() * 1000))

    # this needs to be before collecting the 'content' itself
    if not settings.args.get("offline") and not settings.args.get("noservices"):
        incoming = WebmentionIO()
        incoming.run()

    # named 'aq' so the stdlib 'queue' module (imported at file top) isn't shadowed
    aq = AQ()
    send = []
    firsttimepublished = []

    content = settings.paths.get("content")
    rules = IndexPHP()

    micropub = MicropubPHP()
    aq.put(micropub.render())

    webhook = WebhookPHP()
    aq.put(webhook.render())

    sitemap = Sitemap()
    search = Search()
    categories = {}
    frontposts = Category()
    home = Home(settings.paths.get("home"))

    for e in sorted(glob.glob(os.path.join(content, "*", "*", settings.filenames.md))):
        post = Singular(e)
        # deal with images, if needed
        for i in post.images.values():
            aq.put(i.downsize())
        if not post.is_future:
            for i in post.to_ping:
                send.append(i)

        # render and arbitrary file copy tasks for this very post
        aq.put(post.render())
        aq.put(post.copy_files())

        # skip draft posts from anything further
        if post.is_future:
            logger.info("%s is for the future", post.name)
            continue
        elif not os.path.exists(post.renderfile):
            logger.debug("%s seems to be first time published", post.name)
            firsttimepublished.append(post)

        # add post to search database
        search.append(post)
        # start populating sitemap
        sitemap.append(post)
        # populate redirects, if any
        rules.add_redirect(post.shortslug, post.url)

        # any category starting with '_' are special: they shouldn't have a
        # category archive page
        if post.is_page:
            continue

        # populate the category with the post
        if post.category not in categories:
            categories[post.category] = Category(post.category)
        categories[post.category][post.published.timestamp] = post

        # add to front, if allowed
        if post.is_front:
            frontposts[post.published.timestamp] = post

    # commit to search database - this saves quite a few disk writes
    search.__exit__()

    # render search and sitemap
    aq.put(search.render())
    aq.put(sitemap.render())

    # make gone and redirect arrays for PHP
    for e in glob.glob(os.path.join(content, "*", "*.del")):
        post = Gone(e)
        rules.add_gone(post.source)
    for e in glob.glob(os.path.join(content, "*", "*.url")):
        post = Redirect(e)
        rules.add_redirect(post.source, post.target)
    # render 404 fallback PHP
    aq.put(rules.render())

    # render categories; newest post of each category goes to the home page
    for category in categories.values():
        home.add(category, category.get(category.sortedkeys[0]))
        aq.put(category.render())

    aq.put(frontposts.render_feeds())
    aq.put(home.render())
    # actually run all the render & copy tasks
    aq.run()

    # copy static files from the content root into the build root
    for e in glob.glob(os.path.join(content, "*.*")):
        if e.endswith(".md"):
            continue
        t = os.path.join(settings.paths.get("build"), os.path.basename(e))
        # only copy when the source is newer than the existing target
        if os.path.exists(t) and mtime(e) <= mtime(t):
            continue
        cp(e, t)

    end = int(round(time.time() * 1000))
    # lazy %-args instead of eager string formatting in the logging call
    logger.info("process took %d ms", end - start)

    if not settings.args.get("offline"):
        # upload site
        try:
            logger.info("starting syncing")
            os.system(
                "rsync -avuhH --delete-after %s/ %s/"
                % (
                    settings.paths.get("build"),
                    "%s/%s" % (settings.syncserver, settings.paths.get("remotewww")),
                )
            )
            logger.info("syncing finished")
        except Exception as e:
            logger.error("syncing failed: %s", e)

    if not settings.args.get("offline") and not settings.args.get("noservices"):
        logger.info("sending webmentions")
        for wm in send:
            aq.put(wm.send())
        aq.run()
        logger.info("sending webmentions finished")

        # archive freshly published posts with external services
        for post in firsttimepublished:
            aq.put(post.save_memento())
            aq.put(post.save_to_archiveorg())
        aq.run()
2019-06-25 22:48:04 +01:00
# script entry point: run a full site build when invoked directly
if __name__ == "__main__":
    make()