#!/usr/bin/env python3

__author__ = "Peter Molnar"
__copyright__ = "Copyright 2017-2019, Peter Molnar"
__license__ = "apache-2.0"
__maintainer__ = "Peter Molnar"
__email__ = "mail@petermolnar.net"

import glob
import os
import time
import re
import imghdr
import asyncio
import sqlite3
import json
import queue
import base64
import logging
import csv
from shutil import copy2 as cp
from math import ceil
from urllib.parse import urlparse
from collections import OrderedDict, namedtuple

import arrow
import langdetect
import wand.image
import jinja2
import yaml
import frontmatter
from feedgen.feed import FeedGenerator
from slugify import slugify
import requests
import lxml.etree as etree

from pandoc import PandocMD2HTML, PandocMD2TXT, PandocHTML2TXT
from meta import Exif
import settings
from settings import struct
import keys

logger = logging.getLogger('NASG')

CATEGORY = 'category'
MDFILE = 'index.md'
TXTFILE = 'index.txt'
HTMLFILE = 'index.html'
GOPHERFILE = 'gophermap'
ATOMFILE = 'atom.xml'
RSSFILE = 'index.xml'
JSONFEEDFILE = 'index.json'

MarkdownImage = namedtuple(
    'MarkdownImage',
    ['match', 'alt', 'fname', 'title', 'css']
)

J2 = jinja2.Environment(
    loader=jinja2.FileSystemLoader(searchpath=settings.paths.get('tmpl')),
    lstrip_blocks=True,
    trim_blocks=True
)

RE_MDIMG = re.compile(
    r'(?P<match>!\[(?P<alt>[^\]]+)?\]\((?P<fname>[^\s]+)'
    r'(?:\s[\'\"](?P<title>[^\"\']+)[\'\"])?\)(?:{(?P<css>[^\}]+)\})?)',
    re.IGNORECASE
)
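
# A sketch of what RE_MDIMG captures; the sample image name is made up
# for illustration:
#   ![alt text](image.jpg "a title"){.left}
# yields the named groups alt='alt text', fname='image.jpg',
# title='a title', css='.left'.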

RE_CODE = re.compile(
    r'^(?:[~`]{3,4}).+$',
    re.MULTILINE
)

# Back to Pandoc: Python Markdown turned out to be a bottomless pit of
# horrors - crippling parsing bugs, random exceptions out of nowhere,
# missing features. It is definitely much faster than Pandoc, but Pandoc
# doesn't fall apart when a fenced code block happens to contain a regex
# for markdown elements. To make Pandoc's fenced code output work with
# Prism, an ugly post-render string replacement is needed: Pandoc emits
# <pre class="codelang"><code>, while Prism wants
# <pre><code class="language-codelang">.
RE_PRECODE = re.compile(
    r'<pre class="([^"]+)"><code>'
)
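
# A minimal sketch of the Pandoc -> Prism rewrite RE_PRECODE is used for
# (see MarkdownDoc.html_content below); the "python" language name here
# is illustrative:
#   RE_PRECODE.sub(
#       r'<pre><code lang="\g<1>" class="language-\g<1>">',
#       '<pre class="python"><code>print(1)'
#   )
# returns '<pre><code lang="python" class="language-python">print(1)'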


def mtime(path):
    """ return seconds level mtime or 0 (chomp microsecs) """
    if os.path.exists(path):
        return int(os.path.getmtime(path))
    return 0


def utfyamldump(data):
    """ dump YAML with actual UTF-8 chars """
    return yaml.dump(
        data,
        default_flow_style=False,
        indent=4,
        allow_unicode=True
    )


def url2slug(url, limit=200):
    """ convert URL to max 200 char ASCII string """
    return slugify(
        re.sub(r"^https?://(?:www)?", "", url),
        only_ascii=True,
        lower=True
    )[:limit]


J2.filters['url2slug'] = url2slug
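
# An illustrative call, with a made-up URL:
#   url2slug("https://example.com/some/post/")
# strips the scheme (and a leading www), then slugifies the remainder
# into a lowercase ASCII string capped at `limit` characters; the exact
# slug shape depends on the slugify implementation in use.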


def rfc3339todt(rfc3339):
    """ nice dates for humans """
    t = arrow.get(rfc3339).format('YYYY-MM-DD HH:mm ZZZ')
    return "%s" % (t)


J2.filters['printdate'] = rfc3339todt


def extractlicense(url):
    """ extract license name """
    n, e = os.path.splitext(os.path.basename(url))
    return n.upper()


J2.filters['extractlicense'] = extractlicense

RE_MYURL = re.compile(
    r'(^(%s[^"]+)$|"(%s[^"]+)")' % (
        settings.site.url,
        settings.site.url
    )
)


def relurl(text, baseurl=None):
    if not baseurl:
        baseurl = settings.site.url
    for match, standalone, href in RE_MYURL.findall(text):
        needsquotes = False
        if len(href):
            needsquotes = True
            url = href
        else:
            url = standalone

        r = os.path.relpath(url, baseurl)
        if url.endswith('/') and not r.endswith('/'):
            r = "%s/%s" % (r, HTMLFILE)
        if needsquotes:
            r = '"%s"' % r
        logger.debug("RELURL: %s => %s (base: %s)", match, r, baseurl)
        text = text.replace(match, r)
    return text


J2.filters['relurl'] = relurl
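
# A sketch of what relurl does, with illustrative URLs; assuming
# settings.site.url is "https://example.com":
#   relurl('"https://example.com/post/"', "https://example.com/other/")
# rewrites the quoted absolute URL into a relative one; since the URL
# ends in '/', HTMLFILE is appended, giving '"../post/index.html"'.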


def writepath(fpath, content, mtime=0):
    """ f.write with extras: creates the directory tree if needed and
    picks text vs binary mode based on the content type """
    d = os.path.dirname(fpath)
    if not os.path.isdir(d):
        logger.debug('creating directory tree %s', d)
        os.makedirs(d)
    if isinstance(content, str):
        mode = 'wt'
    else:
        mode = 'wb'
    with open(fpath, mode) as f:
        logger.info('writing file %s', fpath)
        f.write(content)


class cached_property(object):
    """ extremely simple cached_property decorator:
    whenever something is called as @cached_property, on first run, the
    result is calculated, then the class method is overwritten to be
    a property, containing the result from the method
    """

    def __init__(self, method, name=None):
        self.method = method
        self.name = name or method.__name__

    def __get__(self, inst, cls):
        if inst is None:
            return self
        result = self.method(inst)
        setattr(inst, self.name, result)
        return result
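
# A minimal usage sketch (the Example class is made up): the first
# access runs the method and stores the result as a plain instance
# attribute, which shadows the descriptor, so later accesses are
# ordinary attribute lookups with no recomputation.
#
#   class Example(object):
#       @cached_property
#       def answer(self):
#           return 42  # computed once, cached afterwards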


class AQ:
    """ Async queue which starts execution right on population """

    def __init__(self):
        self.loop = asyncio.get_event_loop()
        self.queue = asyncio.Queue(loop=self.loop)

    def put(self, task):
        # ensure_future schedules the task immediately; put_nowait is
        # used because Queue.put is a coroutine and would never run if
        # simply called from synchronous code
        self.queue.put_nowait(asyncio.ensure_future(task))

    async def consume(self):
        while not self.queue.empty():
            item = await self.queue.get()
            # wait for the scheduled task to actually finish before
            # marking it done
            await item
            self.queue.task_done()
            # asyncio.gather() ?

    def run(self):
        consumer = asyncio.ensure_future(self.consume())
        self.loop.run_until_complete(consumer)
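
# A usage sketch with a made-up coroutine; tasks start as soon as they
# are queued, and run() drains the queue:
#
#   async def ping(n):
#       await asyncio.sleep(n)
#
#   q = AQ()
#   q.put(ping(1))
#   q.put(ping(2))
#   q.run()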


class Webmention(object):
    """ outgoing webmention class """

    def __init__(self, source, target, dpath, mtime=0):
        self.source = source
        self.target = target
        self.dpath = dpath
        if not mtime:
            mtime = arrow.utcnow().timestamp
        self.mtime = mtime

    @property
    def fpath(self):
        return os.path.join(
            self.dpath,
            '%s.ping' % (
                url2slug(self.target, 200)
            )
        )

    @property
    def exists(self):
        if not os.path.isfile(self.fpath):
            return False
        elif mtime(self.fpath) > self.mtime:
            return True
        else:
            return False

    def save(self, content):
        writepath(self.fpath, content)

    async def send(self):
        if self.exists:
            return
        telegraph_url = 'https://telegraph.p3k.io/webmention'
        telegraph_params = {
            'token': '%s' % (keys.telegraph.get('token')),
            'source': '%s' % (self.source),
            'target': '%s' % (self.target)
        }
        r = requests.post(telegraph_url, data=telegraph_params)
        logger.info(
            "sent webmention to telegraph from %s to %s",
            self.source,
            self.target
        )
        if r.status_code not in [200, 201, 202]:
            logger.error('sending failed: %s %s', r.status_code, r.text)
        else:
            self.save(r.text)
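
# A usage sketch with illustrative URLs: send() posts to
# telegraph.p3k.io and save() stores the response as a .ping file next
# to the post, so the same mention is not re-sent while it is fresh:
#
#   wm = Webmention(
#       'https://example.com/reply/',
#       'https://example.net/original/',
#       '/path/to/content/reply'
#   )
#   aq = AQ()
#   aq.put(wm.send())
#   aq.run()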


class MarkdownDoc(object):
    """ Base class for anything that is stored as .md """

    @property
    def mtime(self):
        return mtime(self.fpath)

    @property
    def dt(self):
        maybe = self.mtime
        for key in ['published', 'date']:
            t = self.meta.get(key, None)
            if t and 'null' != t:
                try:
                    t = arrow.get(t)
                    if t.timestamp > maybe:
                        maybe = t.timestamp
                except Exception as e:
                    logger.error(
                        'failed to parse date: %s for key %s in %s',
                        t,
                        key,
                        self.fpath
                    )
        return maybe

    @cached_property
    def _parsed(self):
        with open(self.fpath, mode='rt') as f:
            logger.debug('parsing YAML+MD file %s', self.fpath)
            meta, txt = frontmatter.parse(f.read())
            return (meta, txt)

    @cached_property
    def meta(self):
        return self._parsed[0]

    @cached_property
    def content(self):
        return self._parsed[1]

    @cached_property
    def html_content(self):
        c = "%s" % (self.content)
        if not len(c):
            return c

        if hasattr(self, 'images') and len(self.images):
            for match, img in self.images.items():
                c = c.replace(match, str(img))
        c = str(PandocMD2HTML(c))
        c = RE_PRECODE.sub(
            r'<pre><code lang="\g<1>" class="language-\g<1>">',
            c
        )
        return c


class Comment(MarkdownDoc):
    def __init__(self, fpath):
        self.fpath = fpath

    @property
    def dt(self):
        maybe = self.meta.get('date')
        if maybe and 'null' != maybe:
            dt = arrow.get(maybe)
        else:
            dt = arrow.get(mtime(self.fpath))
        return dt

    @property
    def targetname(self):
        t = urlparse(self.meta.get('target'))
        return os.path.split(t.path.lstrip('/'))[0]
        # t = urlparse(self.meta.get('target'))
        # return t.path.rstrip('/').strip('/').split('/')[-1]

    @property
    def source(self):
        return self.meta.get('source')

    @property
    def author(self):
        r = {
            "@context": "http://schema.org",
            "@type": "Person",
            'name': urlparse(self.source).hostname,
            'url': self.source
        }
        author = self.meta.get('author')
        if not author:
            return r
        if 'name' in author:
            r.update({
                'name': self.meta.get('author').get('name')
            })
        elif 'url' in author:
            r.update({
                'name': urlparse(self.meta.get('author').get('url')).hostname
            })
        return r

    @property
    def type(self):
        return self.meta.get('type', 'webmention')
        # if len(self.content):
        #     maybe = clean(self.content, strip=True)
        #     if maybe in UNICODE_EMOJI:
        #         return maybe

    @cached_property
    def jsonld(self):
        r = {
            "@context": "http://schema.org",
            "@type": "Comment",
            "author": self.author,
            "url": self.source,
            "discussionUrl": self.meta.get('target'),
            "datePublished": str(self.dt),
            "disambiguatingDescription": self.type
        }
        return r


class Gone(object):
    """
    Gone object for deleted entries
    """

    def __init__(self, fpath):
        self.fpath = fpath
        self.mtime = mtime(fpath)

    @property
    def source(self):
        source, fext = os.path.splitext(os.path.basename(self.fpath))
        return source


class Redirect(Gone):
    """
    Redirect object for entries that moved
    """

    @cached_property
    def target(self):
        target = ''
        with open(self.fpath, 'rt') as f:
            target = f.read().strip()
        return target


class Singular(MarkdownDoc):
    """
    A Singular object: a complete representation of a post, including
    all its comments, files, images, etc
    """

    def __init__(self, fpath):
        self.fpath = fpath
        n = os.path.dirname(fpath)
        self.name = os.path.basename(n)
        self.category = os.path.basename(os.path.dirname(n))

    @cached_property
    def files(self):
        """
        An array of files present at the same directory level as
        the Singular object, excluding hidden (starting with .) and
        markdown (ending with .md) files
        """
        return [
            k
            for k in glob.glob(os.path.join(os.path.dirname(self.fpath), '*.*'))
            if not k.startswith('.')
        ]

    @property
    def updated(self):
        maybe = self.dt
        if len(self.comments):
            for c in self.comments.values():
                if c.dt > maybe:
                    maybe = c.dt
        return maybe

    @property
    def dt(self):
        dt = int(MarkdownDoc.dt.fget(self))
        for maybe in self.comments.keys():
            if int(dt) < int(maybe):
                dt = int(maybe)
        return dt

    @property
    def sameas(self):
        r = []
        for k in glob.glob(
            os.path.join(
                os.path.dirname(self.fpath),
                '*.copy'
            )
        ):
            with open(k, 'rt') as f:
                r.append(f.read())
        return r

    @cached_property
    def comments(self):
        """
        A dict of Comment objects keyed with their timestamp, populated
        from the .md files at the same directory level as the Singular
        object
        """
        comments = {}
        files = [
            k
            for k in glob.glob(os.path.join(os.path.dirname(self.fpath), '*.md'))
            if os.path.basename(k) != MDFILE
        ]
        for f in files:
            c = Comment(f)
            comments[c.dt.timestamp] = c
        return comments

    @cached_property
    def images(self):
        """
        A dict of WebImage objects, populated by:
        - images that are present in the Markdown content
        - and have an actual image file at the same directory level as
          the Singular object
        """
        images = {}
        for match, alt, fname, title, css in RE_MDIMG.findall(self.content):
            mdimg = MarkdownImage(match, alt, fname, title, css)
            imgpath = os.path.join(
                os.path.dirname(self.fpath),
                fname
            )
            if imgpath in self.files:
                if imghdr.what(imgpath):
                    images.update({match: WebImage(imgpath, mdimg, self)})
            else:
                logger.error(
                    "Missing image: %s, referenced in %s",
                    imgpath,
                    self.fpath
                )
        return images

    @property
    def is_page(self):
        if self.category.startswith('_'):
            return True
        return False

    @property
    def is_front(self):
        """
        Returns True if the post should be displayed on the front page
        """
        if self.category in settings.notinfeed:
            return False
        return True

    @property
    def is_photo(self):
        """
        True if there is an image with the same name as the entry's
        directory - so, its slug - and that image qualifies as a photo.
        """
        if len(self.images) != 1:
            return False
        photo = next(iter(self.images.values()))
        maybe = self.fpath.replace(MDFILE, "%s.jpg" % (self.name))
        if photo.fpath == maybe:
            return True
        return False

    @property
    def photo(self):
        if not self.is_photo:
            return None
        return next(iter(self.images.values()))

    @property
    def summary(self):
        return self.meta.get('summary', '')

    @cached_property
    def html_summary(self):
        c = "%s" % (self.summary)
        return PandocMD2HTML(c)

    @cached_property
    def txt_summary(self):
        return PandocMD2TXT(self.summary)

    @cached_property
    def txt_content(self):
        return PandocMD2TXT(self.content)

    @property
    def title(self):
        if self.is_reply:
            return "RE: %s" % self.is_reply
        return self.meta.get(
            'title',
            self.published.format(settings.displaydate)
        )

    @property
    def tags(self):
        return self.meta.get('tags', [])

    @property
    def syndicate(self):
        urls = self.meta.get('syndicate', [])
        urls.append("https://fed.brid.gy/")
        if self.is_photo:
            urls.append("https://brid.gy/publish/flickr")
        return urls

    def baseN(self, num, b=36,
              numerals="0123456789abcdefghijklmnopqrstuvwxyz"):
        """
        Creates a short, lowercase slug from a number (an epoch) passed
        """
        num = int(num)
        return ((num == 0) and numerals[0]) or (
            self.baseN(
                num // b,
                b,
                numerals
            ).lstrip(numerals[0]) + numerals[num % b]
        )

    @property
    def shortslug(self):
        return self.baseN(self.published.timestamp)
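
    # A worked example for baseN (the epoch is arbitrary):
    # 1500000000 in base 36 is 'ot27eo', because
    # 1500000000 = 24*36^5 + 29*36^4 + 2*36^3 + 7*36^2 + 14*36 + 24
    # and the numerals for (24, 29, 2, 7, 14, 24) are o, t, 2, 7, e, o.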

    @property
    def published(self):
        # ok, so here's a hack: because I have no idea when my older
        # photos were actually published, any photo from before 2014
        # will have the EXIF CreateDate as publish date
        pub = arrow.get(self.meta.get('published'))
        if self.is_photo:
            maybe = arrow.get(self.photo.exif.get('CreateDate'))
            if maybe.year < settings.photo.earlyyears:
                pub = maybe
        return pub

    @property
    def is_reply(self):
        return self.meta.get('in-reply-to', False)

    @property
    def is_future(self):
        if self.published.timestamp > arrow.utcnow().timestamp:
            return True
        return False

    @property
    def to_ping(self):
        urls = []
        if not self.is_page and self.is_front:
            w = Webmention(
                self.url,
                'https://fed.brid.gy/',
                os.path.dirname(self.fpath),
                self.dt
            )
            urls.append(w)
        if self.is_reply:
            w = Webmention(
                self.url,
                self.is_reply,
                os.path.dirname(self.fpath),
                self.dt
            )
            urls.append(w)
        elif self.is_photo:
            w = Webmention(
                self.url,
                'https://brid.gy/publish/flickr/',
                os.path.dirname(self.fpath),
                self.dt
            )
            urls.append(w)
        return urls

    @property
    def licence(self):
        k = '_default'
        if self.category in settings.licence:
            k = self.category
        return settings.licence[k]

    @property
    def lang(self):
        lang = 'en'
        try:
            lang = langdetect.detect("\n".join([
                self.meta.get('title', ''),
                self.content
            ]))
        except BaseException:
            pass
        return lang

    @property
    def url(self):
        return "%s/%s/" % (
            settings.site.get('url'),
            self.name
        )

    @property
    def has_code(self):
        if RE_CODE.search(self.content):
            return True
        else:
            return False

    @cached_property
    def oembed_xml(self):
        oembed = etree.Element("oembed", version="1.0")
        xmldoc = etree.ElementTree(oembed)
        for k, v in self.oembed_json.items():
            etree.SubElement(oembed, k).text = "%s" % (v)
        s = etree.tostring(
            xmldoc,
            encoding='utf-8',
            xml_declaration=True,
            pretty_print=True
        )
        return s

    @cached_property
    def oembed_json(self):
        r = {
            "version": "1.0",
            "provider_name": settings.site.name,
            "provider_url": settings.site.url,
            "author_name": settings.author.name,
            "author_url": settings.author.url,
            "title": self.title,
            "type": "link",
            "html": self.html_content,
        }

        img = None
        if self.is_photo:
            img = self.photo
        elif not self.is_photo and len(self.images):
            img = list(self.images.values())[0]
        if img:
            r.update({
                "type": "rich",
                "thumbnail_url": img.jsonld.thumbnail.url,
                "thumbnail_width": img.jsonld.thumbnail.width,
                "thumbnail_height": img.jsonld.thumbnail.height
            })
        return r
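
    # A sketch of the oEmbed JSON this produces; all values are
    # illustrative:
    #   {
    #       "version": "1.0",
    #       "provider_name": "Example Site",
    #       "title": "Some post",
    #       "type": "rich",
    #       "html": "<p>...</p>",
    #       "thumbnail_url": "https://example.com/post/img_720.jpg",
    #       "thumbnail_width": 720,
    #       "thumbnail_height": 480
    #   }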

    @cached_property
    def review(self):
        if 'review' not in self.meta:
            return False
        review = self.meta.get('review')
        rated, outof = review.get('rating').split('/')
        r = {
            "@context": "https://schema.org/",
            "@type": "Review",
            "reviewRating": {
                "@type": "Rating",
                "@context": "http://schema.org",
                "ratingValue": rated,
                "bestRating": outof,
                "worstRating": 1
            },
            "name": review.get('title'),
            "text": review.get('summary'),
            "url": review.get('url'),
            "author": settings.author,
        }
        return r

    @cached_property
    def event(self):
        if 'event' not in self.meta:
            return False
        event = self.meta.get('event', {})
        r = {
            "@context": "http://schema.org",
            "@type": "Event",
            "endDate": str(arrow.get(event.get('end'))),
            "startDate": str(arrow.get(event.get('start'))),
            "location": {
                "@context": "http://schema.org",
                "@type": "Place",
                "address": event.get('location'),
                "name": event.get('location'),
            },
            "name": self.title
        }
        return r

    @cached_property
    def jsonld(self):
        r = {
            "@context": "http://schema.org",
            "@type": "Article",
            "@id": self.url,
            "inLanguage": self.lang,
            "headline": self.title,
            "url": self.url,
            "genre": self.category,
            "mainEntityOfPage": "%s#article" % (self.url),
            "dateModified": str(arrow.get(self.dt)),
            "datePublished": str(self.published),
            "copyrightYear": str(self.published.format('YYYY')),
            "license": "https://spdx.org/licenses/%s.html" % (self.licence),
            "image": settings.site.image,
            "author": settings.author,
            "sameAs": self.sameas,
            "publisher": settings.site.publisher,
            "name": self.name,
            "text": self.html_content,
            "description": self.html_summary,
            "potentialAction": [],
            "comment": [],
            "commentCount": len(self.comments.keys()),
        }

        if self.is_photo:
            r.update({
                "@type": "Photograph",
                # "image": self.photo.jsonld,
            })
        elif self.has_code:
            r.update({
                "@type": "TechArticle",
            })
        elif self.is_page:
            r.update({
                "@type": "WebPage",
            })
        if len(self.images):
            r["image"] = []
            for img in list(self.images.values()):
                r["image"].append(img.jsonld)
        # if not self.is_photo and len(self.images):
        #     img = list(self.images.values())[0]
        #     r.update({
        #         "image": img.jsonld,
        #     })

        if self.is_reply:
            r.update({
                "mentions": {
                    "@context": "http://schema.org",
                    "@type": "Thing",
                    "url": self.is_reply
                }
            })

        if self.review:
            r.update({"review": self.review})

        if self.event:
            r.update({"subjectOf": self.event})

        # for donation in settings.donateActions:
        #     r["potentialAction"].append(donation)

        for url in list(set(self.syndicate)):
            r["potentialAction"].append({
                "@context": "http://schema.org",
                "@type": "InteractAction",
                "url": url
            })

        for ts in sorted(self.comments.keys()):
            r["comment"].append(self.comments[ts].jsonld)

        return struct(r)

    @property
    def template(self):
        return "%s.j2.html" % (self.__class__.__name__)

    @property
    def gophertemplate(self):
        return "%s.j2.txt" % (self.__class__.__name__)

    @property
    def renderdir(self):
        return os.path.join(
            settings.paths.get('build'),
            self.name
        )

    @property
    def renderfile(self):
        return os.path.join(
            self.renderdir,
            HTMLFILE
        )

    @property
    def gopherfile(self):
        return os.path.join(
            self.renderdir,
            TXTFILE
        )

    @property
    def exists(self):
        if settings.args.get('force'):
            logger.debug('rendering required: force mode on')
            return False
        elif not os.path.exists(self.renderfile):
            logger.debug('rendering required: no html yet')
            return False
        elif self.dt > mtime(self.renderfile):
            logger.debug('rendering required: self.dt > html mtime')
            return False
        else:
            logger.debug('rendering not required')
            return True

    @property
    def corpus(self):
        return "\n".join([
            self.title,
            self.name,
            self.summary,
            self.content,
        ])

    async def copyfiles(self):
        exclude = [
            '.md',
            '.jpg',
            '.png',
            '.gif',
            '.ping',
            '.url',
            '.del',
            '.copy'
        ]
        files = glob.glob(
            os.path.join(
                os.path.dirname(self.fpath),
                '*.*'
            )
        )
        for f in files:
            fname, fext = os.path.splitext(f)
            if fext.lower() in exclude:
                continue

            t = os.path.join(
                settings.paths.get('build'),
                self.name,
                os.path.basename(f)
            )
            if os.path.exists(t) and mtime(f) <= mtime(t):
                continue
            logger.info("copying '%s' to '%s'", f, t)
            cp(f, t)

    async def render(self):
        if self.exists:
            return
        logger.info("rendering %s", self.name)
        v = {
            'baseurl': self.url,
            'post': self.jsonld,
            'site': settings.site,
            'menu': settings.menu,
            'meta': settings.meta,
        }
        writepath(
            self.renderfile,
            J2.get_template(self.template).render(v)
        )

        g = {
            'post': self.jsonld,
            'summary': self.txt_summary,
            'content': self.txt_content
        }
        writepath(
            self.gopherfile,
            J2.get_template(self.gophertemplate).render(g)
        )

        j = settings.site.copy()
        j.update({
            "mainEntity": self.jsonld
        })
        writepath(
            os.path.join(self.renderdir, JSONFEEDFILE),
            json.dumps(j, indent=4, ensure_ascii=False)
        )
        del j


class Home(Singular):
    def __init__(self, fpath):
        super().__init__(fpath)
        self.posts = []

    def add(self, category, post):
        self.posts.append((category.ctmplvars, post.jsonld))

    @property
    def renderdir(self):
        return settings.paths.get('build')

    @property
    def renderfile(self):
        return os.path.join(
            settings.paths.get('build'),
            HTMLFILE
        )

    @property
    def dt(self):
        maybe = super().dt
        for cat, post in self.posts:
            pts = arrow.get(post['dateModified']).timestamp
            if pts > maybe:
                maybe = pts
        return maybe

    async def render_gopher(self):
        lines = [
            "%s's gopherhole - phlog, if you prefer" % (settings.site.name),
            '',
            ''
        ]

        for category, post in self.posts:
            line = "1%s\t/%s/%s\t%s\t70" % (
                category['name'],
                CATEGORY,
                category['name'],
                settings.site.name
            )
            lines.append(line)
        lines.append('')
        # lines.append('')
        # lines = lines + list(settings.bye.split('\n'))
        # lines.append('')
        writepath(
            self.renderfile.replace(HTMLFILE, GOPHERFILE),
            "\r\n".join(lines)
        )

    async def render(self):
        if self.exists:
            return
        logger.info("rendering %s", self.name)
        r = J2.get_template(self.template).render({
            'baseurl': settings.site.get('url'),
            'post': self.jsonld,
            'site': settings.site,
            'menu': settings.menu,
            'meta': settings.meta,
            'posts': self.posts
        })
        writepath(self.renderfile, r)
        await self.render_gopher()


class WebImage(object):
    def __init__(self, fpath, mdimg, parent):
        logger.debug("loading image: %s", fpath)
        self.mdimg = mdimg
        self.fpath = fpath
        self.parent = parent
        self.mtime = mtime(self.fpath)
        self.fname, self.fext = os.path.splitext(os.path.basename(fpath))
        self.resized_images = [
            (k, self.Resized(self, k))
            for k in settings.photo.get('sizes').keys()
            if k < max(self.width, self.height)
        ]
        if not len(self.resized_images):
            self.resized_images.append((
                max(self.width, self.height),
                self.Resized(self, max(self.width, self.height))
            ))

    @property
    def is_mainimg(self):
        if self.fname == self.parent.name:
            return True
        return False

    @property
    def jsonld(self):
        r = {
            "@context": "http://schema.org",
            "@type": "ImageObject",
            "url": self.href,
            "image": self.href,
            "thumbnail": struct({
                "@context": "http://schema.org",
                "@type": "ImageObject",
                "url": self.src,
                "width": self.displayed.width,
                "height": self.displayed.height,
            }),
            "name": os.path.basename(self.fpath),
            "encodingFormat": self.mime_type,
            "contentSize": self.mime_size,
            "width": self.linked.width,
            "height": self.linked.height,
            "dateCreated": self.exif.get('CreateDate'),
            "exifData": [],
            "caption": self.caption,
            "headline": self.title,
            "representativeOfPage": False
        }
        for k, v in self.exif.items():
            r["exifData"].append({
                "@type": "PropertyValue",
                "name": k,
                "value": v
            })
        if self.is_photo:
            r.update({
                "creator": settings.author,
                "copyrightHolder": settings.author,
                "license": settings.licence['_default']
            })
        if self.is_mainimg:
            r.update({"representativeOfPage": True})
        return struct(r)

    def __str__(self):
        if len(self.mdimg.css):
            return self.mdimg.match
        tmpl = J2.get_template("%s.j2.html" % (self.__class__.__name__))
        return tmpl.render(self.jsonld)

    @cached_property
    def meta(self):
        return Exif(self.fpath)

    @property
    def caption(self):
        if len(self.mdimg.alt):
            return self.mdimg.alt
        else:
            return self.meta.get('Description', '')

    @property
    def title(self):
        if len(self.mdimg.title):
            return self.mdimg.title
        else:
            return self.meta.get('Headline', self.fname)

    @property
    def tags(self):
        return list(set(self.meta.get('Subject', [])))

    @property
    def published(self):
        return arrow.get(
            self.meta.get('ReleaseDate', self.meta.get('ModifyDate'))
        )

    @property
    def width(self):
        return int(self.meta.get('ImageWidth'))

    @property
    def height(self):
        return int(self.meta.get('ImageHeight'))

    @property
    def mime_type(self):
        return str(self.meta.get('MIMEType', 'image/jpeg'))

    @property
    def mime_size(self):
        try:
            size = os.path.getsize(self.linked.fpath)
        except Exception as e:
            logger.error('Failed to get mime size of %s', self.linked.fpath)
            size = self.meta.get('FileSize', 0)
        return size

    @property
    def displayed(self):
        ret = self.resized_images[0][1]
        for size, r in self.resized_images:
            if size == settings.photo.get('default'):
                ret = r
        return ret

    @property
    def linked(self):
        m = 0
        ret = self.resized_images[0][1]
        for size, r in self.resized_images:
            if size > m:
                m = size
                ret = r
        return ret

    @property
    def src(self):
        return self.displayed.url

    @property
    def href(self):
        return self.linked.url

    @property
    def is_photo(self):
        r = settings.photo.get('re_author', None)
        if not r:
            return False
        cpr = self.meta.get('Copyright', '')
        art = self.meta.get('Artist', '')
        # both Artist and Copyright missing from EXIF
        if not cpr and not art:
            return False
        # we have regex, Artist and Copyright, try matching them
        if r.search(cpr) or r.search(art):
            return True
        return False
2017-06-12 15:40:30 +01:00
|
|
|
|
2017-06-12 15:17:29 +01:00
|
|
|
@property
|
|
|
|
def exif(self):
|
2018-07-20 16:45:42 +01:00
|
|
|
exif = {
|
2019-02-07 19:27:15 +00:00
|
|
|
'Model': '',
|
|
|
|
'FNumber': '',
|
|
|
|
'ExposureTime': '',
|
|
|
|
'FocalLength': '',
|
|
|
|
'ISO': '',
|
|
|
|
'LensID': '',
|
|
|
|
'CreateDate': str(arrow.get(self.mtime))
|
2018-07-20 16:45:42 +01:00
|
|
|
}
|
2017-06-12 15:17:29 +01:00
|
|
|
if not self.is_photo:
|
2017-10-27 10:29:33 +01:00
|
|
|
return exif
|
2017-06-12 15:17:29 +01:00
|
|
|
|
|
|
|
mapping = {
|
2019-02-07 19:27:15 +00:00
|
|
|
'Model': ['Model'],
|
|
|
|
'FNumber': ['FNumber', 'Aperture'],
|
|
|
|
'ExposureTime': ['ExposureTime'],
|
2019-02-25 22:40:01 +00:00
|
|
|
'FocalLength': ['FocalLength'], # ['FocalLengthIn35mmFormat'],
|
2019-02-07 19:27:15 +00:00
|
|
|
'ISO': ['ISO'],
|
|
|
|
'LensID': ['LensID', 'LensSpec', 'Lens'],
|
|
|
|
'CreateDate': ['CreateDate', 'DateTimeOriginal']
|
2017-06-12 15:17:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
for ekey, candidates in mapping.items():
|
|
|
|
for candidate in candidates:
|
|
|
|
maybe = self.meta.get(candidate, None)
|
2017-10-27 10:29:33 +01:00
|
|
|
if not maybe:
|
|
|
|
continue
|
|
|
|
else:
|
|
|
|
exif[ekey] = maybe
|
|
|
|
break
|
|
|
|
return exif
|
2017-06-12 15:40:30 +01:00
|
|
|
|
2017-10-27 10:29:33 +01:00
|
|
|
def _maybe_watermark(self, img):
|
|
|
|
if not self.is_photo:
|
|
|
|
return img
|
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
wmarkfile = settings.paths.get('watermark')
|
|
|
|
if not os.path.exists(wmarkfile):
|
2017-05-23 11:14:47 +01:00
|
|
|
return img
|
|
|
|
|
|
|
|
with wand.image.Image(filename=wmarkfile) as wmark:
|
2018-07-20 16:45:42 +01:00
|
|
|
if self.width > self.height:
|
2019-01-31 21:23:16 +00:00
|
|
|
w = self.width * 0.3
|
2017-05-23 11:14:47 +01:00
|
|
|
h = wmark.height * (w / wmark.width)
|
2018-07-20 16:45:42 +01:00
|
|
|
x = self.width - w - (self.width * 0.01)
|
|
|
|
y = self.height - h - (self.height * 0.01)
|
2017-05-23 11:14:47 +01:00
|
|
|
else:
|
2019-01-31 21:23:16 +00:00
|
|
|
w = self.height * 0.24
|
2017-05-23 11:14:47 +01:00
|
|
|
h = wmark.height * (w / wmark.width)
|
2018-07-20 16:45:42 +01:00
|
|
|
x = self.width - h - (self.width * 0.01)
|
|
|
|
y = self.height - w - (self.height * 0.01)
|
2017-05-23 11:14:47 +01:00
|
|
|
|
|
|
|
w = round(w)
|
|
|
|
h = round(h)
|
|
|
|
x = round(x)
|
|
|
|
y = round(y)
|
|
|
|
|
|
|
|
wmark.resize(w, h)
|
2018-07-20 16:45:42 +01:00
|
|
|
if self.width <= self.height:
|
2017-05-23 11:14:47 +01:00
|
|
|
wmark.rotate(-90)
|
|
|
|
img.composite(image=wmark, left=x, top=y)
|
|
|
|
return img
|
|
|
|
|
2017-10-27 10:29:33 +01:00
|
|
|
async def downsize(self):
|
2018-07-20 16:45:42 +01:00
|
|
|
need = False
|
|
|
|
for size, resized in self.resized_images:
|
|
|
|
if not resized.exists or settings.args.get('regenerate'):
|
|
|
|
need = True
|
|
|
|
break
|
|
|
|
if not need:
|
2017-10-27 10:29:33 +01:00
|
|
|
return
|
2017-05-23 11:14:47 +01:00
|
|
|
|
|
|
|
with wand.image.Image(filename=self.fpath) as img:
|
|
|
|
img.auto_orient()
|
2017-10-27 10:29:33 +01:00
|
|
|
img = self._maybe_watermark(img)
|
2018-07-20 16:45:42 +01:00
|
|
|
for size, resized in self.resized_images:
|
|
|
|
if not resized.exists or settings.args.get('regenerate'):
|
2018-11-04 23:27:53 +00:00
|
|
|
logger.info(
|
2018-07-20 16:45:42 +01:00
|
|
|
"resizing image: %s to size %d",
|
|
|
|
os.path.basename(self.fpath),
|
|
|
|
size
|
|
|
|
)
|
|
|
|
await resized.make(img)
|
|
|
|
|
|
|
|
class Resized:
|
|
|
|
def __init__(self, parent, size, crop=False):
|
|
|
|
self.parent = parent
|
|
|
|
self.size = size
|
|
|
|
self.crop = crop
|
|
|
|
|
2019-02-16 00:14:12 +00:00
|
|
|
@property
|
|
|
|
def data(self):
|
|
|
|
with open(self.fpath, 'rb') as f:
|
|
|
|
encoded = base64.b64encode(f.read())
|
2019-02-25 22:40:01 +00:00
|
|
|
return "data:%s;base64,%s" % (
|
|
|
|
self.parent.mime_type, encoded.decode('utf-8'))
|
2019-02-16 00:14:12 +00:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
@property
|
|
|
|
def suffix(self):
|
|
|
|
return settings.photo.get('sizes').get(self.size, '')
|
|
|
|
|
|
|
|
@property
|
|
|
|
def fname(self):
|
|
|
|
return "%s%s%s" % (
|
|
|
|
self.parent.fname,
|
|
|
|
self.suffix,
|
|
|
|
self.parent.fext
|
|
|
|
)
|
2018-03-21 15:42:36 +00:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
@property
|
|
|
|
def fpath(self):
|
|
|
|
return os.path.join(
|
|
|
|
self.parent.parent.renderdir,
|
|
|
|
self.fname
|
|
|
|
)
|
2018-03-21 15:42:36 +00:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
@property
|
|
|
|
def url(self):
|
|
|
|
return "%s/%s/%s" % (
|
|
|
|
settings.site.get('url'),
|
|
|
|
self.parent.parent.name,
|
|
|
|
"%s%s%s" % (
|
|
|
|
self.parent.fname,
|
|
|
|
self.suffix,
|
|
|
|
self.parent.fext
|
|
|
|
)
|
|
|
|
)
|
2018-03-21 15:42:36 +00:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
@property
|
|
|
|
def relpath(self):
|
|
|
|
return "%s/%s" % (
|
|
|
|
self.parent.parent.renderdir.replace(
|
|
|
|
settings.paths.get('build'), ''
|
|
|
|
),
|
|
|
|
self.fname
|
|
|
|
)
|
2018-03-21 15:42:36 +00:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
@property
|
|
|
|
def exists(self):
|
|
|
|
if os.path.isfile(self.fpath):
|
2019-02-16 00:14:12 +00:00
|
|
|
if mtime(self.fpath) >= self.parent.mtime:
|
2018-07-20 16:45:42 +01:00
|
|
|
return True
|
|
|
|
return False
|
2017-05-31 13:53:47 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
@property
|
|
|
|
def width(self):
|
|
|
|
return self.dimensions[0]
|
|
|
|
|
|
|
|
@property
|
|
|
|
def height(self):
|
|
|
|
return self.dimensions[1]
|
|
|
|
|
|
|
|
@property
|
|
|
|
def dimensions(self):
|
|
|
|
width = self.parent.width
|
|
|
|
height = self.parent.height
|
|
|
|
size = self.size
|
|
|
|
|
|
|
|
ratio = max(width, height) / min(width, height)
|
|
|
|
horizontal = width >= height
|
|
|
|
|
|
|
|
# panorama: reverse "horizontal" because the limit should be on
|
|
|
|
# the shorter side, not the longer one, and make it a bit smaller than
|
|
|
|
# the actual limit
|
|
|
|
# 2.39:1 is the widescreen cinematic ratio: anything wider than that
|
|
|
|
# is panorama land
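# e.g. (hypothetical numbers): a 6000x2000 panorama with size=1200 and no
# crop has ratio 3.0 > 2.4, so size shrinks to 720 and "horizontal" flips;
# the 720px limit then lands on the 2000px short side, giving (2160, 720)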
|
|
|
|
if ratio > 2.4 and not self.crop:
|
|
|
|
size = int(size * 0.6)
|
|
|
|
horizontal = not horizontal
|
|
|
|
|
|
|
|
if (horizontal and not self.crop) \
|
|
|
|
or (not horizontal and self.crop):
|
|
|
|
w = size
|
|
|
|
h = int(float(size / width) * height)
|
|
|
|
else:
|
|
|
|
h = size
|
|
|
|
w = int(float(size / height) * width)
|
|
|
|
return (w, h)
|
2017-06-12 15:40:30 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
async def make(self, original):
|
|
|
|
if not os.path.isdir(os.path.dirname(self.fpath)):
|
|
|
|
os.makedirs(os.path.dirname(self.fpath))
|
2017-06-12 15:40:30 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
with original.clone() as thumb:
|
|
|
|
thumb.resize(self.width, self.height)
|
2017-05-31 13:53:47 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
if self.crop:
|
|
|
|
thumb.liquid_rescale(self.size, self.size, 1, 1)
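# liquid_rescale is ImageMagick's seam carving (content-aware scaling):
# it squeezes the image into a square by removing low-energy seams
# instead of plain cropping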
|
2017-10-27 15:56:05 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
if self.parent.meta.get('FileType', 'jpeg').lower() == 'jpeg':
|
2018-07-25 13:24:31 +01:00
|
|
|
thumb.compression_quality = 88
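# a mild unsharp mask to counter the softening introduced by downscaling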
|
2018-07-20 16:45:42 +01:00
|
|
|
thumb.unsharp_mask(
|
|
|
|
radius=1,
|
|
|
|
sigma=0.5,
|
|
|
|
amount=0.7,
|
|
|
|
threshold=0.5
|
|
|
|
)
|
|
|
|
thumb.format = 'pjpeg'
|
2017-10-27 15:56:05 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
# saving through an open file handle makes sure the 'pjpeg' format sticks
|
|
|
|
with open(self.fpath, 'wb') as f:
|
2018-11-04 23:27:53 +00:00
|
|
|
logger.info("writing %s", self.fpath)
|
2018-07-20 16:45:42 +01:00
|
|
|
thumb.save(file=f)
|
2017-10-27 15:56:05 +01:00
|
|
|
|
2019-03-24 15:26:55 +00:00
|
|
|
# n, e = os.path.splitext(os.path.basename(self.fpath))
|
|
|
|
# webppath = self.fpath.replace(e, '.webp')
|
|
|
|
# with open(webppath, 'wb') as f:
|
|
|
|
# logger.info("writing %s", webppath)
|
|
|
|
# thumb.format = 'webp'
|
|
|
|
# thumb.compression_quality = 88
|
|
|
|
# thumb.save(file=f)
|
|
|
|
|
|
|
|
|
2017-10-27 15:56:05 +01:00
|
|
|
|
2018-08-15 11:02:59 +01:00
|
|
|
class PHPFile(object):
|
|
|
|
@property
|
|
|
|
def exists(self):
|
|
|
|
if settings.args.get('force'):
|
|
|
|
return False
|
|
|
|
if not os.path.exists(self.renderfile):
|
|
|
|
return False
|
2019-02-16 00:14:12 +00:00
|
|
|
if self.mtime > mtime(self.renderfile):
|
2018-08-15 11:02:59 +01:00
|
|
|
return False
|
|
|
|
return True
|
|
|
|
|
|
|
|
@property
|
|
|
|
def mtime(self):
|
2019-02-16 00:14:12 +00:00
|
|
|
return mtime(
|
2018-08-15 11:02:59 +01:00
|
|
|
os.path.join(
|
|
|
|
settings.paths.get('tmpl'),
|
|
|
|
self.templatefile
|
|
|
|
)
|
|
|
|
)
|
|
|
|
|
|
|
|
@property
|
|
|
|
def renderfile(self):
|
|
|
|
raise NotImplementedError
|
2018-03-21 15:42:36 +00:00
|
|
|
|
2018-08-15 11:02:59 +01:00
|
|
|
@property
|
|
|
|
def templatefile(self):
|
|
|
|
raise NotImplementedError
|
|
|
|
|
|
|
|
async def render(self):
|
2019-02-25 22:40:01 +00:00
|
|
|
# if self.exists:
|
|
|
|
# return
|
2018-08-15 11:02:59 +01:00
|
|
|
await self._render()
|
|
|
|
|
|
|
|
|
|
|
|
class Search(PHPFile):
|
2018-07-22 14:52:32 +01:00
|
|
|
def __init__(self):
|
2018-08-15 11:02:59 +01:00
|
|
|
self.fpath = os.path.join(
|
|
|
|
settings.paths.get('build'),
|
|
|
|
'search.sqlite'
|
|
|
|
)
|
|
|
|
self.db = sqlite3.connect(self.fpath)
|
|
|
|
self.db.execute('PRAGMA auto_vacuum = INCREMENTAL;')
|
|
|
|
self.db.execute('PRAGMA journal_mode = MEMORY;')
|
|
|
|
self.db.execute('PRAGMA temp_store = MEMORY;')
|
|
|
|
self.db.execute('PRAGMA locking_mode = NORMAL;')
|
|
|
|
self.db.execute('PRAGMA synchronous = FULL;')
|
|
|
|
self.db.execute('PRAGMA encoding = "UTF-8";')
|
|
|
|
self.db.execute('''
|
|
|
|
CREATE VIRTUAL TABLE IF NOT EXISTS data USING fts4(
|
|
|
|
url,
|
|
|
|
mtime,
|
|
|
|
name,
|
|
|
|
title,
|
|
|
|
category,
|
|
|
|
content,
|
|
|
|
notindexed=category,
|
|
|
|
notindexed=url,
|
|
|
|
notindexed=mtime,
|
|
|
|
tokenize=porter
|
|
|
|
)'''
|
2019-02-25 22:40:01 +00:00
|
|
|
)
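# a minimal sketch of how this FTS4 table can be queried later (assumed
# usage; the real lookups live in the search.php template):
#   self.db.execute(
#       'SELECT url, title FROM data WHERE data MATCH ?', ('pandoc',)
#   ).fetchall()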
|
2018-11-10 20:49:13 +00:00
|
|
|
self.is_changed = False
|
2018-08-15 11:02:59 +01:00
|
|
|
|
|
|
|
def __exit__(self):
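# note: despite the name, this is called explicitly (see make()), not via
# a with statement, hence no (exc_type, exc_value, traceback) parameters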
|
2018-11-10 20:49:13 +00:00
|
|
|
if self.is_changed:
|
|
|
|
self.db.commit()
|
|
|
|
self.db.execute('PRAGMA incremental_vacuum;')
|
2018-08-15 11:02:59 +01:00
|
|
|
self.db.close()
|
|
|
|
|
|
|
|
def check(self, name):
|
|
|
|
ret = 0
|
|
|
|
maybe = self.db.execute('''
|
|
|
|
SELECT
|
|
|
|
mtime
|
|
|
|
FROM
|
|
|
|
data
|
|
|
|
WHERE
|
|
|
|
name = ?
|
|
|
|
''', (name,)).fetchone()
|
|
|
|
if maybe:
|
|
|
|
ret = int(maybe[0])
|
|
|
|
return ret
|
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
def append(self, post):
|
|
|
|
mtime = int(post.mtime)
|
|
|
|
check = self.check(post.name)
|
2018-08-15 11:02:59 +01:00
|
|
|
if (check and check < mtime):
|
|
|
|
self.db.execute('''
|
|
|
|
DELETE
|
|
|
|
FROM
|
|
|
|
data
|
|
|
|
WHERE
|
2018-12-27 19:48:06 +00:00
|
|
|
name=?''', (post.name,))
|
2018-08-15 11:02:59 +01:00
|
|
|
check = False
|
|
|
|
if not check:
|
|
|
|
self.db.execute('''
|
|
|
|
INSERT INTO
|
|
|
|
data
|
|
|
|
(url, mtime, name, title, category, content)
|
|
|
|
VALUES
|
|
|
|
(?,?,?,?,?,?);
|
|
|
|
''', (
|
2018-12-27 19:48:06 +00:00
|
|
|
post.url,
|
2018-08-15 11:02:59 +01:00
|
|
|
mtime,
|
2018-12-27 19:48:06 +00:00
|
|
|
post.name,
|
|
|
|
post.title,
|
|
|
|
post.category,
|
|
|
|
post.content
|
2018-08-15 11:02:59 +01:00
|
|
|
))
|
2018-11-10 20:49:13 +00:00
|
|
|
self.is_changed = True
|
2018-03-21 15:42:36 +00:00
|
|
|
|
2018-07-23 11:04:48 +01:00
|
|
|
@property
|
2018-08-15 11:02:59 +01:00
|
|
|
def renderfile(self):
|
|
|
|
return os.path.join(
|
|
|
|
settings.paths.get('build'),
|
|
|
|
'search.php'
|
|
|
|
)
|
|
|
|
|
|
|
|
@property
|
|
|
|
def templatefile(self):
|
|
|
|
return 'Search.j2.php'
|
|
|
|
|
|
|
|
async def _render(self):
|
|
|
|
r = J2.get_template(self.templatefile).render({
|
|
|
|
'post': {},
|
|
|
|
'site': settings.site,
|
2019-01-21 16:10:27 +00:00
|
|
|
'menu': settings.menu,
|
2018-08-15 11:02:59 +01:00
|
|
|
'meta': settings.meta,
|
|
|
|
})
|
2018-11-04 23:27:53 +00:00
|
|
|
writepath(self.renderfile, r)
|
2018-08-15 11:02:59 +01:00
|
|
|
|
|
|
|
|
|
|
|
class IndexPHP(PHPFile):
|
|
|
|
def __init__(self):
|
|
|
|
self.gone = {}
|
|
|
|
self.redirect = {}
|
2018-07-23 11:04:48 +01:00
|
|
|
|
2018-07-22 14:52:32 +01:00
|
|
|
def add_gone(self, uri):
|
|
|
|
self.gone[uri] = True
|
|
|
|
|
|
|
|
def add_redirect(self, source, target):
|
|
|
|
if target in self.gone:
|
|
|
|
self.add_gone(source)
|
|
|
|
else:
|
2018-08-04 00:28:55 +01:00
|
|
|
if '://' not in target:
|
2018-07-22 14:52:32 +01:00
|
|
|
target = "%s/%s" % (settings.site.get('url'), target)
|
|
|
|
self.redirect[source] = target
|
|
|
|
|
2018-07-23 11:04:48 +01:00
|
|
|
@property
|
|
|
|
def renderfile(self):
|
|
|
|
return os.path.join(
|
2018-07-20 16:45:42 +01:00
|
|
|
settings.paths.get('build'),
|
2018-07-22 14:52:32 +01:00
|
|
|
'index.php'
|
2017-10-27 15:56:05 +01:00
|
|
|
)
|
2018-07-23 11:04:48 +01:00
|
|
|
|
2018-08-15 11:02:59 +01:00
|
|
|
@property
|
|
|
|
def templatefile(self):
|
2019-01-15 21:28:58 +00:00
|
|
|
return '404.j2.php'
|
2018-08-15 11:02:59 +01:00
|
|
|
|
|
|
|
async def _render(self):
|
|
|
|
r = J2.get_template(self.templatefile).render({
|
2018-07-22 14:52:32 +01:00
|
|
|
'post': {},
|
|
|
|
'site': settings.site,
|
2019-01-21 16:10:27 +00:00
|
|
|
'menu': settings.menu,
|
2018-07-22 14:52:32 +01:00
|
|
|
'gones': self.gone,
|
|
|
|
'redirects': self.redirect
|
|
|
|
})
|
2018-11-10 20:49:13 +00:00
|
|
|
writepath(self.renderfile, r)
|
2017-10-27 15:56:05 +01:00
|
|
|
|
|
|
|
|
2018-08-15 11:02:59 +01:00
|
|
|
class WebhookPHP(PHPFile):
|
2018-08-08 09:42:42 +01:00
|
|
|
@property
|
|
|
|
def renderfile(self):
|
|
|
|
return os.path.join(
|
|
|
|
settings.paths.get('build'),
|
|
|
|
'webhook.php'
|
|
|
|
)
|
|
|
|
|
2018-08-15 11:02:59 +01:00
|
|
|
@property
|
|
|
|
def templatefile(self):
|
|
|
|
return 'Webhook.j2.php'
|
|
|
|
|
|
|
|
async def _render(self):
|
|
|
|
r = J2.get_template(self.templatefile).render({
|
2018-08-08 09:42:42 +01:00
|
|
|
'author': settings.author,
|
2018-11-03 09:48:37 +00:00
|
|
|
'webmentionio': keys.webmentionio,
|
|
|
|
'zapier': keys.zapier,
|
2018-08-08 09:42:42 +01:00
|
|
|
})
|
2018-11-10 20:49:13 +00:00
|
|
|
writepath(self.renderfile, r)
|
|
|
|
|
|
|
|
|
|
|
|
class MicropubPHP(PHPFile):
|
|
|
|
@property
|
|
|
|
def renderfile(self):
|
|
|
|
return os.path.join(
|
|
|
|
settings.paths.get('build'),
|
|
|
|
'micropub.php'
|
|
|
|
)
|
|
|
|
|
|
|
|
@property
|
|
|
|
def templatefile(self):
|
|
|
|
return 'Micropub.j2.php'
|
|
|
|
|
|
|
|
async def _render(self):
|
|
|
|
r = J2.get_template(self.templatefile).render({
|
|
|
|
'site': settings.site,
|
2019-01-21 16:10:27 +00:00
|
|
|
'menu': settings.menu,
|
2018-11-10 20:49:13 +00:00
|
|
|
'paths': settings.paths
|
|
|
|
})
|
|
|
|
writepath(self.renderfile, r)
|
2018-08-08 09:42:42 +01:00
|
|
|
|
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
class Category(dict):
|
|
|
|
def __init__(self, name=''):
|
|
|
|
self.name = name
|
2019-01-31 21:23:16 +00:00
|
|
|
#self.page = 1
|
2018-11-04 13:40:44 +00:00
|
|
|
self.trange = 'YYYY'
|
2017-10-27 15:56:05 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
def __setitem__(self, key, value):
|
|
|
|
if key in self:
|
|
|
|
raise LookupError(
|
|
|
|
"key '%s' already exists, colliding posts are: %s vs %s" % (
|
|
|
|
key,
|
|
|
|
self[key].fpath,
|
|
|
|
value.fpath,
|
|
|
|
)
|
2018-03-29 17:07:53 +01:00
|
|
|
)
|
2018-07-20 16:45:42 +01:00
|
|
|
dict.__setitem__(self, key, value)
|
2017-10-30 10:47:08 +00:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
@property
|
|
|
|
def sortedkeys(self):
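# keys are publish timestamps (set in make()), so this yields newest-first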
|
|
|
|
return list(sorted(self.keys(), reverse=True))
|
2017-10-27 15:56:05 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
@property
|
2019-02-07 19:27:15 +00:00
|
|
|
def is_paginated(self):
|
|
|
|
if self.name in settings.flat:
|
|
|
|
return False
|
|
|
|
return True
|
2018-06-01 10:49:14 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
@property
|
|
|
|
def title(self):
|
|
|
|
if len(self.name):
|
2019-02-07 19:27:15 +00:00
|
|
|
return "%s - %s" % (self.name, settings.site.get('name'))
|
2018-07-20 16:45:42 +01:00
|
|
|
else:
|
2019-02-07 19:27:15 +00:00
|
|
|
return settings.site.get('headline')
|
2018-06-01 10:49:14 +01:00
|
|
|
|
2017-10-29 19:11:01 +00:00
|
|
|
@property
|
2018-07-20 16:45:42 +01:00
|
|
|
def url(self):
|
|
|
|
if len(self.name):
|
2019-02-25 22:40:01 +00:00
|
|
|
url = "%s/%s/%s/" % (settings.site.get('url'), CATEGORY, self.name)
|
2018-07-20 16:45:42 +01:00
|
|
|
else:
|
2018-07-25 13:24:31 +01:00
|
|
|
url = '%s/' % (settings.site.get('url'))
|
2018-07-20 16:45:42 +01:00
|
|
|
return url
|
2017-10-27 15:56:05 +01:00
|
|
|
|
2018-07-25 13:24:31 +01:00
|
|
|
@property
|
2018-11-04 23:27:53 +00:00
|
|
|
def feedurl(self):
|
2018-07-25 13:24:31 +01:00
|
|
|
return "%sfeed/" % (self.url)
|
|
|
|
|
2017-10-29 19:11:01 +00:00
|
|
|
@property
|
2018-07-20 16:45:42 +01:00
|
|
|
def template(self):
|
|
|
|
return "%s.j2.html" % (self.__class__.__name__)
|
2017-10-27 15:56:05 +01:00
|
|
|
|
2017-10-29 19:11:01 +00:00
|
|
|
@property
|
2018-11-04 23:27:53 +00:00
|
|
|
def dpath(self):
|
2018-07-20 16:45:42 +01:00
|
|
|
if len(self.name):
|
|
|
|
return os.path.join(
|
|
|
|
settings.paths.get('build'),
|
2019-02-25 22:40:01 +00:00
|
|
|
CATEGORY,
|
2018-07-20 16:45:42 +01:00
|
|
|
self.name
|
|
|
|
)
|
2017-10-30 09:24:46 +00:00
|
|
|
else:
|
2018-07-20 16:45:42 +01:00
|
|
|
return settings.paths.get('build')
|
2017-10-29 19:11:01 +00:00
|
|
|
|
2019-01-31 21:23:16 +00:00
|
|
|
@property
|
|
|
|
def newest_year(self):
|
|
|
|
return int(self[self.sortedkeys[0]].published.format(self.trange))
|
|
|
|
|
|
|
|
@property
|
|
|
|
def years(self):
|
|
|
|
years = {}
|
|
|
|
for k in self.sortedkeys:
|
|
|
|
y = int(self[k].published.format(self.trange))
|
|
|
|
if y not in years:
|
|
|
|
if y == self.newest_year:
|
|
|
|
url = self.url
|
|
|
|
else:
|
|
|
|
url = "%s%d/" % (self.url, y)
|
|
|
|
years.update({
|
|
|
|
y: url
|
|
|
|
})
|
|
|
|
return years
|
|
|
|
|
2018-11-10 20:49:13 +00:00
|
|
|
@property
|
|
|
|
def mtime(self):
|
2019-01-31 21:23:16 +00:00
|
|
|
return self[self.sortedkeys[0]].published.timestamp
|
2018-11-10 20:49:13 +00:00
|
|
|
|
|
|
|
@property
|
|
|
|
def rssfeedfpath(self):
|
|
|
|
return os.path.join(
|
|
|
|
self.dpath,
|
|
|
|
'feed',
|
2019-03-22 15:49:24 +00:00
|
|
|
RSSFILE
|
2018-11-10 20:49:13 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
@property
|
|
|
|
def atomfeedfpath(self):
|
|
|
|
return os.path.join(
|
|
|
|
self.dpath,
|
|
|
|
'feed',
|
2019-03-22 15:49:24 +00:00
|
|
|
ATOMFILE
|
|
|
|
)
|
|
|
|
|
|
|
|
@property
|
|
|
|
def jsonfeedfpath(self):
|
|
|
|
return os.path.join(
|
|
|
|
self.dpath,
|
|
|
|
'feed',
|
|
|
|
JSONFEEDFILE
|
2018-11-10 20:49:13 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
def get_posts(self, start=0, end=-1):
|
|
|
|
return [
|
2019-02-07 19:27:15 +00:00
|
|
|
self[k].jsonld
|
2018-11-10 20:49:13 +00:00
|
|
|
for k in self.sortedkeys[start:end]
|
|
|
|
]
|
|
|
|
|
|
|
|
def is_uptodate(self, fpath, ts):
|
|
|
|
if settings.args.get('force'):
|
|
|
|
return False
|
|
|
|
if not os.path.exists(fpath):
|
|
|
|
return False
|
2019-02-16 00:14:12 +00:00
|
|
|
if mtime(fpath) >= ts:
|
2018-11-10 20:49:13 +00:00
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
|
|
def newest(self, start=0, end=-1):
|
|
|
|
if start == end:
|
|
|
|
end = -1
|
|
|
|
s = sorted(
|
2019-01-15 21:28:58 +00:00
|
|
|
[self[k].dt for k in self.sortedkeys[start:end]],
|
2018-11-10 20:49:13 +00:00
|
|
|
reverse=True
|
|
|
|
)
|
|
|
|
return s[0]
|
|
|
|
|
2019-01-15 21:28:58 +00:00
|
|
|
@property
|
|
|
|
def ctmplvars(self):
|
|
|
|
return {
|
|
|
|
'name': self.name,
|
|
|
|
'url': self.url,
|
|
|
|
'feed': self.feedurl,
|
|
|
|
'title': self.title,
|
|
|
|
}
|
|
|
|
|
2019-02-16 00:14:12 +00:00
|
|
|
def tmplvars(self, posts=None, year=None):
    # avoid the shared mutable default argument pitfall
    posts = posts or []
|
2019-01-31 21:23:16 +00:00
|
|
|
baseurl = self.url
|
|
|
|
if year:
|
2019-02-16 00:14:12 +00:00
|
|
|
baseurl = '%s%s/' % (baseurl, year)
|
2018-07-20 16:45:42 +01:00
|
|
|
return {
|
2019-01-31 21:23:16 +00:00
|
|
|
'baseurl': baseurl,
|
2018-11-04 12:57:51 +00:00
|
|
|
'site': settings.site,
|
2019-01-21 16:10:27 +00:00
|
|
|
'menu': settings.menu,
|
2018-11-04 12:57:51 +00:00
|
|
|
'meta': settings.meta,
|
|
|
|
'category': {
|
|
|
|
'name': self.name,
|
2019-02-07 19:27:15 +00:00
|
|
|
'paginated': self.is_paginated,
|
2018-11-04 12:57:51 +00:00
|
|
|
'url': self.url,
|
2018-11-04 23:27:53 +00:00
|
|
|
'feed': self.feedurl,
|
2018-11-04 12:57:51 +00:00
|
|
|
'title': self.title,
|
2019-01-31 21:23:16 +00:00
|
|
|
'year': year,
|
|
|
|
'years': self.years,
|
2018-11-04 12:57:51 +00:00
|
|
|
},
|
2019-01-31 21:23:16 +00:00
|
|
|
'posts': posts,
|
2018-07-20 16:45:42 +01:00
|
|
|
}
|
2017-10-29 19:11:01 +00:00
|
|
|
|
2019-02-25 22:40:01 +00:00
|
|
|
def indexfpath(self, subpath=None, fname=HTMLFILE):
|
2018-11-04 23:27:53 +00:00
|
|
|
if subpath:
|
|
|
|
return os.path.join(
|
|
|
|
self.dpath,
|
|
|
|
subpath,
|
2019-02-25 22:40:01 +00:00
|
|
|
fname
|
2018-11-04 23:27:53 +00:00
|
|
|
)
|
|
|
|
else:
|
|
|
|
return os.path.join(
|
|
|
|
self.dpath,
|
2019-02-25 22:40:01 +00:00
|
|
|
fname
|
2018-11-04 23:27:53 +00:00
|
|
|
)
|
2018-10-10 22:36:48 +01:00
|
|
|
|
|
|
|
async def render_feed(self, xmlformat):
|
2018-11-04 23:27:53 +00:00
|
|
|
logger.info(
|
2018-10-10 22:36:48 +01:00
|
|
|
'rendering category "%s" %s feed',
|
|
|
|
self.name,
|
|
|
|
xmlformat
|
|
|
|
)
|
2018-07-20 16:45:42 +01:00
|
|
|
start = 0
|
2019-02-07 19:27:15 +00:00
|
|
|
end = int(settings.pagination)
|
2017-11-10 15:56:45 +00:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
fg = FeedGenerator()
|
2018-11-04 23:27:53 +00:00
|
|
|
fg.id(self.feedurl)
|
2018-07-20 16:45:42 +01:00
|
|
|
fg.title(self.title)
|
|
|
|
fg.author({
|
2019-02-08 23:32:52 +00:00
|
|
|
'name': settings.author.name,
|
|
|
|
'email': settings.author.email
|
2018-07-20 16:45:42 +01:00
|
|
|
})
|
|
|
|
fg.logo('%s/favicon.png' % settings.site.get('url'))
|
|
|
|
fg.updated(arrow.get(self.mtime).to('utc').datetime)
|
2019-02-07 19:27:15 +00:00
|
|
|
fg.description(settings.site.get('headline'))
|
2017-10-27 10:29:33 +01:00
|
|
|
|
2019-02-07 19:27:15 +00:00
|
|
|
for k in reversed(self.sortedkeys[start:end]):
|
|
|
|
post = self[k]
|
2018-07-20 16:45:42 +01:00
|
|
|
fe = fg.add_entry()
|
2018-10-10 22:36:48 +01:00
|
|
|
|
2019-02-07 19:27:15 +00:00
|
|
|
fe.id(post.url)
|
|
|
|
fe.title(post.title)
|
2018-08-04 00:28:55 +01:00
|
|
|
fe.author({
|
2019-02-08 23:32:52 +00:00
|
|
|
'name': settings.author.name,
|
|
|
|
'email': settings.author.email
|
2018-08-04 00:28:55 +01:00
|
|
|
})
|
2018-08-08 09:42:42 +01:00
|
|
|
fe.category({
|
2019-02-07 19:27:15 +00:00
|
|
|
'term': post.category,
|
|
|
|
'label': post.category,
|
2019-02-25 22:40:01 +00:00
|
|
|
'scheme': "%s/%s/%s/" % (
|
2018-08-08 09:42:42 +01:00
|
|
|
settings.site.get('url'),
|
2019-02-25 22:40:01 +00:00
|
|
|
CATEGORY,
|
2019-02-07 19:27:15 +00:00
|
|
|
post.category
|
2018-08-08 09:42:42 +01:00
|
|
|
)
|
|
|
|
})
|
2018-10-10 22:36:48 +01:00
|
|
|
|
2019-02-07 19:27:15 +00:00
|
|
|
fe.published(post.published.datetime)
|
|
|
|
fe.updated(arrow.get(post.dt).datetime)
|
2018-10-10 22:36:48 +01:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
fe.rights('%s %s %s' % (
|
2019-02-07 19:27:15 +00:00
|
|
|
post.licence.upper(),
|
2019-02-08 23:32:52 +00:00
|
|
|
settings.author.name,
|
2019-02-07 19:27:15 +00:00
|
|
|
post.published.format('YYYY')
|
2018-07-20 16:45:42 +01:00
|
|
|
))
|
2018-08-04 00:28:55 +01:00
|
|
|
|
2018-10-10 22:36:48 +01:00
|
|
|
if xmlformat == 'rss':
|
2019-02-07 19:27:15 +00:00
|
|
|
fe.link(href=post.url)
|
|
|
|
fe.content(post.html_content, type='CDATA')
|
|
|
|
if post.is_photo:
|
2018-10-10 22:36:48 +01:00
|
|
|
fe.enclosure(
|
2019-02-07 19:27:15 +00:00
|
|
|
post.photo.href,
|
|
|
|
"%d" % post.photo.mime_size,
|
|
|
|
post.photo.mime_type,
|
2018-10-10 22:36:48 +01:00
|
|
|
)
|
|
|
|
elif xmlformat == 'atom':
|
2018-11-10 20:49:13 +00:00
|
|
|
fe.link(
|
2019-02-07 19:27:15 +00:00
|
|
|
href=post.url,
|
2018-11-10 20:49:13 +00:00
|
|
|
rel='alternate',
|
2019-02-07 19:27:15 +00:00
|
|
|
type='text/html'
|
|
|
|
)
|
|
|
|
fe.content(src=post.url, type='text/html')
|
|
|
|
fe.summary(post.summary)
|
2018-10-10 22:36:48 +01:00
|
|
|
|
|
|
|
if xmlformat == 'rss':
|
2018-11-04 23:27:53 +00:00
|
|
|
fg.link(href=self.feedurl)
|
2018-11-10 20:49:13 +00:00
|
|
|
writepath(self.rssfeedfpath, fg.rss_str(pretty=True))
|
2018-10-10 22:36:48 +01:00
|
|
|
elif xmlformat == 'atom':
|
2018-11-04 23:27:53 +00:00
|
|
|
fg.link(href=self.feedurl, rel='self')
|
2018-11-03 09:48:37 +00:00
|
|
|
fg.link(href=settings.meta.get('hub'), rel='hub')
|
2018-11-10 20:49:13 +00:00
|
|
|
writepath(self.atomfeedfpath, fg.atom_str(pretty=True))
|
2017-10-28 19:08:40 +01:00
|
|
|
|
2019-03-22 15:49:24 +00:00
|
|
|
async def render_json(self):
|
|
|
|
logger.info(
|
|
|
|
'rendering category "%s" JSON feed',
|
|
|
|
self.name,
|
|
|
|
)
|
|
|
|
start = 0
|
|
|
|
end = int(settings.pagination)
|
|
|
|
|
|
|
|
js = {
|
|
|
|
"version": "https://jsonfeed.org/version/1",
|
|
|
|
"title": self.title,
|
|
|
|
"home_page_url": settings.site.url,
|
|
|
|
"feed_url": "%s%s" % (self.url, JSONFEEDFILE),
|
|
|
|
"author": {
|
|
|
|
"name": settings.author.name,
|
|
|
|
"url": settings.author.url,
|
|
|
|
"avatar": settings.author.image,
|
|
|
|
},
|
|
|
|
"items": []
|
|
|
|
}
|
|
|
|
|
|
|
|
for k in reversed(self.sortedkeys[start:end]):
|
|
|
|
post = self[k]
|
|
|
|
pjs = {
|
|
|
|
"id": post.url,
|
|
|
|
"content_text": post.txt_content,
|
|
|
|
"content_html": post.html_content,
|
|
|
|
"url": post.url,
|
|
|
|
"date_published": str(post.published),
|
|
|
|
}
|
|
|
|
if len(post.summary):
|
|
|
|
pjs.update({"summary": post.txt_summary})
|
|
|
|
if post.is_photo:
|
|
|
|
pjs.update({"attachment": {
|
|
|
|
"url": post.photo.href,
|
|
|
|
"mime_type": post.photo.mime_type,
|
|
|
|
"size_in_bytes": "%d" % post.photo.mime_size
|
|
|
|
}})
|
|
|
|
js["items"].append(pjs)
|
|
|
|
writepath(
|
|
|
|
self.jsonfeedfpath,
|
|
|
|
json.dumps(js, indent=4, ensure_ascii=False)
|
|
|
|
)
|
|
|
|
|
2018-11-04 12:57:51 +00:00
|
|
|
async def render_flat(self):
|
|
|
|
r = J2.get_template(self.template).render(
|
2018-11-04 23:27:53 +00:00
|
|
|
self.tmplvars(self.get_posts())
|
2018-11-04 12:57:51 +00:00
|
|
|
)
|
2018-11-04 23:27:53 +00:00
|
|
|
writepath(self.indexfpath(), r)
|
2017-05-23 11:14:47 +01:00
|
|
|
|
2019-02-25 22:40:01 +00:00
|
|
|
async def render_gopher(self):
|
|
|
|
lines = [
|
|
|
|
'%s - %s' % (self.name, settings.site.name),
|
|
|
|
'',
|
|
|
|
''
|
|
|
|
]
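# gophermap entries are "<type><display>\t<selector>\t<host>\t<port>"
# lines; type 0 is a plain text file, type I is an image, and 70 is the
# standard gopher port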
|
|
|
|
for post in self.get_posts():
|
|
|
|
line = "0%s\t/%s/%s\t%s\t70" % (
|
|
|
|
post.headline,
|
|
|
|
post.name,
|
|
|
|
TXTFILE,
|
|
|
|
settings.site.name
|
|
|
|
)
|
|
|
|
lines.append(line)
|
2019-03-22 15:49:24 +00:00
|
|
|
#lines.append(post.datePublished)
|
|
|
|
if len(post.description):
|
|
|
|
lines.extend(str(PandocHTML2TXT(post.description)).split("\n"))
|
2019-02-25 22:40:01 +00:00
|
|
|
if isinstance(post['image'], list):
|
|
|
|
for img in post['image']:
|
|
|
|
line = "I%s\t/%s/%s\t%s\t70" % (
|
|
|
|
img.headline,
|
|
|
|
post.name,
|
|
|
|
img.name,
|
|
|
|
settings.site.name
|
|
|
|
)
|
|
|
|
lines.append(line)
|
|
|
|
lines.append('')
|
|
|
|
writepath(self.indexfpath(fname=GOPHERFILE), "\r\n".join(lines))
|
|
|
|
|
2018-11-10 20:49:13 +00:00
|
|
|
async def render_archives(self):
|
2019-01-31 21:23:16 +00:00
|
|
|
for year in self.years.keys():
|
|
|
|
if year == self.newest_year:
|
2018-11-04 23:27:53 +00:00
|
|
|
fpath = self.indexfpath()
|
2019-02-16 00:14:12 +00:00
|
|
|
tyear = None
|
2018-11-04 12:57:51 +00:00
|
|
|
else:
|
2019-01-31 21:23:16 +00:00
|
|
|
fpath = self.indexfpath("%d" % (year))
|
2019-02-16 00:14:12 +00:00
|
|
|
tyear = year
|
2019-01-31 21:23:16 +00:00
|
|
|
y = arrow.get("%d" % year, self.trange).to('utc')
|
|
|
|
tsmin = y.floor('year').timestamp
|
|
|
|
tsmax = y.ceil('year').timestamp
|
|
|
|
start = len(self.sortedkeys)
|
|
|
|
end = 0
|
|
|
|
|
|
|
|
for index, value in enumerate(self.sortedkeys):
|
|
|
|
if value <= tsmax and index < start:
|
|
|
|
start = index
|
|
|
|
if value >= tsmin and index > end:
|
|
|
|
end = index
|
|
|
|
|
|
|
|
if self.is_uptodate(fpath, self[self.sortedkeys[start]].dt):
|
2019-02-25 22:40:01 +00:00
|
|
|
logger.info("%s / %d is up to date", self.name, year)
|
2018-11-04 23:27:53 +00:00
|
|
|
else:
|
2019-01-31 21:23:16 +00:00
|
|
|
logger.info("updating %s / %d", self.name, year)
|
|
|
|
logger.info("getting posts from %d to %d", start, end)
|
2018-11-04 23:27:53 +00:00
|
|
|
r = J2.get_template(self.template).render(
|
|
|
|
self.tmplvars(
|
2019-01-31 21:23:16 +00:00
|
|
|
# end needs the +1 because Python slicing is end-exclusive;
|
|
|
|
# without it the oldest post of the year would disappear
|
|
|
|
# from the year's archive page
|
2019-02-25 22:40:01 +00:00
|
|
|
self.get_posts(start, end + 1),
|
2019-02-16 00:14:12 +00:00
|
|
|
tyear
|
2018-11-04 23:27:53 +00:00
|
|
|
)
|
|
|
|
)
|
|
|
|
writepath(fpath, r)
|
2018-07-22 14:52:32 +01:00
|
|
|
|
2019-01-15 21:28:58 +00:00
|
|
|
async def render_feeds(self):
|
|
|
|
if not self.is_uptodate(self.rssfeedfpath, self.newest()):
|
2018-11-10 20:49:13 +00:00
|
|
|
logger.info(
|
|
|
|
'%s RSS feed outdated, generating new',
|
|
|
|
self.name
|
|
|
|
)
|
|
|
|
await self.render_feed('rss')
|
|
|
|
else:
|
|
|
|
logger.info(
|
|
|
|
'%s RSS feed up to date',
|
|
|
|
self.name
|
|
|
|
)
|
|
|
|
|
2019-01-15 21:28:58 +00:00
|
|
|
if not self.is_uptodate(self.atomfeedfpath, self.newest()):
|
2018-11-10 20:49:13 +00:00
|
|
|
logger.info(
|
|
|
|
'%s ATOM feed outdated, generating new',
|
|
|
|
self.name
|
|
|
|
)
|
|
|
|
await self.render_feed('atom')
|
|
|
|
else:
|
|
|
|
logger.info(
|
|
|
|
'%s ATOM feed up to date',
|
|
|
|
self.name
|
|
|
|
)
|
|
|
|
|
2019-03-22 15:49:24 +00:00
|
|
|
if not self.is_uptodate(self.jsonfeedfpath, self.newest()):
|
|
|
|
logger.info(
|
|
|
|
'%s JSON feed outdated, generating new',
|
|
|
|
self.name
|
|
|
|
)
|
|
|
|
await self.render_json()
|
|
|
|
else:
|
|
|
|
logger.info(
|
|
|
|
'%s JSON feed up to date',
|
|
|
|
self.name
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2019-01-15 21:28:58 +00:00
|
|
|
async def render(self):
|
|
|
|
await self.render_feeds()
|
2019-02-25 22:40:01 +00:00
|
|
|
if not self.is_uptodate(self.indexfpath(), self.newest()):
|
|
|
|
await self.render_gopher()
|
2019-02-07 19:27:15 +00:00
|
|
|
if not self.is_paginated:
|
2019-01-15 21:28:58 +00:00
|
|
|
if not self.is_uptodate(self.indexfpath(), self.newest()):
|
2018-11-10 20:49:13 +00:00
|
|
|
logger.info(
|
|
|
|
'%s flat index outdated, generating new',
|
|
|
|
self.name
|
|
|
|
)
|
|
|
|
await self.render_flat()
|
2019-02-25 22:40:01 +00:00
|
|
|
|
2018-11-10 20:49:13 +00:00
|
|
|
else:
|
|
|
|
logger.info(
|
|
|
|
'%s flat index is up to date',
|
|
|
|
self.name
|
|
|
|
)
|
|
|
|
return
|
|
|
|
else:
|
|
|
|
await self.render_archives()
|
|
|
|
|
|
|
|
|
2019-02-25 22:40:01 +00:00
|
|
|
|
2018-07-23 11:04:48 +01:00
|
|
|
class Sitemap(dict):
|
|
|
|
@property
|
|
|
|
def mtime(self):
|
|
|
|
r = 0
|
|
|
|
if os.path.exists(self.renderfile):
|
2019-02-16 00:14:12 +00:00
|
|
|
r = mtime(self.renderfile)
|
2018-07-23 11:04:48 +01:00
|
|
|
return r
|
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
def append(self, post):
|
|
|
|
self[post.url] = post.mtime
|
|
|
|
|
2018-07-23 11:04:48 +01:00
|
|
|
@property
|
|
|
|
def renderfile(self):
|
|
|
|
return os.path.join(settings.paths.get('build'), 'sitemap.txt')
|
|
|
|
|
2018-08-04 00:28:55 +01:00
|
|
|
async def render(self):
|
2018-07-23 11:04:48 +01:00
|
|
|
if self.mtime >= sorted(self.values())[-1]:
|
|
|
|
return
|
|
|
|
with open(self.renderfile, 'wt') as f:
|
|
|
|
f.write("\n".join(sorted(self.keys())))
|
2018-07-22 08:48:47 +01:00
|
|
|
|
2018-11-10 20:49:13 +00:00
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
class WebmentionIO(object):
|
|
|
|
def __init__(self):
|
|
|
|
self.params = {
|
|
|
|
'token': '%s' % (keys.webmentionio.get('token')),
|
2019-02-07 19:27:15 +00:00
|
|
|
'since': '%s' % str(self.since),
|
2018-12-27 19:48:06 +00:00
|
|
|
'domain': '%s' % (keys.webmentionio.get('domain'))
|
|
|
|
}
|
|
|
|
self.url = 'https://webmention.io/api/mentions'
|
2018-08-08 09:42:42 +01:00
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
@property
|
|
|
|
def since(self):
|
|
|
|
newest = 0
|
|
|
|
content = settings.paths.get('content')
|
|
|
|
for e in glob.glob(os.path.join(content, '*', '*', '*.md')):
|
2019-02-25 22:40:01 +00:00
|
|
|
if os.path.basename(e) == MDFILE:
|
2018-12-27 19:48:06 +00:00
|
|
|
continue
|
|
|
|
# filenames are like [received epoch]-[slugified source url].md
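# e.g. 1544467200-example-com-some-reply.md (illustrative name only)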
|
|
|
|
try:
|
|
|
|
mtime = int(os.path.basename(e).split('-')[0])
|
|
|
|
except Exception as exc:
|
|
|
|
logger.error(
|
|
|
|
'int conversion failed: %s, file was: %s',
|
|
|
|
exc,
|
|
|
|
e
|
|
|
|
)
|
|
|
|
continue
|
|
|
|
if mtime > newest:
|
|
|
|
newest = mtime
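# +1 second below so the newest, already-saved mention isn't fetched
# again (assumed intent)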
|
2019-02-25 22:40:01 +00:00
|
|
|
return arrow.get(newest + 1)
|
2018-12-27 19:48:06 +00:00
|
|
|
|
|
|
|
def makecomment(self, webmention):
|
|
|
|
# default to the verified_date so dt is always defined, even when the
# payload carries no published_ts at all
dt = arrow.get(webmention.get('verified_date'))
if 'published_ts' in webmention.get('data'):
    maybe = webmention.get('data').get('published')
    if maybe and maybe != 'None':
        dt = arrow.get(maybe)
|
2018-08-08 09:42:42 +01:00
|
|
|
|
2019-02-25 22:40:01 +00:00
|
|
|
slug = os.path.split(urlparse(webmention.get('target')).path.lstrip('/'))[0]
|
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
# ignore selfpings
|
2019-02-07 19:27:15 +00:00
|
|
|
if slug == settings.site.get('name'):
|
2018-12-27 19:48:06 +00:00
|
|
|
return
|
2018-07-22 17:59:26 +01:00
|
|
|
|
2019-02-25 22:40:01 +00:00
|
|
|
fdir = glob.glob(
|
|
|
|
os.path.join(
|
|
|
|
settings.paths.get('content'),
|
|
|
|
'*',
|
|
|
|
slug
|
|
|
|
)
|
|
|
|
)
|
2018-12-27 19:48:06 +00:00
|
|
|
if not len(fdir):
|
|
|
|
logger.error(
|
|
|
|
"couldn't find post for incoming webmention: %s",
|
|
|
|
webmention
|
|
|
|
)
|
|
|
|
return
|
|
|
|
elif len(fdir) > 1:
|
|
|
|
logger.error(
|
|
|
|
"multiple posts found for incoming webmention: %s",
|
|
|
|
webmention
|
|
|
|
)
|
|
|
|
return
|
2018-07-22 17:59:26 +01:00
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
fdir = fdir.pop()
|
|
|
|
fpath = os.path.join(
|
|
|
|
fdir,
|
|
|
|
"%d-%s.md" % (
|
|
|
|
dt.timestamp,
|
|
|
|
url2slug(webmention.get('source'))
|
|
|
|
)
|
|
|
|
)
|
2018-11-10 20:49:13 +00:00
|
|
|
|
2019-01-15 21:28:58 +00:00
|
|
|
author = webmention.get('data', {}).get('author', None)
|
|
|
|
if not author:
|
|
|
|
logger.error('missing author info on webmention; skipping')
|
|
|
|
return
|
2019-01-05 11:55:40 +00:00
|
|
|
meta = {
|
|
|
|
'author': {
|
2019-01-15 21:28:58 +00:00
|
|
|
'name': author.get('name', ''),
|
|
|
|
'url': author.get('url', ''),
|
|
|
|
'photo': author.get('photo', '')
|
2019-01-05 11:55:40 +00:00
|
|
|
},
|
2019-02-07 19:27:15 +00:00
|
|
|
'date': str(dt),
|
2018-12-27 19:48:06 +00:00
|
|
|
'source': webmention.get('source'),
|
|
|
|
'target': webmention.get('target'),
|
|
|
|
'type': webmention.get('activity').get('type', 'webmention')
|
2018-11-10 20:49:13 +00:00
|
|
|
}
|
2019-01-05 11:55:40 +00:00
|
|
|
|
|
|
|
r = "---\n%s\n---\n\n%s\n" % (
|
|
|
|
utfyamldump(meta),
|
|
|
|
webmention.get('data').get('content', '').strip()
|
|
|
|
)
|
|
|
|
writepath(fpath, r)
|
2018-11-10 20:49:13 +00:00
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
def run(self):
|
|
|
|
webmentions = requests.get(self.url, params=self.params)
|
|
|
|
logger.info("queried webmention.io with: %s", webmentions.url)
|
|
|
|
if webmentions.status_code != requests.codes.ok:
|
|
|
|
return
|
|
|
|
try:
|
|
|
|
mentions = webmentions.json()
|
|
|
|
for webmention in mentions.get('links'):
|
|
|
|
self.makecomment(webmention)
|
|
|
|
except ValueError as e:
|
|
|
|
logger.error('failed to parse webmention.io response: %s', e)
|
|
|
|
|
2018-08-15 11:02:59 +01:00
|
|
|
|
2019-02-25 22:40:01 +00:00
|
|
|
|
2019-03-22 15:49:24 +00:00
|
|
|
# class GranaryIO(dict):
|
|
|
|
# granary = 'https://granary.io/url'
|
|
|
|
# convert_to = ['as2', 'mf2-json', 'jsonfeed']
|
|
|
|
|
|
|
|
# def __init__(self, source):
|
|
|
|
# self.source = source
|
|
|
|
|
|
|
|
# def run(self):
|
|
|
|
# for c in self.convert_to:
|
|
|
|
# p = {
|
|
|
|
# 'url': self.source,
|
|
|
|
# 'input': 'html',
|
|
|
|
# 'output': c
|
|
|
|
# }
|
|
|
|
# r = requests.get(self.granary, params=p)
|
|
|
|
# logger.info("queried granary.io for %s for url: %s", c, self.source)
|
|
|
|
# if r.status_code != requests.codes.ok:
|
|
|
|
# continue
|
|
|
|
# try:
|
|
|
|
# self[c] = r.text
|
|
|
|
# except ValueError as e:
|
|
|
|
# logger.error('failed to query granary.io: %s', e)
|
|
|
|
# pass
|
|
|
|
|
|
|
|
|
2018-07-23 11:04:48 +01:00
|
|
|
def make():
|
|
|
|
start = int(round(time.time() * 1000))
|
|
|
|
last = 0
|
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
# this needs to be before collecting the 'content' itself
|
2018-12-01 10:43:13 +00:00
|
|
|
if not settings.args.get('nosync'):
|
2018-12-27 19:48:06 +00:00
|
|
|
incoming = WebmentionIO()
|
|
|
|
incoming.run()
|
|
|
|
|
|
|
|
queue = AQ()
|
|
|
|
send = []
|
2018-11-10 20:49:13 +00:00
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
content = settings.paths.get('content')
|
2018-08-15 11:02:59 +01:00
|
|
|
rules = IndexPHP()
|
2018-08-08 09:42:42 +01:00
|
|
|
|
2018-11-10 20:49:13 +00:00
|
|
|
micropub = MicropubPHP()
|
2018-12-27 19:48:06 +00:00
|
|
|
queue.put(micropub.render())
|
2018-11-10 20:49:13 +00:00
|
|
|
|
2018-08-08 09:42:42 +01:00
|
|
|
webhook = WebhookPHP()
|
2018-12-27 19:48:06 +00:00
|
|
|
queue.put(webhook.render())
|
2018-08-08 09:42:42 +01:00
|
|
|
|
2018-07-23 11:04:48 +01:00
|
|
|
sitemap = Sitemap()
|
|
|
|
search = Search()
|
2018-07-20 16:45:42 +01:00
|
|
|
categories = {}
|
2018-12-27 19:48:06 +00:00
|
|
|
frontposts = Category()
|
2019-01-15 21:28:58 +00:00
|
|
|
home = Home(settings.paths.get('home'))
|
2018-07-22 08:48:47 +01:00
|
|
|
|
2019-02-25 22:40:01 +00:00
|
|
|
for e in sorted(glob.glob(os.path.join(content, '*', '*', MDFILE))):
|
2018-07-20 16:45:42 +01:00
|
|
|
post = Singular(e)
|
2018-12-27 19:48:06 +00:00
|
|
|
# deal with images, if needed
|
2018-07-20 16:45:42 +01:00
|
|
|
for i in post.images.values():
|
2018-12-27 19:48:06 +00:00
|
|
|
queue.put(i.downsize())
|
2018-08-15 11:02:59 +01:00
|
|
|
for i in post.to_ping:
|
2018-12-27 19:48:06 +00:00
|
|
|
send.append(i)
|
|
|
|
|
|
|
|
# queue the render and arbitrary file copy tasks for this very post
|
|
|
|
queue.put(post.render())
|
|
|
|
queue.put(post.copyfiles())
|
2018-08-15 11:02:59 +01:00
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
# skip future-dated posts from anything further
|
2018-07-25 13:24:31 +01:00
|
|
|
if post.is_future:
|
2019-01-15 21:28:58 +00:00
|
|
|
logger.info('%s is for the future', post.name)
|
2018-07-25 13:24:31 +01:00
|
|
|
continue
|
2018-12-27 19:48:06 +00:00
|
|
|
|
|
|
|
# add post to search database
|
|
|
|
search.append(post)
|
|
|
|
|
|
|
|
# start populating sitemap
|
|
|
|
sitemap.append(post)
|
|
|
|
|
|
|
|
# populate redirects, if any
|
2018-11-03 09:48:37 +00:00
|
|
|
rules.add_redirect(post.shortslug, post.url)
|
2018-12-27 19:48:06 +00:00
|
|
|
|
|
|
|
# any category starting with '_' is special: it shouldn't have a
|
|
|
|
# category archive page
|
|
|
|
if post.is_page:
|
2018-08-08 09:42:42 +01:00
|
|
|
continue
|
2018-12-27 19:48:06 +00:00
|
|
|
|
|
|
|
# populate the category with the post
|
2018-08-08 09:42:42 +01:00
|
|
|
if post.category not in categories:
|
|
|
|
categories[post.category] = Category(post.category)
|
|
|
|
categories[post.category][post.published.timestamp] = post
|
2018-12-27 19:48:06 +00:00
|
|
|
|
|
|
|
# add to front, if allowed
|
2018-08-08 09:42:42 +01:00
|
|
|
if post.is_front:
|
2018-12-27 19:48:06 +00:00
|
|
|
frontposts[post.published.timestamp] = post
|
2018-07-20 16:45:42 +01:00
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
# commit to search database - this saves quite a few disk writes
|
2018-07-22 11:33:59 +01:00
|
|
|
search.__exit__()
|
2018-11-03 09:48:37 +00:00
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
# render search and sitemap
|
|
|
|
queue.put(search.render())
|
|
|
|
queue.put(sitemap.render())
|
|
|
|
|
|
|
|
# make gone and redirect arrays for PHP
|
2018-11-10 20:49:13 +00:00
|
|
|
for e in glob.glob(os.path.join(content, '*', '*.del')):
|
2018-11-03 09:48:37 +00:00
|
|
|
post = Gone(e)
|
|
|
|
rules.add_gone(post.source)
|
|
|
|
for e in glob.glob(os.path.join(content, '*', '*.url')):
|
|
|
|
post = Redirect(e)
|
|
|
|
rules.add_redirect(post.source, post.target)
|
2018-12-27 19:48:06 +00:00
|
|
|
# render 404 fallback PHP
|
|
|
|
queue.put(rules.render())
|
2018-11-03 09:48:37 +00:00
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
# render categories
|
2018-07-20 16:45:42 +01:00
|
|
|
for category in categories.values():
|
2019-01-15 21:28:58 +00:00
|
|
|
home.add(category, category.get(category.sortedkeys[0]))
|
2018-12-27 19:48:06 +00:00
|
|
|
queue.put(category.render())
|
|
|
|
|
2019-01-15 21:28:58 +00:00
|
|
|
queue.put(frontposts.render_feeds())
|
|
|
|
queue.put(home.render())
|
2018-12-27 19:48:06 +00:00
|
|
|
# actually run all the render & copy tasks
|
|
|
|
queue.run()
|
|
|
|
|
|
|
|
# copy static files
|
|
|
|
for e in glob.glob(os.path.join(content, '*.*')):
|
2019-01-15 21:28:58 +00:00
|
|
|
if e.endswith('.md'):
|
|
|
|
continue
|
2019-02-25 22:40:01 +00:00
|
|
|
t = os.path.join(settings.paths.get('build'), os.path.basename(e))
|
2019-02-16 00:14:12 +00:00
|
|
|
if os.path.exists(t) and mtime(e) <= mtime(t):
|
2018-07-20 16:45:42 +01:00
|
|
|
continue
|
|
|
|
cp(e, t)
|
2018-03-21 15:42:36 +00:00
|
|
|
|
2019-02-25 22:40:01 +00:00
|
|
|
# ...
|
|
|
|
#for url in settings.site.sameAs:
|
|
|
|
#if "dat://" in url:
|
|
|
|
#p = os.path.join(settings.paths.build, '.well-known', 'dat')
|
|
|
|
#if not os.path.exists(p):
|
|
|
|
#writepath(p, "%s\nTTL=3600" % (url))
|
|
|
|
|
2018-07-20 16:45:42 +01:00
|
|
|
end = int(round(time.time() * 1000))
|
2018-11-04 23:27:53 +00:00
|
|
|
logger.info('process took %d ms', end - start)
|
2018-09-04 21:58:25 +01:00
|
|
|
|
|
|
|
if not settings.args.get('nosync'):
|
2018-12-27 19:48:06 +00:00
|
|
|
# upload site
|
2018-11-04 23:27:53 +00:00
|
|
|
logger.info('starting syncing')
|
2018-09-04 21:58:25 +01:00
|
|
|
os.system(
|
|
|
|
"rsync -avuhH --delete-after %s/ %s/" % (
|
|
|
|
settings.paths.get('build'),
|
2018-11-10 20:49:13 +00:00
|
|
|
'%s/%s' % (settings.syncserver,
|
|
|
|
settings.paths.get('remotewww'))
|
2018-09-04 21:58:25 +01:00
|
|
|
)
|
2018-08-15 11:02:59 +01:00
|
|
|
)
|
2018-11-04 23:27:53 +00:00
|
|
|
logger.info('syncing finished')
|
2018-09-04 21:58:25 +01:00
|
|
|
|
2018-12-27 19:48:06 +00:00
|
|
|
if not settings.args.get('nosync'):
|
2018-11-10 20:49:13 +00:00
|
|
|
logger.info('sending webmentions')
|
2018-12-27 19:48:06 +00:00
|
|
|
for wm in send:
|
|
|
|
queue.put(wm.send())
|
|
|
|
queue.run()
|
2018-11-10 20:49:13 +00:00
|
|
|
logger.info('sending webmentions finished')
|
2018-07-22 14:52:32 +01:00
|
|
|
|
2018-09-04 21:58:25 +01:00
|
|
|
|
2017-05-23 11:13:35 +01:00
|
|
|
if __name__ == '__main__':
|
2018-07-20 16:45:42 +01:00
|
|
|
make()
|