- added photo layout with CSS flexbox

- removed the webhook endpoint - its functionality now lives in Zapier
- replaced the flickr logo
- arrow is temporarily pinned to 0.14.2 because of the extremely noisy deprecation warning it emits ahead of the upcoming 0.15 release
This commit is contained in:
Peter Molnar 2019-08-23 09:06:26 +01:00
parent 35679e8fd8
commit 0cd5925620
13 changed files with 171 additions and 274 deletions

1
.gitignore vendored
View file

@ -5,3 +5,4 @@ keys.py
lib
gcloud.json
tests/.Exif.tests.jpg.json
post-run.sh

View file

@ -5,7 +5,6 @@ name = "pypi"
[packages]
wand = "*"
arrow = "*"
unicode-slugify = "*"
requests = "*"
python-frontmatter = "*"
@ -13,7 +12,7 @@ langdetect = "*"
jinja2 = "*"
feedgen = "*"
filetype = "*"
gumbo = "*"
arrow = "==0.14.2"
[dev-packages]

View file

@ -1,5 +1,6 @@
<!-- Generated by IcoMoon.io -->
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16">
<title>flickr</title>
<path fill="#0063dc" d="M0 8c0 2.049 1.663 3.709 3.71 3.709 2.050 0 3.713-1.66 3.713-3.709s-1.662-3.709-3.713-3.709c-2.047 0-3.71 1.66-3.71 3.709zM8.577 8c0 2.049 1.662 3.709 3.711 3.709 2.042 0 3.711-1.66 3.711-3.709s-1.661-3.709-3.709-3.709c-2.050 0-3.713 1.66-3.713 3.709z"></path>
<?xml version="1.0" encoding="UTF-8"?>
<svg version="1.1" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg">
<title>flickr</title>
<circle cx="3.75" cy="8.25" r="3.75" fill="#0063dc" style="paint-order:markers fill stroke"/>
<circle cx="12.25" cy="8.25" r="3.75" fill="#ff0084" style="paint-order:markers fill stroke"/>
</svg>

Before

Width:  |  Height:  |  Size: 445 B

After

Width:  |  Height:  |  Size: 344 B

152
nasg.py
View file

@ -807,7 +807,6 @@ class Singular(MarkdownDoc):
self.dirpath = os.path.dirname(fpath)
self.name = os.path.basename(self.dirpath)
self.category = os.path.basename(os.path.dirname(self.dirpath))
self.pointers = []
@cached_property
def files(self):
@ -1236,34 +1235,32 @@ class Singular(MarkdownDoc):
logger.info("copying '%s' to '%s'", f, t)
cp(f, t)
async def save_to_archiveorg(self):
requests.get(f"http://web.archive.org/save/{self.url}")
@property
def has_archive(self):
return len(
glob.glob(os.path.join(self.dirpath, f"*archiveorg*.copy"))
)
async def get_from_archiveorg(self):
done = glob.glob(
os.path.join(self.dirpath, f"*archiveorg*.copy")
)
if done:
logger.debug(
"archive.org .copy exists for %s at %s",
self.name,
done[0],
)
if self.has_archive:
return
logger.info("trying to get archive.org .copy for %s", self.name)
if len(self.category):
wb = wayback.FindWaybackURL(
self.name, self.category, self.pointers
)
if self.is_future:
return
logger.info("archive.org .copy is missing for %s", self.name)
if len(self.category) and not (
settings.args.get("noservices")
or settings.args.get("offline")
):
wb = wayback.FindWaybackURL(self.name, self.category)
wb.run()
if len(wb.oldest):
archiveurl = url2slug(wb.oldest)
t = os.path.join(self.dirpath, f"{archiveurl}.copy")
writepath(t, wb.oldest)
del wb
async def render(self):
if settings.args.get("memento"):
await self.get_from_archiveorg()
await self.get_from_archiveorg()
if self.exists:
return True
@ -1541,25 +1538,6 @@ class IndexPHP(PHPFile):
writepath(self.renderfile, r)
class WebhookPHP(PHPFile):
@property
def renderfile(self):
return os.path.join(settings.paths.get("build"), "webhook.php")
@property
def templatefile(self):
return "Webhook.j2.php"
async def _render(self):
r = J2.get_template(self.templatefile).render(
{
"author": settings.author,
"webmentionio": keys.webmentionio,
}
)
writepath(self.renderfile, r)
class Category(dict):
def __init__(self, name=""):
self.name = name
@ -1628,10 +1606,13 @@ class Category(dict):
years.update({year: url})
return years
async def render(self):
async def render_feeds(self):
await self.XMLFeed(self, "rss").render()
await self.XMLFeed(self, "atom").render()
await self.JSONFeed(self).render()
async def render(self):
await self.render_feeds()
await self.Gopher(self).render()
if self.name in settings.flat:
await self.Flat(self).render()
@ -1788,9 +1769,13 @@ class Category(dict):
fg.link(href=self.parent.feedurl, rel="self")
fg.link(href=settings.meta.get("hub"), rel="hub")
for key in list(sorted(self.parent.keys(), reverse=True))[
0 : settings.pagination
]:
rkeys = list(sorted(self.parent.keys(), reverse=True))
rkeys = rkeys[0 : settings.pagination]
rkeys = list(sorted(rkeys, reverse=False))
# for key in list(sorted(self.parent.keys(), reverse=True))[
# 0 : settings.pagination
# ]:
for key in rkeys:
post = self.parent[key]
fe = fg.add_entry()
@ -1825,6 +1810,7 @@ class Category(dict):
if self.feedformat == "rss":
fe.link(href=post.url)
fe.content(post.html_content, type="CDATA")
# fe.description(post.txt_content, isSummary=True)
if post.is_photo:
fe.enclosure(
post.photo.href,
@ -1833,7 +1819,14 @@ class Category(dict):
)
elif self.feedformat == "atom":
fe.link(
href=post.url, rel="alternate", type="text/html"
href=post.url,
rel="alternate",
type="text/html"
)
fe.link(
href=post.photo.href,
rel="enclosure",
type=post.photo.mime_type,
)
fe.content(src=post.url, type="text/html")
fe.summary(post.summary)
@ -2149,22 +2142,24 @@ class Webmention(object):
if not self.exists:
return
with open(self.fpath) as f:
with open(self.fpath, "rt") as f:
txt = f.read()
if "telegraph.p3k.io" not in txt:
return
try:
maybe = json.loads(txt)
if "status" in maybe and "error" == maybe["status"]:
logger.error(
"errored webmention found at %s: %s",
self.dpath,
maybe,
)
return
except Exception as e:
# if it's not a JSON, it's a manually placed file, ignore it
return
if "status" in maybe and "error" == maybe["status"]:
logger.error(
"errored webmention found at %s: %s", self.dpath, maybe
)
# maybe["location"] = maybe[""]
# TODO finish cleanup and re-fetching with from 'original' in JSON
return
try:
if "location" not in maybe:
return
if "http_body" not in maybe:
@ -2314,36 +2309,28 @@ def make():
start = int(round(time.time() * 1000))
last = 0
# this needs to be before collecting the 'content' itself
if not settings.args.get("offline") and not settings.args.get(
"noservices"
if not (
settings.args.get("offline") or settings.args.get("noservices")
):
incoming = WebmentionIO()
incoming.run()
queue = AQ()
send = []
firsttimepublished = []
to_archive = []
content = settings.paths.get("content")
rules = IndexPHP()
webhook = WebhookPHP()
queue.put(webhook.render())
sitemap = Sitemap()
search = Search()
categories = {}
frontposts = Category()
home = Home(settings.paths.get("home"))
reverse_redirects = {}
for e in glob.glob(os.path.join(content, "*", "*.url")):
post = Redirect(e)
rules.add_redirect(post.source, post.target)
if post.target not in reverse_redirects:
reverse_redirects[post.target] = []
reverse_redirects[post.target].append(post.source)
for e in sorted(
glob.glob(
@ -2351,8 +2338,6 @@ def make():
)
):
post = Singular(e)
if post.url in reverse_redirects:
post.pointers = reverse_redirects[post.target]
# deal with images, if needed
for i in post.images.values():
queue.put(i.downsize())
@ -2360,6 +2345,9 @@ def make():
for i in post.to_ping:
send.append(i)
# if not post.is_future and not post.has_archive:
# to_archive.append(post.url)
# render and arbitrary file copy tasks for this very post
queue.put(post.render())
queue.put(post.copy_files())
@ -2368,11 +2356,6 @@ def make():
if post.is_future:
logger.info("%s is for the future", post.name)
continue
# elif not os.path.exists(post.renderfile):
# logger.debug(
# "%s seems to be fist time published", post.name
# )
# firsttimepublished.append(post)
# add post to search database
search.append(post)
@ -2419,9 +2402,8 @@ def make():
home.add(category, category.get(category.sortedkeys[0]))
queue.put(category.render())
# queue.put(frontposts.render_feeds())
queue.put(frontposts.render_feeds())
queue.put(home.render())
# actually run all the render & copy tasks
queue.run()
# copy static files
@ -2431,9 +2413,7 @@ def make():
t = os.path.join(
settings.paths.get("build"), os.path.basename(e)
)
if os.path.exists(t) and mtime(e) <= mtime(t):
continue
cp(e, t)
maybe_copy(e, t)
end = int(round(time.time() * 1000))
logger.info("process took %d ms" % (end - start))
@ -2457,18 +2437,12 @@ def make():
except Exception as e:
logger.error("syncing failed: %s", e)
if not settings.args.get("offline") and not settings.args.get(
"noservices"
):
logger.info("sending webmentions")
for wm in send:
queue.put(wm.send())
queue.run()
logger.info("sending webmentions finished")
# for post in firsttimepublished:
# queue.put(post.save_to_archiveorg())
queue.run()
if not settings.args.get("noservices"):
logger.info("sending webmentions")
for wm in send:
queue.put(wm.send())
queue.run()
logger.info("sending webmentions finished")
if __name__ == "__main__":

View file

@ -16,6 +16,7 @@ class nameddict(dict):
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
base = os.path.abspath(os.path.expanduser("~/Projects/petermolnar.net"))
syncserver = "liveserver:/web/petermolnar.net"
@ -26,7 +27,11 @@ displaydate = "YYYY-MM-DD HH:mm"
mementostartime = 1560992400
licence = nameddict(
{"article": "CC-BY-4.0", "journal": "CC-BY-NC-4.0", "_default": "CC-BY-NC-ND-4.0"}
{
"article": "CC-BY-4.0",
"journal": "CC-BY-NC-4.0",
"_default": "CC-BY-NC-ND-4.0",
}
)
author = nameddict(
@ -48,8 +53,11 @@ site = nameddict(
"url": "https://petermolnar.net",
"name": "petermolnar.net",
"image": "https://petermolnar.net/favicon.ico",
"license": "https://spdx.org/licenses/%s.html" % (licence["_default"]),
"sameAs": [],
"license": "https://spdx.org/licenses/%s.html"
% (licence["_default"]),
"sameAs": [
"https://t.me/petermolnarnet"
],
"author": {
"@context": "http://schema.org",
"@type": "Person",
@ -117,10 +125,22 @@ site = nameddict(
menu = nameddict(
{
"home": {"url": "%s/" % site["url"], "text": "home"},
"photo": {"url": "%s/category/photo/" % site["url"], "text": "photos"},
"journal": {"url": "%s/category/journal/" % site["url"], "text": "journal"},
"article": {"url": "%s/category/article/" % site["url"], "text": "IT"},
"note": {"url": "%s/category/note/" % site["url"], "text": "notes"},
"photo": {
"url": "%s/category/photo/" % site["url"],
"text": "photos",
},
"journal": {
"url": "%s/category/journal/" % site["url"],
"text": "journal",
},
"article": {
"url": "%s/category/article/" % site["url"],
"text": "IT",
},
"note": {
"url": "%s/category/note/" % site["url"],
"text": "notes",
},
}
)
@ -140,7 +160,9 @@ paths = nameddict(
{
"content": os.path.join(base, "content"),
"tmpl": os.path.join(base, "nasg", "templates"),
"watermark": os.path.join(base, "nasg", "templates", "watermark.png"),
"watermark": os.path.join(
base, "nasg", "templates", "watermark.png"
),
"build": os.path.join(base, "www"),
"queue": os.path.join(base, "queue"),
"remotewww": "web",
@ -206,32 +228,23 @@ gones = [
]
formerdomains = [
"cadeyrn.webporfolio.hu",
"blog.petermolnar.eu",
"petermolnar.eu",
# "cadeyrn.webporfolio.hu",
# "blog.petermolnar.eu",
# "petermolnar.eu",
]
formercategories = {
"article": [
"linux-tech-coding",
"diy-do-it-yourself",
"sysadmin-blog",
"sysadmin",
"szubjektiv-technika",
"wordpress"
],
"note": [
"blips",
"blog",
"r"
],
"journal": [
"blog",
],
"photo": [
"photoblog",
"fotography",
]
# "article": [
# "linux-tech-coding",
# "diy-do-it-yourself",
# "sysadmin-blog",
# "sysadmin",
# "szubjektiv-technika",
# "wordpress",
# ],
# "note": ["blips", "blog", "r"],
# "journal": ["blog"],
# "photo": ["photoblog", "fotography"],
}
@ -252,11 +265,12 @@ _booleanparams = {
"offline": "offline mode - no syncing, no querying services, etc.",
"noping": "make dummy webmention entries and don't really send them",
"noservices": "skip querying any service but do sync the website",
"memento": "try to fetch mementos from archive.org",
}
for k, v in _booleanparams.items():
_parser.add_argument("--%s" % (k), action="store_true", default=False, help=v)
_parser.add_argument(
"--%s" % (k), action="store_true", default=False, help=v
)
args = vars(_parser.parse_args())
@ -271,7 +285,9 @@ logger = logging.getLogger("NASG")
logger.setLevel(loglevel)
console_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)

View file

@ -11,7 +11,7 @@
{% endblock %}
{% block content %}
<main id="main" class="h-feed hatom">
<main id="main" class="h-feed hatom {{ category.name }}">
{% set year = [0] %}
{% for post in posts %}

View file

@ -1,114 +0,0 @@
<?php
function _syslog($msg) {
$trace = debug_backtrace();
$caller = $trace[1];
$parent = $caller['function'];
if (isset($caller['class']))
$parent = $caller['class'] . '::' . $parent;
return error_log( "{$parent}: {$msg}" );
}
function unauthorized($text) {
header('HTTP/1.1 401 Unauthorized');
die($text);
}
function badrequest($text) {
header('HTTP/1.1 400 Bad Request');
die($text);
}
function httpok($text) {
header('HTTP/1.1 200 OK');
echo($text);
exit(0);
}
function accepted() {
header('HTTP/1.1 202 Accepted');
#header('Location: {{ site.url }}');
exit(0);
}
if (!empty($_GET)) {
if ( ! isset($_GET['q']) ) {
badrequest('please POST a micropub request');
}
if ( isset($_GET['q']['config']) ) {
httpok(json_encode(array('tags' => array())));
}
if(isset($_GET['q']['syndicate-to'])) {
httpok(json_encode(array('syndicate-to' => array())));
}
badrequest('please POST a micropub request');
}
$raw = file_get_contents("php://input");
print_r($raw);
try {
$decoded = json_decode($raw, true);
} catch (Exception $e) {
_syslog('failed to decode JSON, trying decoding form data');
try {
parse_str($raw, $decoded);
}
catch (Exception $e) {
_syslog('failed to decoding form data as well');
badrequest('invalid POST contents');
}
}
print_r($decoded);
$token = '';
if ( isset($decoded['access_token']) ) {
$token = $decoded['access_token'];
unset($decoded['access_token']);
}
elseif ( isset($_SERVER['HTTP_AUTHORIZATION']) ) {
$token = trim(str_replace('Bearer', '', $_SERVER['HTTP_AUTHORIZATION']));
}
if (empty($token)) {
unauthorized('missing token');
}
$request = curl_init();
curl_setopt($request, CURLOPT_URL, 'https://tokens.indieauth.com/token');
curl_setopt($request, CURLOPT_HTTPHEADER, array(
'Content-Type: application/x-www-form-urlencoded',
sprintf('Authorization: Bearer %s', $token)
));
curl_setopt($request, CURLOPT_RETURNTRANSFER, 1);
$response = curl_exec($request);
curl_close($request);
parse_str(urldecode($response), $verification);
if (! isset($verification['scope']) ) {
unauthorized('missing "scope"');
}
if (! isset($verification['me']) ) {
unauthorized('missing "me"');
}
if ( ! stristr($verification['me'], '{{ site.url }}') ) {
unauthorized('wrong domain');
}
if ( ! stristr($verification['scope'], 'create') ) {
unauthorized('invalid scope');
}
$user = posix_getpwuid(posix_getuid());
$now = time();
$decoded['mtime'] = $now;
$fname = sprintf(
'%s/%s/%s.json',
$user['dir'],
'{{ paths.remotequeue }}',
microtime(true)
);
file_put_contents($fname, json_encode($decoded, JSON_PRETTY_PRINT));
accepted();

View file

@ -65,7 +65,7 @@
<aside id="entry-meta">
{% if post.sameAs|length %}
<span id="syndication">
Also on:
this post on other sites:
{% for url in post.sameAs %}
<a class="u-syndication" href="{{ url }}"><svg width="16" height="16" aria-label="{{ url|extractdomain }}"><use xlink:href="#icon-{{ url|extractdomain }}"</svg></a>
{% endfor %}

View file

@ -35,7 +35,7 @@
{% endblock %}
{% block content %}
<main id="main" class="h-feed hatom">
<main id="main" class="h-feed hatom {{ category.name }}">
{% set year = [0] %}
{% for post in posts %}

View file

@ -1,4 +1,11 @@
<article class="h-entry hentry" lang="{{ post.inLanguage }}">
{% if 'Photograph' == post['@type'] and post.image[0].width > post.image[0].height %}
{% set flexval_raw = post.image[0].width / post.image[0].height %}
{% else %}
{% set flexval_raw = 1 %}
{% endif %}
{% set flexval = flexval_raw|round|int %}
<article class="h-entry hentry" style="flex-grow: {{ flexval }}" lang="{{ post.inLanguage }}">
<header class="entry-header">
<h3 class="p-name entry-title">
{% if post.mentions %}

View file

@ -1,3 +1,4 @@
* {
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
@ -362,7 +363,8 @@ li p {
}
#syndication {
float:right;
display: block;
text-align: right;
}
@media all and (max-width: 51em) {
@ -391,3 +393,24 @@ li p {
margin: 0 auto;
}
}
@media all and (min-width: 70em) {
#main.photo {
max-width: 100%;
}
#main.photo {
display: flex;
flex-wrap: wrap;
align-items:stretch;
}
#main.photo article {
flex: 1 1 30em;
margin: 0.2em;
}
#main.photo article h3 {
text-align: center;
}
}

View file

@ -220,7 +220,8 @@
</g>
</symbol>
<symbol id="icon-www.flickr.com" viewBox="0 0 16 16">
<path fill="#0063dc" d="M0 8c0 2.049 1.663 3.709 3.71 3.709 2.050 0 3.713-1.66 3.713-3.709s-1.662-3.709-3.713-3.709c-2.047 0-3.71 1.66-3.71 3.709zM8.577 8c0 2.049 1.662 3.709 3.711 3.709 2.042 0 3.711-1.66 3.711-3.709s-1.661-3.709-3.709-3.709c-2.050 0-3.713 1.66-3.713 3.709z"></path>
<circle cx="3.75" cy="8.25" r="3.75" fill="#0063dc"/>
<circle cx="12.25" cy="8.25" r="3.75" fill="#ff0084"/>
</symbol>
<symbol id="icon-web.archive.org" viewBox="0 0 17 16">
<path d="M16 15v-1h-1v-6h1v-1h-3v1h1v6h-3v-6h1v-1h-3v1h1v6h-3v-6h1v-1h-3v1h1v6h-3v-6h1v-1h-3v1h1v6h-1v1h-1v1h17v-1h-1z"></path>

Before

Width:  |  Height:  |  Size: 22 KiB

After

Width:  |  Height:  |  Size: 22 KiB

View file

@ -33,39 +33,28 @@ RE_FIRST = re.compile(
class FindWaybackURL(object):
def __init__(self, path, category="", redirects=[]):
def __init__(self, path, category=""):
self.path = path
self.category = category
self.redirects = redirects
self.epoch = int(arrow.utcnow().timestamp)
self.oldest = ""
def possible_urls(self):
q = {}
paths = self.redirects
paths.append(self.path)
for path in paths:
q[f"http://{settings.site.name}/{path}/"] = True
q[f"http://{settings.site.name}/{path}/index.html"] = True
q[f"http://{settings.site.name}/{self.path}/"] = True
q[f"http://{settings.site.name}/{self.path}/index.html"] = True
domains = settings.formerdomains
domains.append(settings.site.name)
for domain in domains:
q[f"http://{domain}/{path}/"] = True
if self.category in settings.formercategories:
categories = settings.formercategories[
self.category
]
else:
categories = []
categories.append(self.category)
for category in categories:
q[f"http://{domain}/{category}/{path}/"] = True
q[
f"http://{domain}/category/{category}/{path}/"
] = True
# logger.info("possible urls: %s", json.dumps(list(q.keys()), indent=4, ensure_ascii=False))
domains = settings.formerdomains + [settings.site.name]
for domain in domains:
q[f"http://{domain}/{self.path}/"] = True
categories = [self.category]
if self.category in settings.formercategories:
categories = categories + settings.formercategories[self.category]
for category in categories:
q[f"http://{domain}/{category}/{self.path}/"] = True
q[
f"http://{domain}/category/{category}/{self.path}/"
] = True
return list(q.keys())
def get_first_memento(self, url):