reformatted code

Author: Bastian Venthur
Date: 2023-06-16 10:25:21 +02:00
parent f6c5eaf375
commit 0349bd3359
11 changed files with 381 additions and 380 deletions

blag/blag.py

@@ -6,32 +6,28 @@
 # remove when we don't support py38 anymore
 from __future__ import annotations
 
-from typing import Any
 import argparse
+import configparser
+import logging
 import os
 import shutil
-import logging
-import configparser
 import sys
+from typing import Any
 
-from jinja2 import (
-    Environment,
-    FileSystemLoader,
-    Template,
-    TemplateNotFound,
-)
 import feedgenerator
+from jinja2 import Environment, FileSystemLoader, Template, TemplateNotFound
 
 import blag
-from blag.markdown import markdown_factory, convert_markdown
 from blag.devserver import serve
-from blag.version import __VERSION__
+from blag.markdown import convert_markdown, markdown_factory
 from blag.quickstart import quickstart
+from blag.version import __VERSION__
 
 logger = logging.getLogger(__name__)
 logging.basicConfig(
     level=logging.INFO,
-    format='%(asctime)s %(levelname)s %(name)s %(message)s',
+    format="%(asctime)s %(levelname)s %(name)s %(message)s",
 )
@@ -70,84 +66,84 @@ def parse_args(args: list[str] | None = None) -> argparse.Namespace:
     """
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        '--version',
-        action='version',
-        version='%(prog)s ' + __VERSION__,
+        "--version",
+        action="version",
+        version="%(prog)s " + __VERSION__,
     )
     parser.add_argument(
-        '-v',
-        '--verbose',
-        action='store_true',
-        help='Verbose output.',
+        "-v",
+        "--verbose",
+        action="store_true",
+        help="Verbose output.",
     )
 
-    commands = parser.add_subparsers(dest='command')
+    commands = parser.add_subparsers(dest="command")
     commands.required = True
 
     build_parser = commands.add_parser(
-        'build',
-        help='Build website.',
+        "build",
+        help="Build website.",
     )
     build_parser.set_defaults(func=build)
     build_parser.add_argument(
-        '-i',
-        '--input-dir',
-        default='content',
-        help='Input directory (default: content)',
+        "-i",
+        "--input-dir",
+        default="content",
+        help="Input directory (default: content)",
     )
     build_parser.add_argument(
-        '-o',
-        '--output-dir',
-        default='build',
-        help='Ouptut directory (default: build)',
+        "-o",
+        "--output-dir",
+        default="build",
+        help="Ouptut directory (default: build)",
     )
     build_parser.add_argument(
-        '-t',
-        '--template-dir',
-        default='templates',
-        help='Template directory (default: templates)',
+        "-t",
+        "--template-dir",
+        default="templates",
+        help="Template directory (default: templates)",
    )
     build_parser.add_argument(
-        '-s',
-        '--static-dir',
-        default='static',
-        help='Static directory (default: static)',
+        "-s",
+        "--static-dir",
+        default="static",
+        help="Static directory (default: static)",
     )
 
     quickstart_parser = commands.add_parser(
-        'quickstart',
+        "quickstart",
         help="Quickstart blag, creating necessary configuration.",
     )
     quickstart_parser.set_defaults(func=quickstart)
 
     serve_parser = commands.add_parser(
-        'serve',
+        "serve",
         help="Start development server.",
     )
     serve_parser.set_defaults(func=serve)
     serve_parser.add_argument(
-        '-i',
-        '--input-dir',
-        default='content',
-        help='Input directory (default: content)',
+        "-i",
+        "--input-dir",
+        default="content",
+        help="Input directory (default: content)",
     )
     serve_parser.add_argument(
-        '-o',
-        '--output-dir',
-        default='build',
-        help='Ouptut directory (default: build)',
+        "-o",
+        "--output-dir",
+        default="build",
+        help="Ouptut directory (default: build)",
     )
     serve_parser.add_argument(
-        '-t',
-        '--template-dir',
-        default='templates',
-        help='Template directory (default: templates)',
+        "-t",
+        "--template-dir",
+        default="templates",
+        help="Template directory (default: templates)",
     )
     serve_parser.add_argument(
-        '-s',
-        '--static-dir',
-        default='static',
-        help='Static directory (default: static)',
+        "-s",
+        "--static-dir",
+        default="static",
+        help="Static directory (default: static)",
     )
 
     return parser.parse_args(args)
@@ -170,18 +166,18 @@ def get_config(configfile: str) -> configparser.SectionProxy:
     config = configparser.ConfigParser()
     config.read(configfile)
     # check for the mandatory options
-    for value in 'base_url', 'title', 'description', 'author':
+    for value in "base_url", "title", "description", "author":
         try:
-            config['main'][value]
+            config["main"][value]
         except Exception:
-            print(f'{value} is missing in {configfile}!')
+            print(f"{value} is missing in {configfile}!")
             sys.exit(1)
 
-    if not config['main']['base_url'].endswith('/'):
-        logger.warning('base_url does not end with a slash, adding it.')
-        config['main']['base_url'] += '/'
+    if not config["main"]["base_url"].endswith("/"):
+        logger.warning("base_url does not end with a slash, adding it.")
+        config["main"]["base_url"] += "/"
 
-    return config['main']
+    return config["main"]
 
 
 def environment_factory(
@@ -222,51 +218,51 @@ def build(args: argparse.Namespace) -> None:
     args
 
     """
-    os.makedirs(f'{args.output_dir}', exist_ok=True)
+    os.makedirs(f"{args.output_dir}", exist_ok=True)
 
     convertibles = []
     for root, dirnames, filenames in os.walk(args.input_dir):
         for filename in filenames:
             rel_src = os.path.relpath(
-                f'{root}/{filename}', start=args.input_dir
+                f"{root}/{filename}", start=args.input_dir
             )
             # all non-markdown files are just copied over, the markdown
             # files are converted to html
-            if rel_src.endswith('.md'):
+            if rel_src.endswith(".md"):
                 rel_dst = rel_src
-                rel_dst = rel_dst[:-3] + '.html'
+                rel_dst = rel_dst[:-3] + ".html"
                 convertibles.append((rel_src, rel_dst))
             else:
                 shutil.copy(
-                    f'{args.input_dir}/{rel_src}',
-                    f'{args.output_dir}/{rel_src}',
+                    f"{args.input_dir}/{rel_src}",
+                    f"{args.output_dir}/{rel_src}",
                 )
         for dirname in dirnames:
             # all directories are copied into the output directory
-            path = os.path.relpath(f'{root}/{dirname}', start=args.input_dir)
-            os.makedirs(f'{args.output_dir}/{path}', exist_ok=True)
+            path = os.path.relpath(f"{root}/{dirname}", start=args.input_dir)
+            os.makedirs(f"{args.output_dir}/{path}", exist_ok=True)
 
     # copy static files over
-    logger.info('Copying static files.')
+    logger.info("Copying static files.")
     if os.path.exists(args.static_dir):
         shutil.copytree(args.static_dir, args.output_dir, dirs_exist_ok=True)
 
-    config = get_config('config.ini')
+    config = get_config("config.ini")
     env = environment_factory(args.template_dir, dict(site=config))
 
     try:
-        page_template = env.get_template('page.html')
-        article_template = env.get_template('article.html')
-        index_template = env.get_template('index.html')
-        archive_template = env.get_template('archive.html')
-        tags_template = env.get_template('tags.html')
-        tag_template = env.get_template('tag.html')
+        page_template = env.get_template("page.html")
+        article_template = env.get_template("article.html")
+        index_template = env.get_template("index.html")
+        archive_template = env.get_template("archive.html")
+        tags_template = env.get_template("tags.html")
+        tag_template = env.get_template("tag.html")
     except TemplateNotFound as exc:
-        tmpl = os.path.join(blag.__path__[0], 'templates')
+        tmpl = os.path.join(blag.__path__[0], "templates")
         logger.error(
             f'Template "{exc.name}" not found in {args.template_dir}! '
-            'Consider running `blag quickstart` or copying the '
-            f'missing template from {tmpl}.'
+            "Consider running `blag quickstart` or copying the "
+            f"missing template from {tmpl}."
         )
         sys.exit(1)
@@ -282,10 +278,10 @@ def build(args: argparse.Namespace) -> None:
     generate_feed(
         articles,
         args.output_dir,
-        base_url=config['base_url'],
-        blog_title=config['title'],
-        blog_description=config['description'],
-        blog_author=config['author'],
+        base_url=config["base_url"],
+        blog_title=config["title"],
+        blog_description=config["description"],
+        blog_author=config["author"],
     )
     generate_index(articles, index_template, args.output_dir)
     generate_archive(articles, archive_template, args.output_dir)
@@ -330,9 +326,9 @@ def process_markdown(
     articles = []
     pages = []
     for src, dst in convertibles:
-        logger.debug(f'Processing {src}')
+        logger.debug(f"Processing {src}")
 
-        with open(f'{input_dir}/{src}', 'r') as fh:
+        with open(f"{input_dir}/{src}", "r") as fh:
             body = fh.read()
 
         content, meta = convert_markdown(md, body)
@@ -342,17 +338,17 @@ def process_markdown(
         # if markdown has date in meta, we treat it as a blog article,
         # everything else are just pages
-        if meta and 'date' in meta:
+        if meta and "date" in meta:
             articles.append((dst, context))
             result = article_template.render(context)
         else:
             pages.append((dst, context))
             result = page_template.render(context)
 
-        with open(f'{output_dir}/{dst}', 'w') as fh_dest:
+        with open(f"{output_dir}/{dst}", "w") as fh_dest:
             fh_dest.write(result)
 
     # sort articles by date, descending
-    articles = sorted(articles, key=lambda x: x[1]['date'], reverse=True)
+    articles = sorted(articles, key=lambda x: x[1]["date"], reverse=True)
     return articles, pages
@@ -382,30 +378,30 @@ def generate_feed(
         blog author
 
     """
-    logger.info('Generating Atom feed.')
+    logger.info("Generating Atom feed.")
     feed = feedgenerator.Atom1Feed(
         link=base_url,
         title=blog_title,
         description=blog_description,
-        feed_url=base_url + 'atom.xml',
+        feed_url=base_url + "atom.xml",
     )
 
     for dst, context in articles:
         # if article has a description, use that. otherwise fall back to
         # the title
-        description = context.get('description', context['title'])
+        description = context.get("description", context["title"])
         feed.add_item(
-            title=context['title'],
+            title=context["title"],
             author_name=blog_author,
             link=base_url + dst,
             description=description,
-            content=context['content'],
-            pubdate=context['date'],
+            content=context["content"],
+            pubdate=context["date"],
         )
 
-    with open(f'{output_dir}/atom.xml', 'w') as fh:
-        feed.write(fh, encoding='utf8')
+    with open(f"{output_dir}/atom.xml", "w") as fh:
+        feed.write(fh, encoding="utf8")
 
 
 def generate_index(
@@ -429,11 +425,11 @@ def generate_index(
     archive = []
     for dst, context in articles:
         entry = context.copy()
-        entry['dst'] = dst
+        entry["dst"] = dst
         archive.append(entry)
     result = template.render(dict(archive=archive))
-    with open(f'{output_dir}/index.html', 'w') as fh:
+    with open(f"{output_dir}/index.html", "w") as fh:
         fh.write(result)
@@ -458,11 +454,11 @@ def generate_archive(
     archive = []
     for dst, context in articles:
         entry = context.copy()
-        entry['dst'] = dst
+        entry["dst"] = dst
         archive.append(entry)
     result = template.render(dict(archive=archive))
-    with open(f'{output_dir}/archive.html', 'w') as fh:
+    with open(f"{output_dir}/archive.html", "w") as fh:
         fh.write(result)
@@ -484,11 +480,11 @@ def generate_tags(
     """
     logger.info("Generating Tag-pages.")
-    os.makedirs(f'{output_dir}/tags', exist_ok=True)
+    os.makedirs(f"{output_dir}/tags", exist_ok=True)
 
     # get tags number of occurrences
     all_tags: dict[str, int] = {}
     for _, context in articles:
-        tags: list[str] = context.get('tags', [])
+        tags: list[str] = context.get("tags", [])
         for tag in tags:
             all_tags[tag] = all_tags.get(tag, 0) + 1
     # sort by occurrence
@@ -497,25 +493,25 @@ def generate_tags(
     )
     result = tags_template.render(dict(tags=taglist))
-    with open(f'{output_dir}/tags/index.html', 'w') as fh:
+    with open(f"{output_dir}/tags/index.html", "w") as fh:
         fh.write(result)
 
     # get tags and archive per tag
     all_tags2: dict[str, list[dict[str, Any]]] = {}
     for dst, context in articles:
-        tags = context.get('tags', [])
+        tags = context.get("tags", [])
         for tag in tags:
             archive: list[dict[str, Any]] = all_tags2.get(tag, [])
             entry = context.copy()
-            entry['dst'] = dst
+            entry["dst"] = dst
             archive.append(entry)
             all_tags2[tag] = archive
 
     for tag, archive in all_tags2.items():
         result = tag_template.render(dict(archive=archive, tag=tag))
-        with open(f'{output_dir}/tags/{tag}.html', 'w') as fh:
+        with open(f"{output_dir}/tags/{tag}.html", "w") as fh:
             fh.write(result)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

blag/devserver.py

@@ -8,18 +8,18 @@ site if necessary.
 # remove when we don't support py38 anymore
 from __future__ import annotations
 
-from typing import NoReturn
-import os
-import logging
-import time
-import multiprocessing
-from http.server import SimpleHTTPRequestHandler, HTTPServer
-from functools import partial
 import argparse
+import logging
+import multiprocessing
+import os
+import time
+from functools import partial
+from http.server import HTTPServer, SimpleHTTPRequestHandler
+from typing import NoReturn
 
 from blag import blag
 
 logger = logging.getLogger(__name__)
@@ -69,7 +69,7 @@ def autoreload(args: argparse.Namespace) -> NoReturn:
     """
     dirs = [args.input_dir, args.template_dir, args.static_dir]
-    logger.info(f'Monitoring {dirs} for changes...')
+    logger.info(f"Monitoring {dirs} for changes...")
 
     # make sure we trigger the rebuild immediately when we enter the
     # loop to avoid serving stale contents
     last_mtime = 0.0
@@ -77,7 +77,7 @@ def autoreload(args: argparse.Namespace) -> NoReturn:
         mtime = get_last_modified(dirs)
         if mtime > last_mtime:
             last_mtime = mtime
-            logger.info('Change detected, rebuilding...')
+            logger.info("Change detected, rebuilding...")
             blag.build(args)
         time.sleep(1)
@@ -92,7 +92,7 @@ def serve(args: argparse.Namespace) -> None:
     """
     httpd = HTTPServer(
-        ('', 8000),
+        ("", 8000),
         partial(SimpleHTTPRequestHandler, directory=args.output_dir),
     )
     proc = multiprocessing.Process(target=autoreload, args=(args,))

blag/markdown.py

@@ -7,8 +7,9 @@ processing.
 # remove when we don't support py38 anymore
 from __future__ import annotations
 
-from datetime import datetime
 import logging
+from datetime import datetime
 from urllib.parse import urlsplit, urlunsplit
 from xml.etree.ElementTree import Element
@@ -16,7 +17,6 @@ from markdown import Markdown
 from markdown.extensions import Extension
 from markdown.treeprocessors import Treeprocessor
 
-
 logger = logging.getLogger(__name__)
@@ -33,13 +33,13 @@ def markdown_factory() -> Markdown:
     """
     md = Markdown(
         extensions=[
-            'meta',
-            'fenced_code',
-            'codehilite',
-            'smarty',
+            "meta",
+            "fenced_code",
+            "codehilite",
+            "smarty",
             MarkdownLinkExtension(),
         ],
-        output_format='html',
+        output_format="html",
     )
     return md
@@ -75,20 +75,20 @@ def convert_markdown(
     # markdowns metadata consists as list of strings -- one item per
     # line. let's convert into single strings.
     for key, value in meta.items():
-        value = '\n'.join(value)
+        value = "\n".join(value)
         meta[key] = value
 
     # convert known metadata
     # date: datetime
-    if 'date' in meta:
-        meta['date'] = datetime.fromisoformat(meta['date'])
-        meta['date'] = meta['date'].astimezone()
+    if "date" in meta:
+        meta["date"] = datetime.fromisoformat(meta["date"])
+        meta["date"] = meta["date"].astimezone()
 
     # tags: list[str] and lower case
-    if 'tags' in meta:
-        tags = meta['tags'].split(',')
+    if "tags" in meta:
+        tags = meta["tags"].split(",")
         tags = [t.lower() for t in tags]
         tags = [t.strip() for t in tags]
-        meta['tags'] = tags
+        meta["tags"] = tags
 
     return content, meta
@@ -98,25 +98,25 @@ class MarkdownLinkTreeprocessor(Treeprocessor):
     def run(self, root: Element) -> Element:
         for element in root.iter():
-            if element.tag == 'a':
-                url = element.get('href')
+            if element.tag == "a":
+                url = element.get("href")
                 # element.get could also return None, we haven't seen this so
                 # far, so lets wait if we raise this
                 assert url is not None
                 url = str(url)
                 converted = self.convert(url)
-                element.set('href', converted)
+                element.set("href", converted)
         return root
 
     def convert(self, url: str) -> str:
         scheme, netloc, path, query, fragment = urlsplit(url)
         logger.debug(
-            f'{url}: {scheme=} {netloc=} {path=} {query=} {fragment=}'
+            f"{url}: {scheme=} {netloc=} {path=} {query=} {fragment=}"
         )
         if scheme or netloc or not path:
             return url
-        if path.endswith('.md'):
-            path = path[:-3] + '.html'
+        if path.endswith(".md"):
+            path = path[:-3] + ".html"
         url = urlunsplit((scheme, netloc, path, query, fragment))
         return url
@@ -128,6 +128,6 @@ class MarkdownLinkExtension(Extension):
     def extendMarkdown(self, md: Markdown) -> None:
         md.treeprocessors.register(
             MarkdownLinkTreeprocessor(md),
-            'mdlink',
+            "mdlink",
             0,
         )

blag/quickstart.py

@@ -4,10 +4,11 @@
 # remove when we don't support py38 anymore
 from __future__ import annotations
 
-import configparser
 import argparse
-import shutil
+import configparser
 import os
+import shutil
 
 import blag
@@ -47,7 +48,7 @@ def copy_default_theme() -> None:
     """
     print("Copying default theme...")
-    for dir_ in 'templates', 'content', 'static':
+    for dir_ in "templates", "content", "static":
         print(f" Copying {dir_}...")
         try:
             shutil.copytree(
@@ -89,13 +90,13 @@ def quickstart(args: argparse.Namespace | None) -> None:
     )
 
     config = configparser.ConfigParser()
-    config['main'] = {
-        'base_url': base_url,
-        'title': title,
-        'description': description,
-        'author': author,
+    config["main"] = {
+        "base_url": base_url,
+        "title": title,
+        "description": description,
+        "author": author,
     }
-    with open('config.ini', 'w') as fh:
+    with open("config.ini", "w") as fh:
         config.write(fh)
 
     copy_default_theme()

blag/version.py

@@ -1 +1 @@
-__VERSION__ = '1.5.0'
+__VERSION__ = "1.5.0"