--- /dev/null
+PYTHON=python3
+dist: dist/treecutter.bin
+
+dist/treecutter.bin: bin/treecutter
+ nuitka3 --output-dir=dist bin/treecutter
+
+doc: doc/treecutter.en.xml.pdf
+
+doc/treecutter.en.xml.pdf: doc/treecutter.en.xml
+ cd doc ; nec.doc treecutter.en.xml
+
+README.md: doc/readme.en.xml
+ pandoc -s doc/readme.en.xml -o README.md
+
+.PHONY: typehint
+typehint:
+ mypy --no-namespace-packages --ignore-missing-imports treecutter/ bin/treecutter
+
+.PHONY: test
+test:
+ pytest tests/
+
+.PHONY: lint
+lint:
+ ruff check treecutter/ bin/treecutter
+ pylint treecutter/ bin/treecutter
+
+.PHONY: mkvenv
+mkvenv:
+ python3 -m venv venv/ --prompt treecutter
+ . venv/bin/activate
+ pip install --upgrade pip
+ pip install -r requirements.txt
+
+
+.PHONY: venv
+venv:
+ . venv/bin/activate
+
+.PHONY: novenv
+novenv:
+ deactivate
+
+.PHONY: requirements
+requirements:
+ python -m pip freeze > requirements.txt
+
+.PHONY: checklist
+checklist: lint typehint test
+
+.PHONY: black
+black:
+ black -l 79 treecutter/ bin/treecutter xinclude/
+
+.PHONY: wc
+wc:
+ find ./ -type f -name '*.py' -exec wc {} +
+
+.PHONY: tar
+tar: dist/treecutter.bin
+ tar cvfz dist/treecutter.tar.gz treecutter bin etc/treecutter.json doc/*pdf dist/treecutter.bin
+
+.PHONY: clean
+clean:
+ find . -type f -name "*.pyc" | xargs rm -fr
+ find . -type d -name __pycache__ | xargs rm -fr
+ rm -fr .mypy_cache
+ rm -f ~/.cache/pylint/treecutter_1.stats
+ rm -fr .pytest_cache
+ rm -fr .ruff_cache
+ rm -fr dist/treecutter.build
+ rm -f dist/treecutter.bin
+ rm -f doc/treecutter.en.xml.pdf
+ rm -f doc/treecutter.en.xml.docx
#!/usr/bin/python
-valid_scripts = ['.py','.pl']
+valid_scripts = [".py", ".pl"]
-PREFIXES={u'db': u'http://docbook.org/ns/docbook',
- u'xi': u'http://www.w3.org/2001/XInclude',
- u'xl': u'http://www.w3.org/1999/xlink',
- u'xml': u'http://www.w3.org/XML/1998/namespace',
- u'html' : u'http://www.w3.org/1999/xhtml'}
+PREFIXES = {
+ "db": "http://docbook.org/ns/docbook",
+ "xi": "http://www.w3.org/2001/XInclude",
+ "xl": "http://www.w3.org/1999/xlink",
+ "xml": "http://www.w3.org/XML/1998/namespace",
+ "html": "http://www.w3.org/1999/xhtml",
+}
-DB_NS="http://docbook.org/ns/docbook"
+DB_NS = "http://docbook.org/ns/docbook"
DB = "{%s}" % DB_NS
-XI_NS="http://www.w3.org/2001/XInclude"
+XI_NS = "http://www.w3.org/2001/XInclude"
XI = "{%s}" % XI_NS
-XLINK_NS="http://www.w3.org/1999/xlink"
+XLINK_NS = "http://www.w3.org/1999/xlink"
XLINK = "{%s}" % XLINK_NS
-XML_NS="http://www.w3.org/XML/1998/namespace"
+XML_NS = "http://www.w3.org/XML/1998/namespace"
XML = "{%s}" % XML_NS
-HTML_NS="http://www.w3.org/1999/xhtml"
+HTML_NS = "http://www.w3.org/1999/xhtml"
HTML = "{%s}" % HTML_NS
-NSMAP = {None : DB_NS,
- 'xi' : XI_NS,
- 'xlink' : XLINK_NS,
- 'xml' : XML_NS,
- 'html' : HTML_NS}
-XPATH = {'db' : DB_NS,
- 'xi' : XI_NS,
- 'xlink' : XLINK_NS,
- 'xml' : XML_NS,
- 'html' : HTML_NS}
+NSMAP = {
+ None: DB_NS,
+ "xi": XI_NS,
+ "xlink": XLINK_NS,
+ "xml": XML_NS,
+ "html": HTML_NS,
+}
+XPATH = {
+ "db": DB_NS,
+ "xi": XI_NS,
+ "xlink": XLINK_NS,
+ "xml": XML_NS,
+ "html": HTML_NS,
+}
import re
from itertools import chain
-class Directory():
+
+class Directory:
"""Class containing the state of the directory with articles"""
+
def __init__(self):
- self._cwd = u'.'
+ self._cwd = "."
self._translations = []
self._tree = []
- self._basepath = re.compile(r'[/\w\._-]*/[\w-]+')
+ self._basepath = re.compile(r"[/\w\._-]*/[\w-]+")
def translations(self, directory):
paths = (self._cwd, directory)
- for dirname, dirnames, filenames in chain.from_iterable(os.walk(path) for path in paths):
+ for dirname, dirnames, filenames in chain.from_iterable(
+ os.walk(path) for path in paths
+ ):
for filename in filenames:
- if fnmatch.fnmatch(filename, '*.xlf'):
- file_ = os.path.join(dirname,filename)
+ if fnmatch.fnmatch(filename, "*.xlf"):
+ file_ = os.path.join(dirname, filename)
self._translations.append(file_)
return self._translations
-
def scan(self, draftflag, levelflag):
for dirname, dirnames, filenames in os.walk(self._cwd):
for filename in filenames:
- if fnmatch.fnmatch(filename, '*.xml'):
- file_ = os.path.join(dirname,filename)
+ if fnmatch.fnmatch(filename, "*.xml"):
+ file_ = os.path.join(dirname, filename)
doc = Docbook(file_)
(title, menu) = doc.title()
draft = doc.status() == "draft"
level = doc.userlevel()
-# doc = etree.parse(file_)
-# title = doc.xpath(u'/db:article/db:info/db:title',namespaces=const.XPATH)
-# menu = doc.xpath(u'/db:article/db:info/db:titleabbrev',namespaces=const.XPATH)
-# draft = doc.xpath(u'/db:article[@status="draft"]',namespaces=const.XPATH)
+ # doc = etree.parse(file_)
+ # title = doc.xpath(u'/db:article/db:info/db:title',namespaces=const.XPATH)
+ # menu = doc.xpath(u'/db:article/db:info/db:titleabbrev',namespaces=const.XPATH)
+ # draft = doc.xpath(u'/db:article[@status="draft"]',namespaces=const.XPATH)
if draft and draftflag:
draft = False
if title and menu and not draft and level <= levelflag:
base = self._basepath.match(file_).group()
- link = base.replace('index','')[1:]
+ link = base.replace("index", "")[1:]
self._tree.append(link)
def set(self):
import treecutter.constants as const
from treecutter.image import Image
-#from treecutter.tools import warning
-class Docbook():
+# from treecutter.tools import warning
+
+
+class Docbook:
"""Class representing a docbook document"""
- def __init__(self,filename):
+
+ def __init__(self, filename):
self._filename = filename
self._doc = etree.parse(self._filename)
self._dirname = os.path.dirname(self._filename)
def title(self):
- t = self._doc.xpath(u'/db:article/db:info/db:title',namespaces=const.XPATH)
+ t = self._doc.xpath(
+ "/db:article/db:info/db:title", namespaces=const.XPATH
+ )
if t:
t = t[0].text
- ta = self._doc.xpath(u'/db:article/db:info/db:titleabbrev',namespaces=const.XPATH)
+ ta = self._doc.xpath(
+ "/db:article/db:info/db:titleabbrev", namespaces=const.XPATH
+ )
if ta:
- ta = ta[0].text
+ ta = ta[0].text
else:
- ta = self._doc.xpath(u'/db:article/db:info/db:subtitle',namespaces=const.XPATH)
- if ta:
- ta = ta[0].text
+ ta = self._doc.xpath(
+ "/db:article/db:info/db:subtitle", namespaces=const.XPATH
+ )
+ if ta:
+ ta = ta[0].text
return (t, ta)
def status(self):
- status = self._doc.xpath(u'/db:article[@status]',namespaces=const.XPATH)
+ status = self._doc.xpath(
+ "/db:article[@status]", namespaces=const.XPATH
+ )
if status:
- return status[0].get('status')
+ return status[0].get("status")
return None
def role(self):
- art = self._doc.xpath(u'/db:article[@role]',namespaces=const.XPATH)
+ art = self._doc.xpath("/db:article[@role]", namespaces=const.XPATH)
if art:
- return art[0].get('role')
- return 'index'
+ return art[0].get("role")
+ return "index"
def userlevel(self):
- lvl = self._doc.xpath(u'/db:article[@userlevel]',namespaces=const.XPATH)
+ lvl = self._doc.xpath(
+ "/db:article[@userlevel]", namespaces=const.XPATH
+ )
if lvl:
- lvlstr = lvl[0].get('userlevel')
+ lvlstr = lvl[0].get("userlevel")
return {
- 'Level 1': 1,
- 'Level 2': 2,
- 'Level 3': 3,
- 'Level 4': 4,
- 'Level 5': 5,
+ "Level 1": 1,
+ "Level 2": 2,
+ "Level 3": 3,
+ "Level 4": 4,
+ "Level 5": 5,
}.get(lvlstr, 0)
return 0
def expand_imageobjects(self):
cwd = os.getcwd()
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
- images = self._doc.xpath(u"//db:imageobject/db:imagedata[@fileref]",namespaces=const.XPATH)
+ images = self._doc.xpath(
+ "//db:imageobject/db:imagedata[@fileref]", namespaces=const.XPATH
+ )
for i in images:
os.chdir(self._dirname)
- im = i.get('fileref')
+ im = i.get("fileref")
imf = im
caption = db.caption()
if im.endswith((".png", ".jpg")):
img = Image(im)
- for p in img.caption().split('\n\n'):
+ for p in img.caption().split("\n\n"):
caption.append(db.para(p))
- link = db.para(db.link(img.infostr(),
- **{const.XLINK+"href": img.filename()}))
+ link = db.para(
+ db.link(
+ img.infostr(), **{const.XLINK + "href": img.filename()}
+ )
+ )
caption.append(link)
- imf = img.resize(800,600)
- mo = db.mediaobject(db.imageobject(
- db.imagedata(fileref=imf)),caption)
+ imf = img.resize(800, 600)
+ mo = db.mediaobject(
+ db.imageobject(db.imagedata(fileref=imf)), caption
+ )
iop = i.getparent()
mop = iop.getparent()
mopp = mop.getparent()
- mopp.insert(mopp.index(mop)+1,mo)
+ mopp.insert(mopp.index(mop) + 1, mo)
mopp.remove(mop)
os.chdir(cwd)
-
def parse_xincludes(self):
cwd = os.getcwd()
- for c in self._doc.xpath(u"//xi:include[@parse='text']",namespaces=const.XPATH):
- href = c.get('href')
- alang = c.get('accept-language')
- xpointer = c.get('xpointer')
+ for c in self._doc.xpath(
+ "//xi:include[@parse='text']", namespaces=const.XPATH
+ ):
+ href = c.get("href")
+ alang = c.get("accept-language")
+ xpointer = c.get("xpointer")
(p, ext) = os.path.splitext(href)
if ext in const.valid_scripts:
exe = []
- script = os.path.join(os.path.abspath(self._dirname)+'/'+href)
+ script = os.path.join(
+ os.path.abspath(self._dirname) + "/" + href
+ )
if os.path.isfile(script) and os.access(script, os.X_OK):
exe.append(script)
else:
- if href in resource_listdir('xinclude', ''):
- script = resource_filename('xinclude', href)
+ if href in resource_listdir("xinclude", ""):
+ script = resource_filename("xinclude", href)
exe.append(script)
else:
- print("Script "+href+" in "+self._filename+" missing")
+ print(
+ "Script "
+ + href
+ + " in "
+ + self._filename
+ + " missing"
+ )
if alang:
- exe.append("lang="+alang)
+ exe.append("lang=" + alang)
if xpointer:
- exe.append("xptr="+xpointer)
+ exe.append("xptr=" + xpointer)
if exe == []:
continue
print(" executing %15s" % (href)),
ts = time()
os.chdir(self._dirname)
- xml = subprocess.Popen(exe,stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ xml = subprocess.Popen(
+ exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
(stdout, stderr) = xml.communicate()
- #print xml.returnvalue
-# if stderr:
-# warning("%s : %s" % (" ".join(exe),stderr))
-# warning(stdout)
-# exit
+ # print xml.returnvalue
+ # if stderr:
+ # warning("%s : %s" % (" ".join(exe),stderr))
+ # warning(stdout)
+ # exit
os.chdir(cwd)
te = time()
- print(" [%5.2f s] (%s)" % (round(te-ts,2),xpointer))
+ print(" [%5.2f s] (%s)" % (round(te - ts, 2), xpointer))
xstr = etree.fromstring(stdout)
-# inserting the generated code and remove the xinclude reference
+ # inserting the generated code and remove the xinclude reference
idp = c.getparent()
- idp.insert(idp.index(c)+1,xstr)
+ idp.insert(idp.index(c) + 1, xstr)
idp.remove(c)
def xinclude(self):
def collect_links(self):
res = []
- for r in self._doc.xpath(u"//db:link[@xlink:href]",namespaces=const.XPATH):
- rf = os.path.join(self._dirname,r.get(const.XLINK+'href'))
+ for r in self._doc.xpath(
+ "//db:link[@xlink:href]", namespaces=const.XPATH
+ ):
+ rf = os.path.join(self._dirname, r.get(const.XLINK + "href"))
if os.path.isfile(rf):
- if r.get('security')=='encrypt':
- with open(rf, 'rb') as f:
+ if r.get("security") == "encrypt":
+ with open(rf, "rb") as f:
gpg = gnupg.GPG()
status = gpg.encrypt_file(
- f, None, passphrase=getpass.getpass(rf+' password:'), symmetric=True,
- output=rf+'.gpg')
- r.set(const.XLINK+'href', r.get(const.XLINK+'href')+'.gpg')
- rf=rf+'.gpg'
+ f,
+ None,
+ passphrase=getpass.getpass(rf + " password:"),
+ symmetric=True,
+ output=rf + ".gpg",
+ )
+ r.set(
+ const.XLINK + "href",
+ r.get(const.XLINK + "href") + ".gpg",
+ )
+ rf = rf + ".gpg"
res.append(rf)
return res
def collect_images(self):
res = []
- for i in self._doc.xpath(u"//db:imagedata[@fileref]",namespaces=const.XPATH):
- im = os.path.join(self._dirname,i.get('fileref'))
+ for i in self._doc.xpath(
+ "//db:imagedata[@fileref]", namespaces=const.XPATH
+ ):
+ im = os.path.join(self._dirname, i.get("fileref"))
if os.path.isfile(im):
res.append(im)
else:
- print("WARNING: File "+im+" is missing!")
+ print("WARNING: File " + im + " is missing!")
return res
def collect_videos(self):
res = []
- for i in self._doc.xpath(u"//db:videodata[@fileref]",namespaces=const.XPATH):
- im = os.path.join(self._dirname,i.get('fileref'))
+ for i in self._doc.xpath(
+ "//db:videodata[@fileref]", namespaces=const.XPATH
+ ):
+ im = os.path.join(self._dirname, i.get("fileref"))
if os.path.isfile(im):
res.append(im)
else:
- print("WARNING: File "+im+" is missing!")
+ print("WARNING: File " + im + " is missing!")
return res
def collect_forms(self):
res = []
- for i in self._doc.xpath(u"//html:form[@action]",namespaces=const.XPATH):
- pyscript = re.split('\.py',i.get('action'),1)[0]+'.py'
- im = os.path.join(self._dirname,pyscript)
+ for i in self._doc.xpath(
+ "//html:form[@action]", namespaces=const.XPATH
+ ):
+ pyscript = re.split("\.py", i.get("action"), 1)[0] + ".py"
+ im = os.path.join(self._dirname, pyscript)
if os.path.isfile(im):
res.append(im)
return res
def tostring(self):
- return etree.tostring(self._doc,encoding='UTF-8',pretty_print=False)
+ return etree.tostring(self._doc, encoding="UTF-8", pretty_print=False)
- def xslt(self,transform):
+ def xslt(self, transform):
return etree.tostring(transform(self._doc))
def clean(self):
import subprocess
import errno
-class Image():
+
+class Image:
"""Class representing an image"""
- def __init__(self,filename):
+
+ def __init__(self, filename):
self._filename = filename
self._format = {}
def infostr(self):
im = PIL_Image.open(self._filename)
- w,d = im.size
+ w, d = im.size
im.close()
byte = os.path.getsize(self._filename)
- return "[%dx%d (%s)]" % (w,d,sizeof_fmt(byte))
+ return "[%dx%d (%s)]" % (w, d, sizeof_fmt(byte))
- def resize(self,x,y,pad=0):
+ def resize(self, x, y, pad=0):
size = (x, y)
outfile, ext = os.path.splitext(self._filename)
outfile = "%s.%dx%d%s" % (outfile, size[0], size[1], ext)
im = PIL_Image.open(self._filename)
im.thumbnail(size, PIL_Image.LANCZOS)
if pad:
- bg = PIL_Image.new('RGBA', size, (0, 0, 0, 0))
- bg.paste(im,((size[0]-im.size[0])/2, (size[1]-im.size[1])/2))
+ bg = PIL_Image.new("RGBA", size, (0, 0, 0, 0))
+ bg.paste(
+ im,
+ ((size[0] - im.size[0]) / 2, (size[1] - im.size[1]) / 2),
+ )
bg.save(outfile)
else:
im.save(outfile)
xmpfile = XMPFiles(file_path=outfile)
xmp = xmpfile.get_xmp()
if xmp:
- xmp.set_property(consts.XMP_NS_XMP, u'CreatorTool',u'treecutter')
+ xmp.set_property(
+ consts.XMP_NS_XMP, "CreatorTool", "treecutter"
+ )
if xmpfile.can_put_xmp(xmp):
xmpfile.put_xmp(xmp)
else:
- cmd = ['exiftool', '-XMP:CreatorTool=treecutter','-quiet','-overwrite_original', outfile]
+ cmd = [
+ "exiftool",
+ "-XMP:CreatorTool=treecutter",
+ "-quiet",
+ "-overwrite_original",
+ outfile,
+ ]
retcode = subprocess.call(cmd)
if retcode:
- print('Error: '+' '.join(cmd)+' Returncode ['+str(retcode)+']')
+ print(
+ "Error: "
+ + " ".join(cmd)
+ + " Returncode ["
+ + str(retcode)
+ + "]"
+ )
exit
else:
- cmd = ['exiftool', '-XMP:CreatorTool=treecutter','-quiet','-overwrite_original', outfile]
+ cmd = [
+ "exiftool",
+ "-XMP:CreatorTool=treecutter",
+ "-quiet",
+ "-overwrite_original",
+ outfile,
+ ]
retcode = subprocess.call(cmd)
if retcode:
- print('Error: '+' '.join(cmd)+' Returncode ['+str(retcode)+']')
+ print(
+ "Error: "
+ + " ".join(cmd)
+ + " Returncode ["
+ + str(retcode)
+ + "]"
+ )
exit
xmpfile.close_file()
self._format[size] = outfile
if not xmp:
return False
cr = None
- if xmp.does_property_exist(consts.XMP_NS_XMP, u'CreatorTool'):
- cr = xmp.get_property(consts.XMP_NS_XMP, u'CreatorTool')
- return cr == u'treecutter'
+ if xmp.does_property_exist(consts.XMP_NS_XMP, "CreatorTool"):
+ cr = xmp.get_property(consts.XMP_NS_XMP, "CreatorTool")
+ return cr == "treecutter"
def thumbnail(self):
- return self.resize(50,50,1)
+ return self.resize(50, 50, 1)
def slider(self):
- return self.resize(700,438,1)
+ return self.resize(700, 438, 1)
def caption(self):
cap = "Beskrivning saknas"
xmpfile = XMPFiles(file_path=self._filename)
xmp = xmpfile.get_xmp()
if xmp:
- if xmp.does_property_exist(consts.XMP_NS_DC, 'description[1]'):
- cap = xmp.get_property(consts.XMP_NS_DC, 'description[1]')
+ if xmp.does_property_exist(consts.XMP_NS_DC, "description[1]"):
+ cap = xmp.get_property(consts.XMP_NS_DC, "description[1]")
xmpfile.close_file()
return cap
import glob
from treecutter.page import Page
-class Link():
+
+class Link:
"""Class representing a webpage on the site"""
- def __init__(self,link):
+
+ def __init__(self, link):
self._link = link
# find the representations of the link.
self._pages = []
- self._langregexp = re.compile('.*\.(\w\w)\.xml')
+ self._langregexp = re.compile(".*\.(\w\w)\.xml")
path = link
- if self._link[-1] == '/':
- path = path+'index'
+ if self._link[-1] == "/":
+ path = path + "index"
lang = self._scan_languages(path)
for l in lang:
- self._pages.append(Page(self,l))
+ self._pages.append(Page(self, l))
- def add_page(self,l):
- self._pages.append(Page(self,l))
+ def add_page(self, l):
+ self._pages.append(Page(self, l))
- def _scan_languages(self,path):
+ def _scan_languages(self, path):
lang = []
- for l in glob.glob('.'+path+'*.xml'):
+ for l in glob.glob("." + path + "*.xml"):
langcode = self._langregexp.search(l).group(1)
- lang.append((langcode,l))
+ lang.append((langcode, l))
return lang
def link(self):
for page in self._pages:
page.render(transform)
- def template(self,sitemap,style,tdir,subdir):
+ def template(self, sitemap, style, tdir, subdir):
for page in self._pages:
- page.template(sitemap,style,tdir,subdir)
+ page.template(sitemap, style, tdir, subdir)
- def page(self,lang):
+ def page(self, lang):
for page in self._pages:
- if page.language()==lang:
+ if page.language() == lang:
return page
return None
def resources(self):
- res = set()
+ res = set()
for page in self._pages:
res = res.union(page.resources())
return res
from treecutter.sitemap import Sitemap
from treecutter.tools import translate
+
def main():
- parser = argparse.ArgumentParser(description='Process docbook article tree.')
- parser.add_argument('--style', nargs='?',
- default=os.path.dirname(os.getcwd())+'/style/default/')
- parser.add_argument('--output', nargs='?',
- default=os.path.dirname(os.getcwd())+'/htdocs/')
- parser.add_argument('--subdir', nargs='?',
- default='')
- parser.add_argument('--draft', action='store_true')
- parser.add_argument('--level', type=int, choices=[1, 2, 3, 4, 5], default=0)
+ parser = argparse.ArgumentParser(
+ description="Process docbook article tree."
+ )
+ parser.add_argument(
+ "--style",
+ nargs="?",
+ default=os.path.dirname(os.getcwd()) + "/style/default/",
+ )
+ parser.add_argument(
+ "--output",
+ nargs="?",
+ default=os.path.dirname(os.getcwd()) + "/htdocs/",
+ )
+ parser.add_argument("--subdir", nargs="?", default="")
+ parser.add_argument("--draft", action="store_true")
+ parser.add_argument(
+ "--level", type=int, choices=[1, 2, 3, 4, 5], default=0
+ )
args = parser.parse_args()
translate(totrans)
print("]")
t2 = time()
- print("Translate[%5.2f s]" % (round(t2-t1,2)))
-
+ print("Translate[%5.2f s]" % (round(t2 - t1, 2)))
sitemap = Sitemap(args)
missing = dirset - sitemap.set()
removed = sitemap.set() - dirset
for page in removed:
- print(page+' page not availible in this config')
+ print(page + " page not available in this config")
for page in missing:
- print('adding missing page '+page)
+ print("adding missing page " + page)
sitemap.add_link(page)
if len(missing) != 0:
- print('writing new sitemap - please adjust if needed')
+ print("writing new sitemap - please adjust if needed")
sitemap.write_map()
dirsitemap = Sitemap(args)
if l in dirset:
dirsitemap.add_link(l)
-
# Generate a pygraphviz image of the site (TODO: currently not used)
dirsitemap.graph()
# Start processing the docbook articles to static html
t1 = time()
dirsitemap.publish()
t2 = time()
- print("Publish [%5.2f s]" % (round(t2-t1,2)))
- print("Total [%5.2f s]" % (round(t2-ts,2)))
+ print("Publish [%5.2f s]" % (round(t2 - t1, 2)))
+ print("Total [%5.2f s]" % (round(t2 - ts, 2)))
return 0
+
if __name__ == "__main__":
sys.exit(main())
import codecs
from lxml import etree
from lxml.builder import ElementMaker
-#from jinja2 import Template
+
+# from jinja2 import Template
import jinja2
from time import time
import treecutter.constants as const
from treecutter.docbook import Docbook
from treecutter.tools import mkdir_p
-class Page():
+
+class Page:
"""Class representing a version of a webpage"""
- def __init__(self,link,page):
+
+ def __init__(self, link, page):
self._link = link
self._file = page[1]
self._lang = page[0]
self._title = None
self._menu = None
self._rendered_article = None
- self._template = 'index'
+ self._template = "index"
self._status = None
def language(self):
def menu(self):
return self._menu
- def set_article(self,art):
+ def set_article(self, art):
self._rendered_article = art
def prepare(self):
doc = self._doc.collect_links()
img = self._doc.collect_images()
vid = self._doc.collect_videos()
- form= self._doc.collect_forms()
+ form = self._doc.collect_forms()
self._resources = doc + img + vid + form
def render(self, transform):
- self._rendered_article = self._doc.xslt(transform['xhtml5'])
+ self._rendered_article = self._doc.xslt(transform["xhtml5"])
- def template(self,sitemap,style,tdir,subdir):
- htmlmenu = sitemap.gen_menu(self._lang,None,"links")
- levelmenu = sitemap.gen_menu(self._lang,self,"tree")
- langmenu = sitemap.lang_menu(self._lang,self._link)
+ def template(self, sitemap, style, tdir, subdir):
+ htmlmenu = sitemap.gen_menu(self._lang, None, "links")
+ levelmenu = sitemap.gen_menu(self._lang, self, "tree")
+ langmenu = sitemap.lang_menu(self._lang, self._link)
article = self._rendered_article
- templateLoader = jinja2.FileSystemLoader( searchpath="/" )
- templateEnv = jinja2.Environment( loader=templateLoader )
+ templateLoader = jinja2.FileSystemLoader(searchpath="/")
+ templateEnv = jinja2.Environment(loader=templateLoader)
- templateVars = {'title':self._title,
- 'menu':htmlmenu,
- 'article':article,
- 'levelmenu':levelmenu,
- 'langmen':langmenu,
- 'subdir':subdir}
+ templateVars = {
+ "title": self._title,
+ "menu": htmlmenu,
+ "article": article,
+ "levelmenu": levelmenu,
+ "langmen": langmenu,
+ "subdir": subdir,
+ }
s = style
- t = s+self._template+'.'+self._lang+'.html.tmpl'
- template = templateEnv.get_template( t )
- templateout = template.render( templateVars )
+ t = s + self._template + "." + self._lang + ".html.tmpl"
+ template = templateEnv.get_template(t)
+ templateout = template.render(templateVars)
- outfile = tdir+'html'.join(self._file.rsplit('xml',1))
+ outfile = tdir + "html".join(self._file.rsplit("xml", 1))
mkdir_p(os.path.dirname(outfile))
- out = codecs.open(outfile, 'w', 'utf-8')
+ out = codecs.open(outfile, "w", "utf-8")
out.write(templateout)
out.close()
from treecutter import constants as const
from treecutter.trie import Trie
from treecutter.link import Link
-from treecutter.tools import ssh_cmd, publish, mkdir_p,get_folder_size,sizeof_fmt
+from treecutter.tools import (
+ ssh_cmd,
+ publish,
+ mkdir_p,
+ get_folder_size,
+ sizeof_fmt,
+)
-class Sitemap():
+class Sitemap:
"""Class keeping the internal site structure"""
- def __init__(self,args):
+
+ def __init__(self, args):
self._output = args.output
self._style = args.style
self._subdir = args.subdir
- self._file = 'sitemap.txt'
+ self._file = "sitemap.txt"
self._tree = Trie()
self._sitelang = set()
- self._isocode = etree.parse('/usr/share/xml/iso-codes/iso_639_3.xml')
+ self._isocode = etree.parse("/usr/share/xml/iso-codes/iso_639_3.xml")
self._tranlang = {}
- self._tmptarget = tempfile.mkdtemp()+'/'
+ self._tmptarget = tempfile.mkdtemp() + "/"
# The sitemap uses a trie structure to keep track of links
# A link represents the path to the document and the link
# representing the text on the site.
# A link can have several pages in different languages.
def add_link(self, link):
- tokens = list(filter(None,re.split(r'(^/[\w\.:-]*$|^/[\w\.:-]*/|[\w\.:-]*/)',link,flags=re.UNICODE)))
- self._tree.add(tokens,Link(link))
+ tokens = list(
+ filter(
+ None,
+ re.split(
+ r"(^/[\w\.:-]*$|^/[\w\.:-]*/|[\w\.:-]*/)",
+ link,
+ flags=re.UNICODE,
+ ),
+ )
+ )
+ self._tree.add(tokens, Link(link))
def write_map(self):
- f = codecs.open(self._file,'w','utf-8')
- s = '\n'.join(link.link() for link in self._tree)
+ f = codecs.open(self._file, "w", "utf-8")
+ s = "\n".join(link.link() for link in self._tree)
f.write(s)
f.close()
def read_map(self):
try:
- f = codecs.open(self._file, 'r', 'utf-8')
+ f = codecs.open(self._file, "r", "utf-8")
sml = f.read().split()
f.close()
for line in sml:
self.add_link(line)
- except (IOError):
- print('INFO: Could not read sitemap.txt - one will be created')
+ except IOError:
+ print("INFO: Could not read sitemap.txt - one will be created")
# Create a set of the current tree for comparison with the
# directory scan
for link in self._tree:
link.prepare()
t2 = time()
- print("Prepare [%5.2f s]" % (round(t2-t1,2)))
+ print("Prepare [%5.2f s]" % (round(t2 - t1, 2)))
for link in self._tree:
self._sitelang = self._sitelang.union(set(link.languages()))
for tran in self._sitelang:
- if tran != 'en':
- self._tranlang[tran] = gettext.translation('iso_639_3',
- languages=[tran])
+ if tran != "en":
+ self._tranlang[tran] = gettext.translation(
+ "iso_639_3", languages=[tran]
+ )
t3 = time()
- print("Language [%5.2f s]" % (round(t3-t2,2)))
+ print("Language [%5.2f s]" % (round(t3 - t2, 2)))
transform = {}
- transform['xhtml5'] = etree.XSLT(etree.parse(self._style+"docbook.xhtml5.xsl"))
+ transform["xhtml5"] = etree.XSLT(
+ etree.parse(self._style + "docbook.xhtml5.xsl")
+ )
for link in self._tree:
link.render(transform)
t4 = time()
- print("Render [%5.2f s]" % (round(t4-t3,2)))
+ print("Render [%5.2f s]" % (round(t4 - t3, 2)))
for link in self._tree:
- link.template(self, self._style, self._tmptarget,self._subdir)
+ link.template(self, self._style, self._tmptarget, self._subdir)
t5 = time()
- print("Template [%5.2f s]" % (round(t5-t4,2)))
+ print("Template [%5.2f s]" % (round(t5 - t4, 2)))
t6 = time()
res = set()
# Collect all files used by the documents
for link in self._tree:
res = res.union(link.resources())
for f in res:
- outfile = self._tmptarget+f
+ outfile = self._tmptarget + f
mkdir_p(os.path.dirname(outfile))
- shutil.copyfile(f,outfile)
- print( "Resources[%5.2f s]" % (round(t6-t5,2)))
+ shutil.copyfile(f, outfile)
+ print("Resources[%5.2f s]" % (round(t6 - t5, 2)))
# TODO: Improve the sitemap, it is a page that is generated from
# the ground up and added a bit adhoc.
- sitmaplink = Link('/sitemap')
+ sitmaplink = Link("/sitemap")
for l in self._sitelang:
- sitmaplink.add_page((l,'/sitemap.'+l+'.xml'))
+ sitmaplink.add_page((l, "/sitemap." + l + ".xml"))
for l in self._sitelang:
- txtmenu = self.gen_menu(l,None,"tree sitemap")
+ txtmenu = self.gen_menu(l, None, "tree sitemap")
sitmaplink.page(l).set_article(txtmenu)
- f = open(self._tmptarget+'sitemap.'+l+'.txt', "w")
+ f = open(self._tmptarget + "sitemap." + l + ".txt", "w")
f.write(txtmenu)
f.close()
- sitmaplink.page(l).template(self,self._style,self._tmptarget,self._subdir)
+ sitmaplink.page(l).template(
+ self, self._style, self._tmptarget, self._subdir
+ )
t7 = time()
- print("Sitemap [%5.2f s]" % (round(t7-t6,2)))
+ print("Sitemap [%5.2f s]" % (round(t7 - t6, 2)))
def graph(self):
self._tree.graph()
- def gen_menu(self,lang,page,cssclass):
- return self._tree.menu(lang,page,cssclass,self._subdir)
+ def gen_menu(self, lang, page, cssclass):
+ return self._tree.menu(lang, page, cssclass, self._subdir)
- def lang_menu(self,lang,link):
+ def lang_menu(self, lang, link):
html = ElementMaker()
menu = html.ul()
for l in link.languages():
- isoxml = u"//iso_639_3_entry[@*='"+l+"']"
- ln = self._isocode.xpath(isoxml)[0].get('name')
- if lang != 'en':
+ isoxml = "//iso_639_3_entry[@*='" + l + "']"
+ ln = self._isocode.xpath(isoxml)[0].get("name")
+ if lang != "en":
ln = self._tranlang[lang].ugettext(ln)
p = link.link()
- if p[-1] == u'/':
- p = p +u'index'
- p = p+u'.'+l
- li = html.li(html.a(ln,
- href=self._subdir+p,hreflang=l))
+ if p[-1] == "/":
+ p = p + "index"
+ p = p + "." + l
+ li = html.li(html.a(ln, href=self._subdir + p, hreflang=l))
menu.append(li)
-# print type(etree.tostring(menu,encoding='unicode',pretty_print=False))
- return etree.tostring(menu,encoding='unicode',pretty_print=False)
+ # print type(etree.tostring(menu,encoding='unicode',pretty_print=False))
+ return etree.tostring(menu, encoding="unicode", pretty_print=False)
def publish(self):
print("Size [ %7s ]" % (sizeof_fmt(get_folder_size(self._tmptarget))))
- ssh_cmd(self._output,"mkdir -p")
+ ssh_cmd(self._output, "mkdir -p")
publish(self._tmptarget, self._output)
- for res in ["stylesheets","images","js","fonts","favicon.ico"]:
- if (os.path.exists(self._style+res)):
- print("Size [ %7s ]" % (sizeof_fmt(get_folder_size(self._style+res))))
- publish(self._style+res, self._output)
- ssh_cmd(self._output,"chmod a+rx")
+ for res in ["stylesheets", "images", "js", "fonts", "favicon.ico"]:
+ if os.path.exists(self._style + res):
+ print(
+ "Size [ %7s ]"
+ % (sizeof_fmt(get_folder_size(self._style + res)))
+ )
+ publish(self._style + res, self._output)
+ ssh_cmd(self._output, "chmod a+rx")
#!/usr/bin/python
-#from __future__ import print_function
+# from __future__ import print_function
import os
import subprocess
import errno
import sys
+
def mkdir_p(path):
try:
os.makedirs(path)
- except OSError as exc: # Python >2.5
+ except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
- else: raise
+ else:
+ raise
+
-def publish(src,target):
- cmd = ["rsync","-a","--copy-links","--partial",src,target]
+def publish(src, target):
+ cmd = ["rsync", "-a", "--copy-links", "--partial", src, target]
retcode = subprocess.call(cmd)
if retcode:
- error('%s : retruncode %s' % (' '.join(cmd),str(retcode)))
+ error("%s : returncode %s" % (" ".join(cmd), str(retcode)))
+
def ssh_cmd(target, command):
t = target.split(":")
c = command.split()
- if len(t)==1:
- cmd = [c[0],c[1],t[0]]
+ if len(t) == 1:
+ cmd = [c[0], c[1], t[0]]
else:
- cmd = ["ssh",t[0],c[0],c[1],t[1]]
+ cmd = ["ssh", t[0], c[0], c[1], t[1]]
retcode = subprocess.call(cmd)
if retcode:
- error('%s : retruncode %s' % (' '.join(cmd),str(retcode)))
+ error("%s : returncode %s" % (" ".join(cmd), str(retcode)))
+
-def sizeof_fmt(num, suffix='B'):
- for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
+def sizeof_fmt(num, suffix="B"):
+ for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
- return "%.1f%s%s" % (num, 'Yi', suffix)
+ return "%.1f%s%s" % (num, "Yi", suffix)
+
def get_folder_size(folder):
total_size = os.path.getsize(folder)
total_size += get_folder_size(itempath)
return total_size
-#proc = subprocess.Popen(args, env={'PATH': os.getenv('PATH')})
+
+# proc = subprocess.Popen(args, env={'PATH': os.getenv('PATH')})
def translate(files):
for f in files:
out = subprocess.check_output(["translate.sh", f])
- sys.stdout.write('#')
+ sys.stdout.write("#")
+
-#def warning(*objs):
+# def warning(*objs):
# print("WARNING: ", *objs, file=sys.stderr)
-#def error(*objs):
+# def error(*objs):
# print("ERROR: ", *objs, file=sys.stderr)
#!/usr/bin/python
import pygraphviz as pgv
-class Node():
- def __init__(self,token,value):
+
+class Node:
+ def __init__(self, token, value):
self._token = token
self._value = value
self._children = []
def children(self):
return self._children
-class Trie():
+
+class Trie:
def __init__(self):
self._root = []
def __iter__(self):
return self.inorder(self._root)
- def inorder(self,t):
+ def inorder(self, t):
for l in t:
yield l.value()
for x in self.inorder(l.children()):
yield x
- def _add(self,trie, key, content):
+ def _add(self, trie, key, content):
# is the key a leaf
k = key.pop(0)
if key == []:
- node = Node(k,content)
+ node = Node(k, content)
trie.append(node)
else:
for ch in trie:
if ch.token() == k:
self._add(ch.children(), key, content)
- def add(self,key, content):
+ def add(self, key, content):
self._add(self._root, key, content)
def _graph(self, trie, G):
for l in trie:
G.add_node(l.token())
for ch in l.children():
- G.add_edge(l.token(),ch.token())
+ G.add_edge(l.token(), ch.token())
self._graph(l.children(), G)
def graph(self):
G = pgv.AGraph(directed=True)
G.add_node("sitemap")
for ch in self._root:
- G.add_edge("sitemap",ch.token())
+ G.add_edge("sitemap", ch.token())
self._graph(self._root, G)
-# G.layout('dot')
-# G.draw('g.png')
-# print G.string()
+
+ # G.layout('dot')
+ # G.draw('g.png')
+ # print G.string()
def _menu(self, trie, lang, page, css, subdir):
html = "<ul%s>\n" % css
for l in trie:
- sel = ''
+ sel = ""
p = l.value().page(lang)
if p == page:
sel = ' class="selected"'
if p != None:
- html += '<li%s><a href="%s%s">%s</a>\n' \
- % (sel,subdir,l.value().link(),p.menu())
+ html += '<li%s><a href="%s%s">%s</a>\n' % (
+ sel,
+ subdir,
+ l.value().link(),
+ p.menu(),
+ )
else:
- link =l.value().link()
- if link[-1] == u'/':
- link = link +u'index'
- html += '<li%s><a href="%s%s.en" hreflang="en">%s</a>\n' \
- % (sel,subdir,link, l.value().page('en').menu())
+ link = l.value().link()
+ if link[-1] == "/":
+ link = link + "index"
+ html += '<li%s><a href="%s%s.en" hreflang="en">%s</a>\n' % (
+ sel,
+ subdir,
+ link,
+ l.value().page("en").menu(),
+ )
if l.children():
html += self._menu(l.children(), lang, page, "", subdir)
html += "</li>\n"
html += "</ul>\n"
return html
- def menu(self,lang,page,cssclass,subdir):
- css = ''
+ def menu(self, lang, page, cssclass, subdir):
+ css = ""
if cssclass:
- css = ' class="'+cssclass+'"'
+ css = ' class="' + cssclass + '"'
return self._menu(self._root, lang, page, css, subdir)
from os import path
from httplib2 import Http
from urllib import urlencode
-from math import log,tan,pi,cos,radians,ceil,floor
+from math import log, tan, pi, cos, radians, ceil, floor
from lxml import etree
from lxml.builder import ElementMaker
from PIL import Image, ImageDraw
from treecutter import constants as const
# EC Equator lenght
-EC = 40075016.686 # meter
+EC = 40075016.686 # meter
# ER Earth radius
-ER = 6372798.2 # meter
+ER = 6372798.2 # meter
# Availible zoom levels in OSM
ZOOMRANGE = range(1, 18)
# tile size
h = Http(".cache")
+
class Coord(object):
def __init__(self, lat, lon):
- self.latitude = float(lat)
+ self.latitude = float(lat)
self.longitude = float(lon)
self.image = None
def osmlink(self):
- return "http://www.openstreetmap.org/?mlat=%s&mlon=%s&zoom=18&layers=M"\
- % (self.latitude,self.longitude)
+ return (
+ "http://www.openstreetmap.org/?mlat=%s&mlon=%s&zoom=18&layers=M"
+ % (self.latitude, self.longitude)
+ )
def dms(self):
ns = self.latitude
ew = self.longitude
- mnt,sec = divmod(ns*3600,60)
- deg,mnt = divmod(mnt,60)
- out = u'''%d°%2d'%5.2f"%s''' % ( deg,mnt,sec,'N')
- mnt,sec = divmod(ew*3600,60)
- deg,mnt = divmod(mnt,60)
- out += u''' %d°%2d'%05.2f"%s''' % ( deg,mnt,sec,'E')
+ mnt, sec = divmod(ns * 3600, 60)
+ deg, mnt = divmod(mnt, 60)
+ out = """%d°%2d'%5.2f"%s""" % (deg, mnt, sec, "N")
+ mnt, sec = divmod(ew * 3600, 60)
+ deg, mnt = divmod(mnt, 60)
+ out += """ %d°%2d'%05.2f"%s""" % (deg, mnt, sec, "E")
return out
def lontile(self, zoom):
def lattile(self, zoom):
rad = radians(self.latitude)
- tile = (1-log(tan(rad)+1/cos(rad))/pi)/2*2**zoom
+ tile = (1 - log(tan(rad) + 1 / cos(rad)) / pi) / 2 * 2**zoom
return tile
- def offset(self,zoom):
+ def offset(self, zoom):
x = self.lontile(zoom)
y = self.lattile(zoom)
- xo = int(floor((x-floor(x))*TS))
- yo = int(floor((y-floor(y))*TS))
+ xo = int(floor((x - floor(x)) * TS))
+ yo = int(floor((y - floor(y)) * TS))
return (xo, yo)
def distance(self, point):
- res = Geodesic.WGS84.Inverse(self.latitude, self.longitude,
- point.latitude, point.longitude)
- return res['s12']
+ res = Geodesic.WGS84.Inverse(
+ self.latitude, self.longitude, point.latitude, point.longitude
+ )
+ return res["s12"]
def direct(self, direction, lenght):
- point = Geodesic.WGS84.Direct(self.latitude, self.longitude,
- direction, length)
- return self.__class__(point['lat2'],point['lon2'])
-
- def png(self,zoom=15,size=(400,150)):
- filename = encode(self.latitude, self.longitude)+'.png'
-# if path.isfile(filename):
-# if path.getctime(filename) > time.time() - 60*60*24*2:
-# return
+ point = Geodesic.WGS84.Direct(
+ self.latitude, self.longitude, direction, lenght
+ )
+ return self.__class__(point["lat2"], point["lon2"])
+
+ def png(self, zoom=15, size=(400, 150)):
+ filename = encode(self.latitude, self.longitude) + ".png"
+ # if path.isfile(filename):
+ # if path.getctime(filename) > time.time() - 60*60*24*2:
+ # return
im = Image.new("RGB", size, None)
ew = int(self.lontile(zoom))
ns = int(self.lattile(zoom))
(xo, yo) = self.offset(zoom)
- et = int(floor((xo - ceil(size[0]/2))/TS))
- nt = int(floor((yo - ceil(size[1]/2))/TS))
- wt = int(floor((xo + ceil(size[0]/2))/TS))
- st = int(floor((yo + ceil(size[1]/2))/TS))
-
- lontiles = range(ew+et,ew+wt+1)
- lattiles = range(ns+nt,ns+st+1)
- imsize = (len(lontiles)*TS,len(lattiles)*TS)
+ et = int(floor((xo - ceil(size[0] / 2)) / TS))
+ nt = int(floor((yo - ceil(size[1] / 2)) / TS))
+ wt = int(floor((xo + ceil(size[0] / 2)) / TS))
+ st = int(floor((yo + ceil(size[1] / 2)) / TS))
+
+ lontiles = range(ew + et, ew + wt + 1)
+ lattiles = range(ns + nt, ns + st + 1)
+ imsize = (len(lontiles) * TS, len(lattiles) * TS)
grid = Image.new("RGB", imsize, None)
for yi, y in enumerate(lattiles):
for xi, x in enumerate(lontiles):
- url = 'http://tile.openstreetmap.org/%s/%s/%s.png' % (zoom,x,y)
+ url = "http://tile.openstreetmap.org/%s/%s/%s.png" % (
+ zoom,
+ x,
+ y,
+ )
request, content = h.request(url)
img = Image.open(StringIO(content))
-# dr = ImageDraw.Draw(img)
-# dr.rectangle([0,0,TS,TS], outline=0)
- box = (xi*TS, yi*TS)
+ # dr = ImageDraw.Draw(img)
+ # dr.rectangle([0,0,TS,TS], outline=0)
+ box = (xi * TS, yi * TS)
grid.paste(img, box)
- yp = [i for i,j in enumerate(lattiles) if j == int(ns)][0]*TS+yo
- xp = [i for i,j in enumerate(lontiles) if j == int(ew)][0]*TS+xo
- mark(grid, (xp,yp))
- xc = int(ceil(size[0]/2))
- yc = int(ceil(size[1]/2))
+ yp = [i for i, j in enumerate(lattiles) if j == int(ns)][0] * TS + yo
+ xp = [i for i, j in enumerate(lontiles) if j == int(ew)][0] * TS + xo
+ mark(grid, (xp, yp))
+ xc = int(ceil(size[0] / 2))
+ yc = int(ceil(size[1] / 2))
-# draw = ImageDraw.Draw(grid)
-# draw.rectangle([xp-xc,yp-yc,xp+xc,yp+yc], outline="red")
- gridc = grid.crop((xp-xc,yp-yc,xp+xc,yp+yc))
+ # draw = ImageDraw.Draw(grid)
+ # draw.rectangle([xp-xc,yp-yc,xp+xc,yp+yc], outline="red")
+ gridc = grid.crop((xp - xc, yp - yc, xp + xc, yp + yc))
gridc.save(filename)
def db_xml(self):
self.png()
- img = encode(self.latitude, self.longitude)+'.png'
- phr = "geo:"+str(self.latitude)+","+str(self.longitude)
+ img = encode(self.latitude, self.longitude) + ".png"
+ phr = "geo:" + str(self.latitude) + "," + str(self.longitude)
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
- uri = db.uri(db.link(
+ uri = db.uri(
+ db.link(
db.inlinemediaobject(
- db.imageobject(db.imagedata(
- fileref=img,
- format='PNG')),
- db.textobject(db.phrase(phr))
- ),
+ db.imageobject(db.imagedata(fileref=img, format="PNG")),
+ db.textobject(db.phrase(phr)),
+ ),
self.dms(),
- **{const.XLINK+"href": self.osmlink()}))
+ **{const.XLINK + "href": self.osmlink()}
+ )
+ )
return uri
+
def c(s):
- return s or ''
+ return s or ""
-def s(s,n):
+
+def s(s, n):
if n is not None:
return s or n.text
return s
class Address(object):
"""Address object to contain everything known about an address"""
- def __init__(self,street=None,postcode=None,city=None,country=None):
+
+ def __init__(self, street=None, postcode=None, city=None, country=None):
self.street = street
self.postcode = postcode
self.city = city
self.name = None
self.coord = None
-
- def geocode(self, language='en'):
- base_url = 'http://nominatim.openstreetmap.org/search?%s'
- params = { 'addressdetails': 1,
- 'limit': 1,
- 'format': 'xml',
- 'polygon': 0,
- 'accept-language': language}
+ def geocode(self, language="en"):
+ base_url = "http://nominatim.openstreetmap.org/search?%s"
+ params = {
+ "addressdetails": 1,
+ "limit": 1,
+ "format": "xml",
+ "polygon": 0,
+ "accept-language": language,
+ }
if self.country:
- t = etree.parse('/usr/share/xml/iso-codes/iso_3166.xml')
- r = t.xpath('//iso_3166_entry[@name="'+self.country+'"]')
- if len(r)==1:
+ t = etree.parse("/usr/share/xml/iso-codes/iso_3166.xml")
+ r = t.xpath('//iso_3166_entry[@name="' + self.country + '"]')
+ if len(r) == 1:
self.country_code = r[0].get("alpha_2_code")
if self.country_code:
- params['countrycodes'] = self.country_code
+ params["countrycodes"] = self.country_code
- addrlist=[]
- addrlist.append(u''+c(self.name)+', '+c(self.street)+', '+c(self.city))
- addrlist.append(u''+c(self.street)+', '+c(self.postcode)+', '+c(self.city))
- addrlist.append(u''+c(self.street)+', '+c(self.city))
+ addrlist = []
+ addrlist.append(
+ "" + c(self.name) + ", " + c(self.street) + ", " + c(self.city)
+ )
+ addrlist.append(
+ "" + c(self.street) + ", " + c(self.postcode) + ", " + c(self.city)
+ )
+ addrlist.append("" + c(self.street) + ", " + c(self.city))
for addr in addrlist:
- params['q'] = addr.encode('utf-8')
+ params["q"] = addr.encode("utf-8")
url = base_url % urlencode(params)
time.sleep(1)
resp, content = h.request(url)
root = etree.fromstring(content)
- places = int(root.xpath('count(//place[@place_id])'))
+ places = int(root.xpath("count(//place[@place_id])"))
if places == 1:
place = root.find("place")
-# print etree.tostring(place,encoding='UTF-8',pretty_print=True)
- self.postcode = s(self.postcode,place.find("postcode"))
- self.city = s(self.city,place.find("city"))
- self.country = s(self.country,place.find("country"))
- self.country_code = s(self.country_code,place.find("country_code"))
- self.coord=Coord(place.get("lat"),place.get("lon"))
+ # print etree.tostring(place,encoding='UTF-8',pretty_print=True)
+ self.postcode = s(self.postcode, place.find("postcode"))
+ self.city = s(self.city, place.find("city"))
+ self.country = s(self.country, place.find("country"))
+ self.country_code = s(
+ self.country_code, place.find("country_code")
+ )
+ self.coord = Coord(place.get("lat"), place.get("lon"))
return
def add_phone(self, phone):
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
- co = ''
+ co = ""
if self.coord:
co = self.coord.db_xml()
- adr = db.address(db.street(c(self.street)),
- db.postcode(c(self.postcode)),
- db.city(c(self.city)),
- db.country(c(self.country)),
- db.phone(c(self.phone)),
- co)
-# type=self.type,
+ adr = db.address(
+ db.street(c(self.street)),
+ db.postcode(c(self.postcode)),
+ db.city(c(self.city)),
+ db.country(c(self.country)),
+ db.phone(c(self.phone)),
+ co,
+ )
+ # type=self.type,
return adr
def distance(p1, p2):
res = Geodesic.WGS84.Inverse(p1[0], p1[1], p2[0], p2[1])
- return res['s12']
+ return res["s12"]
+
def mark(image, coord):
draw = ImageDraw.Draw(image)
x, y = coord
r = 5
- bbox = (x-r, y-r, x+r, y+r)
+ bbox = (x - r, y - r, x + r, y + r)
draw.ellipse(bbox, outline="red")
-def box(image,box):
+
+def box(image, box):
draw = ImageDraw.Draw(image)
draw.rectangle(box, outline="red")
-def mapimages(coords, zoom=15,size=(TS,TS)):
-
- minlat = min(coords,key=attrgetter('latitude'))
- maxlat = max(coords,key=attrgetter('latitude'))
- minlon = min(coords,key=attrgetter('longitude'))
- maxlon = max(coords,key=attrgetter('longitude'))
-
- # Find minimal bounding box and expand it 5%
- hyp = distance((maxlat,minlon),(minlat,maxlon))
- hyp = hyp*0.05
- uld = Geodesic.WGS84.Direct(maxlat, minlon, 315, hyp)
- urd = Geodesic.WGS84.Direct(maxlat, maxlon, 45, hyp)
- lld = Geodesic.WGS84.Direct(minlat, minlon, 225, hyp)
- lrd = Geodesic.WGS84.Direct(minlat, maxlon, 135, hyp)
-
- ul = Coord(maxlat,minlon).direct(315, hyp)
-
- ul = (uld['lat2'],uld['lon2'])
- ur = (urd['lat2'],urd['lon2'])
- ll = (lld['lat2'],lld['lon2'])
- lr = (lrd['lat2'],lrd['lon2'])
- top = distance(ul,ur)
- bottom = distance(ll,lr)
- left = distance(ul,ll)
- right = distance(ur,lr)
-# m_per_pix = EC*abs(cos(lat))/2**(zoomlevel+8)
-# m_per_pix = ER*2*pi*abs(cos(lat))/2**(zoomlevel+8)
- for zoom in range(1,18):
- t = 2**(zoom)*TS/(ER*2*pi*abs(cos(ul[0])))
- toppix = t*top
- leftpix = t*left
- b = 2**(zoom)*TS/(ER*2*pi*abs(cos(ll[0])))
- bottompix = b*bottom
- rightpix = b*right
-# print "Zoom: %s : %s %s %s %s" % (zoom, toppix, bottompix, leftpix, rightpix)
- if max(toppix,bottompix,leftpix,rightpix) > TS*4:
- break
-# print "Zoom to use : %s" % (zoom)
- ult = coordtile(ul,zoom)
- lrt = coordtile(lr,zoom)
- lattiles = range(int(ult[0]),int(lrt[0])+1)
- lontiles = range(int(ult[1]),int(lrt[1])+1)
- size = (len(lontiles)*TS,len(lattiles)*TS)
- grid = Image.new("RGB", size, None)
- img = []
- for yi, y in enumerate(lattiles):
- for xi, x in enumerate(lontiles):
- url = tile(x,y,zoom)
-# print url
- time.sleep(1)
- request, content = h.request(url)
- img = Image.open(StringIO(content))
- box = (xi*TS, yi*TS)
- grid.paste(img, box)
- for c in coords:
- t = coordtile(c,zoom)
- o = offset(c,zoom)
- yp = [i for i,x in enumerate(lattiles) if x == int(t[0])][0]*TS+o[1]
- xp = [i for i,x in enumerate(lontiles) if x == int(t[1])][0]*TS+o[0]
- mark(grid, (xp,yp))
-
- t = coordtile(ul,zoom)
- o = offset(ul,zoom)
- yp = [i for i,x in enumerate(lattiles) if x == int(t[0])][0]*TS+o[1]
- xp = [i for i,x in enumerate(lontiles) if x == int(t[1])][0]*TS+o[0]
- t = coordtile(lr,zoom)
- o = offset(lr,zoom)
- ypl = [i for i,x in enumerate(lattiles) if x == int(t[0])][0]*TS+o[1]
- xpl = [i for i,x in enumerate(lontiles) if x == int(t[1])][0]*TS+o[0]
- gridc = grid.crop((xp,yp,xpl,ypl))
- gridc.show()
+
+def mapimages(coords, zoom=15, size=(TS, TS)):
+
+ minlat = min(coords, key=attrgetter("latitude"))
+ maxlat = max(coords, key=attrgetter("latitude"))
+ minlon = min(coords, key=attrgetter("longitude"))
+ maxlon = max(coords, key=attrgetter("longitude"))
+
+ # Find minimal bounding box and expand it 5%
+ hyp = distance((maxlat, minlon), (minlat, maxlon))
+ hyp = hyp * 0.05
+ uld = Geodesic.WGS84.Direct(maxlat, minlon, 315, hyp)
+ urd = Geodesic.WGS84.Direct(maxlat, maxlon, 45, hyp)
+ lld = Geodesic.WGS84.Direct(minlat, minlon, 225, hyp)
+ lrd = Geodesic.WGS84.Direct(minlat, maxlon, 135, hyp)
+
+ ul = Coord(maxlat, minlon).direct(315, hyp)
+
+ ul = (uld["lat2"], uld["lon2"])
+ ur = (urd["lat2"], urd["lon2"])
+ ll = (lld["lat2"], lld["lon2"])
+ lr = (lrd["lat2"], lrd["lon2"])
+ top = distance(ul, ur)
+ bottom = distance(ll, lr)
+ left = distance(ul, ll)
+ right = distance(ur, lr)
+ # m_per_pix = EC*abs(cos(lat))/2**(zoomlevel+8)
+ # m_per_pix = ER*2*pi*abs(cos(lat))/2**(zoomlevel+8)
+ for zoom in range(1, 18):
+ t = 2 ** (zoom) * TS / (ER * 2 * pi * abs(cos(ul[0])))
+ toppix = t * top
+ leftpix = t * left
+ b = 2 ** (zoom) * TS / (ER * 2 * pi * abs(cos(ll[0])))
+ bottompix = b * bottom
+ rightpix = b * right
+ # print "Zoom: %s : %s %s %s %s" % (zoom, toppix, bottompix, leftpix, rightpix)
+ if max(toppix, bottompix, leftpix, rightpix) > TS * 4:
+ break
+ # print "Zoom to use : %s" % (zoom)
+ ult = coordtile(ul, zoom)
+ lrt = coordtile(lr, zoom)
+ lattiles = range(int(ult[0]), int(lrt[0]) + 1)
+ lontiles = range(int(ult[1]), int(lrt[1]) + 1)
+ size = (len(lontiles) * TS, len(lattiles) * TS)
+ grid = Image.new("RGB", size, None)
+ img = []
+ for yi, y in enumerate(lattiles):
+ for xi, x in enumerate(lontiles):
+ url = tile(x, y, zoom)
+ # print url
+ time.sleep(1)
+ request, content = h.request(url)
+ img = Image.open(StringIO(content))
+ box = (xi * TS, yi * TS)
+ grid.paste(img, box)
+ for c in coords:
+ t = coordtile(c, zoom)
+ o = offset(c, zoom)
+ yp = [i for i, x in enumerate(lattiles) if x == int(t[0])][0] * TS + o[
+ 1
+ ]
+ xp = [i for i, x in enumerate(lontiles) if x == int(t[1])][0] * TS + o[
+ 0
+ ]
+ mark(grid, (xp, yp))
+
+ t = coordtile(ul, zoom)
+ o = offset(ul, zoom)
+ yp = [i for i, x in enumerate(lattiles) if x == int(t[0])][0] * TS + o[1]
+ xp = [i for i, x in enumerate(lontiles) if x == int(t[1])][0] * TS + o[0]
+ t = coordtile(lr, zoom)
+ o = offset(lr, zoom)
+ ypl = [i for i, x in enumerate(lattiles) if x == int(t[0])][0] * TS + o[1]
+ xpl = [i for i, x in enumerate(lontiles) if x == int(t[1])][0] * TS + o[0]
+ gridc = grid.crop((xp, yp, xpl, ypl))
+ gridc.show()
+
+
# gridc.save("cap-un.png")
if __name__ == "__main__":
if al[0] == "lang":
lang = al[1]
if al[0] == "xptr":
- argument = al[1].decode('utf-8')
+ argument = al[1].decode("utf-8")
- addrlist = argument.split(',')
+ addrlist = argument.split(",")
addrfmt = "street,postcode,city,country"
- adict = addrfmt.split(',')
- argdict = dict(zip(adict,addrlist))
+ adict = addrfmt.split(",")
+ argdict = dict(zip(adict, addrlist))
addr = Address(**argdict)
addr.geocode()
axml = addr.db_xml()
-# clean_db(axml)
+ # clean_db(axml)
- #print(etree.tostring(cxml, pretty_print=True))
- #sys.stdout.write(out.encode('utf-8'))
- sys.stdout.write(etree.tostring(axml,encoding='UTF-8',pretty_print=False))
+ # print(etree.tostring(cxml, pretty_print=True))
+ # sys.stdout.write(out.encode('utf-8'))
+ sys.stdout.write(
+ etree.tostring(axml, encoding="UTF-8", pretty_print=False)
+ )
from address import Address
from treecutter import constants as const
+
class Contacts(object):
def __init__(self, uri):
self.uri = uri
self.card_data = None
- if uri.scheme == 'file':
- with open(self.uri.path, 'r') as f:
+ if uri.scheme == "file":
+ with open(self.uri.path, "r") as f:
self.card_data = f.read()
f.closed
- if uri.scheme == 'http':
+ if uri.scheme == "http":
pw = getpass()
print("http not yet implemented")
- if uri.scheme == 'https':
+ if uri.scheme == "https":
print("https not yet implemented")
- def filter(self,query):
- (key, name) = query.split(':')
+ def filter(self, query):
+ (key, name) = query.split(":")
card = None
for c in readComponents(self.card_data):
if key in c.contents.keys():
- if name.decode('utf-8') == c.contents[key][0].value[0]:
+ if name.decode("utf-8") == c.contents[key][0].value[0]:
card = c
- if key == 'firstname':
- if name.decode('utf-8') == c.n.value.given:
+ if key == "firstname":
+ if name.decode("utf-8") == c.n.value.given:
card = c
- if key == 'surname':
- if name.decode('utf-8') == c.n.value.family:
+ if key == "surname":
+ if name.decode("utf-8") == c.n.value.family:
card = c
return Contact(card)
+
class Contact(object):
- def __init__(self,card):
+ def __init__(self, card):
self.card = card
self.person = None
- self.organization = Organization('')
+ self.organization = Organization("")
self.address = []
- self.phone = ''
- self.jobtitle = ''
- self.url = ''
- self.email = ''
+ self.phone = ""
+ self.jobtitle = ""
+ self.url = ""
+ self.email = ""
self.parsecard()
def parsecard(self):
- if 'n' in self.card.contents.keys():
+ if "n" in self.card.contents.keys():
n = self.card.n.value
- empty = n.prefix == '' and n.given == '' and \
- n.additional =='' and n.family =='' and n.suffix == ''
+ empty = (
+ n.prefix == ""
+ and n.given == ""
+ and n.additional == ""
+ and n.family == ""
+ and n.suffix == ""
+ )
if not empty:
- self.person = Person(n.given,n.family,
- n.prefix,n.additional,n.suffix)
- if 'title' in self.card.contents.keys():
+ self.person = Person(
+ n.given, n.family, n.prefix, n.additional, n.suffix
+ )
+ if "title" in self.card.contents.keys():
self.jobtitle = self.card.title.value
- if 'org' in self.card.contents.keys():
+ if "org" in self.card.contents.keys():
self.organization = Organization(self.card.org.value[0])
- for av in self.card.contents['adr']:
+ for av in self.card.contents["adr"]:
a = av.value
- addr = Address(a.street,a.code,a.city,a.country)
- if av.type_param == 'work':
+ addr = Address(a.street, a.code, a.city, a.country)
+ if av.type_param == "work":
self.organization.add_address(addr)
- if av.type_param == 'home':
+ if av.type_param == "home":
self.address.append(addr)
addr.geocode()
- for i,t in enumerate(self.card.contents['tel']):
- if av.type_param == 'cell': # Does not exist ?
+ for i, t in enumerate(self.card.contents["tel"]):
+ if av.type_param == "cell": # Does not exist ?
self.phone = t.value
- if av.type_param == 'work':
- self.organization.add_phone(i,t.value)
- if av.type_param == 'home':
+ if av.type_param == "work":
+ self.organization.add_phone(i, t.value)
+ if av.type_param == "home":
self.address[i].add_phone(t.value)
- if 'url' in self.card.contents.keys():
+ if "url" in self.card.contents.keys():
self.url = self.card.url.value
-# if 'email' in self.card.contents.keys():
-# self.email = self.card.email.value
-
+ # if 'email' in self.card.contents.keys():
+ # self.email = self.card.email.value
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
if self.person:
- pers = db.person(self.person.db_xml(),db.phone(self.phone))
+ pers = db.person(self.person.db_xml(), db.phone(self.phone))
for a in self.address:
pers.append(a.db_xml())
- pers.append(db.affiliation(db.jobtitle(self.jobtitle),
- self.organization.db_xml()))
+ pers.append(
+ db.affiliation(
+ db.jobtitle(self.jobtitle), self.organization.db_xml()
+ )
+ )
pers.append(db.email(self.email))
else:
pers = self.organization.db_xml()
- pers.append(db.uri(db.link(self.url,**{const.XLINK+"href": self.url}),
- type='website'))
+ pers.append(
+ db.uri(
+ db.link(self.url, **{const.XLINK + "href": self.url}),
+ type="website",
+ )
+ )
return pers
+
class Person(object):
- def __init__(self,firstname,surname,honorific,othername,linage):
+ def __init__(self, firstname, surname, honorific, othername, linage):
self.honorific = honorific
self.firstname = firstname
self.othername = othername
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
p = db.personname(
- db.honorific(self.honorific),
- db.firstname(self.firstname),
- db.othername(self.othername),
- db.surname(self.surname),
- db.linage(self.linage)
- )
+ db.honorific(self.honorific),
+ db.firstname(self.firstname),
+ db.othername(self.othername),
+ db.surname(self.surname),
+ db.linage(self.linage),
+ )
return p
+
class Organization(object):
- def __init__(self,orgname):
+ def __init__(self, orgname):
self.orgname = orgname
self.address = []
- def add_address(self,addr):
+ def add_address(self, addr):
addr.set_name(self.orgname)
self.address.append(addr)
org.append(a.db_xml())
return org
+
def recursively_empty(e):
- if e.text or e.tag == const.DB+'imagedata':
+ if e.text or e.tag == const.DB + "imagedata":
return False
return all((recursively_empty(c) for c in e.iterchildren()))
+
def clean_db(xml):
context = etree.iterwalk(xml)
for action, elem in context:
if __name__ == "__main__":
for arg in sys.argv[1:]:
- al = arg.split("=",1)
+ al = arg.split("=", 1)
if al[0] == "lang":
lang = al[1]
if al[0] == "xptr":
argument = al[1]
- (uristr,query) = argument.split('|')
+ (uristr, query) = argument.split("|")
uri = urlparse(uristr)
contact = Contacts(uri).filter(query)
cxml = contact.db_xml()
clean_db(cxml)
- #print(etree.tostring(cxml, pretty_print=True))
- #sys.stdout.write(out.encode('utf-8'))
- sys.stdout.write(etree.tostring(cxml,encoding='UTF-8',pretty_print=False))
+ # print(etree.tostring(cxml, pretty_print=True))
+ # sys.stdout.write(out.encode('utf-8'))
+ sys.stdout.write(
+ etree.tostring(cxml, encoding="UTF-8", pretty_print=False)
+ )
self.uri = uri
self.events = []
self.data = None
- if uri.scheme == 'file':
- with open(self.uri.path, 'r') as f:
+ if uri.scheme == "file":
+ with open(self.uri.path, "r") as f:
self.data = f.read()
f.closed
- if uri.scheme == 'http':
+ if uri.scheme == "http":
pw = getpass()
print("http not yet implemented")
- if uri.scheme == 'https':
+ if uri.scheme == "https":
pw = getpass()
- headers = {"User-Agent": "Mozilla/5.0",
- "Content-Type": "text/xml",
- "Accept": "text/xml"}
-
- headers['authorization'] = "Basic %s" % (("%s:%s" % (self.uri.username, pw)).encode('base64')[:-1])
- handle = httplib.HTTPSConnection(self.uri.hostname,self.uri.port)
- res = handle.request('GET', self.uri.path, "", headers)
+ headers = {
+ "User-Agent": "Mozilla/5.0",
+ "Content-Type": "text/xml",
+ "Accept": "text/xml",
+ }
+
+ headers["authorization"] = "Basic %s" % (
+ ("%s:%s" % (self.uri.username, pw)).encode("base64")[:-1]
+ )
+ handle = httplib.HTTPSConnection(self.uri.hostname, self.uri.port)
+ res = handle.request("GET", self.uri.path, "", headers)
r = handle.getresponse()
if r.status != 200:
print("Failed to connect! Wrong Password ?")
sys.exit(5)
self.data = r.read()
handle.close()
- headers = ['dtstart','summary','location','description']
+ headers = ["dtstart", "summary", "location", "description"]
for cal in vobject.readComponents(self.data):
for ev in cal.vevent_list:
details = {}
details[p.name.lower()] = p.value
self.events.append(Event(details))
- def filter(self,query):
- (key, name) = query.split(':')
+ def filter(self, query):
+ (key, name) = query.split(":")
fev = []
- if key == 'year':
+ if key == "year":
for ev in self.events:
if ev.start.year == int(name):
fev.append(ev)
for ev in self.events:
ev.geocode()
- def sorted(self,order):
- if order == 'start':
+ def sorted(self, order):
+ if order == "start":
self.events.sort(key=lambda x: x.start, reverse=True)
-
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
- evlist = db.variablelist(db.title(u'Stammtisch träffar'),
- role=u'calendar')
+ evlist = db.variablelist(
+ db.title("Stammtisch träffar"), role="calendar"
+ )
for ev in self.events:
evlist.append(ev.db_xml())
return evlist
class Event(object):
- def __init__(self,ev):
- self.start = ev['dtstart']
- self.end = ev['dtend']
- self.summary = ev['summary']
- self.location = ev['location']
+ def __init__(self, ev):
+ self.start = ev["dtstart"]
+ self.end = ev["dtend"]
+ self.summary = ev["summary"]
+ self.location = ev["location"]
self.org = None
- self.description = ev['description']
+ self.description = ev["description"]
def geocode(self):
- addrlist = self.location.split(',')
+ addrlist = self.location.split(",")
addrfmt = "org,street,postcode,city"
- adict = addrfmt.split(',')
- argdict = dict(zip(adict,addrlist))
- self.org = Organization(argdict['org'])
- del argdict['org']
+ adict = addrfmt.split(",")
+ argdict = dict(zip(adict, addrlist))
+ self.org = Organization(argdict["org"])
+ del argdict["org"]
addr = Address(**argdict)
self.org.add_address(addr)
addr.geocode()
-
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
# Build paragraphs from the description
paras = db.listitem(role="description")
- for p in re.split('\n\n',unicode(self.description)):
- paras.append(db.para(p,role="desc"))
+ for p in re.split("\n\n", unicode(self.description)):
+ paras.append(db.para(p, role="desc"))
lst = db.varlistentry(
- db.term(db.date(self.start.strftime('%Y %b %d'),role='calendar')),
- db.listitem(db.para(self.summary),db.variablelist(
+ db.term(db.date(self.start.strftime("%Y %b %d"), role="calendar")),
+ db.listitem(
+ db.para(self.summary),
+ db.variablelist(
db.varlistentry(
db.term("Tid"),
- db.listitem(db.para(self.start.strftime('%H:%M')))
- ),
+ db.listitem(db.para(self.start.strftime("%H:%M"))),
+ ),
db.varlistentry(
db.term("Plats"),
- db.listitem(db.para(self.org.db_xml()))
- ),
- db.varlistentry(
- db.term("Beskrivning"),
- paras
- )
- )
- )
- )
+ db.listitem(db.para(self.org.db_xml())),
+ ),
+ db.varlistentry(db.term("Beskrivning"), paras),
+ ),
+ ),
+ )
return lst
+
# ln = db.link("Text",**{XLINK+"href": "https://"})
if __name__ == "__main__":
if al[0] == "xptr":
argument = al[1]
- (uristr,query) = argument.split('|')
+ (uristr, query) = argument.split("|")
uri = urlparse(uristr)
events = Events(uri)
events.filter(query)
events.geocode()
- events.sorted('start')
+ events.sorted("start")
exml = events.db_xml()
- #clean_db(exml)
+ # clean_db(exml)
- #print(etree.tostring(cxml, pretty_print=True))
- #sys.stdout.write(out.encode('utf-8'))
- sys.stdout.write(etree.tostring(exml,encoding='UTF-8',pretty_print=False))
+ # print(etree.tostring(cxml, pretty_print=True))
+ # sys.stdout.write(out.encode('utf-8'))
+ sys.stdout.write(
+ etree.tostring(exml, encoding="UTF-8", pretty_print=False)
+ )
from treecutter import constants as const
+
class Latest(object):
def __init__(self, count):
self.count = count
self.events = []
- for dirname, dirnames, filenames in os.walk('.'):
+ for dirname, dirnames, filenames in os.walk("."):
for filename in filenames:
- if fnmatch.fnmatch(filename, '*index.??.xml'):
- file_ = os.path.join(dirname,filename)
+ if fnmatch.fnmatch(filename, "*index.??.xml"):
+ file_ = os.path.join(dirname, filename)
doc = etree.parse(file_)
if doc.xpath(
- u'/db:article/db:info/db:mediaobject/db:imageobject/db:imagedata',
- namespaces=const.XPATH):
- self.events.append(Doc(doc,dirname))
+ "/db:article/db:info/db:mediaobject/db:imageobject/db:imagedata",
+ namespaces=const.XPATH,
+ ):
+ self.events.append(Doc(doc, dirname))
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
- doclist = db.itemizedlist(db.title(u'Latest Projects'))
+ doclist = db.itemizedlist(db.title("Latest Projects"))
for ev in self.events:
doclist.append(ev.db_xml())
return doclist
class Doc(object):
- def __init__(self,doc,link):
- self.pubdate = doc.xpath(u'/db:article/db:info/db:pubdate',
- namespaces=const.XPATH)
- self.fn = doc.xpath(u'/db:article/db:info/db:author/db:personname/db:firstname',
- namespaces=const.XPATH)
- self.ln = doc.xpath(u'/db:article/db:info/db:author/db:personname/db:surname',
- namespaces=const.XPATH)
- self.title = doc.xpath(u'/db:article/db:info/db:title',
- namespaces=const.XPATH)
+ def __init__(self, doc, link):
+ self.pubdate = doc.xpath(
+ "/db:article/db:info/db:pubdate", namespaces=const.XPATH
+ )
+ self.fn = doc.xpath(
+ "/db:article/db:info/db:author/db:personname/db:firstname",
+ namespaces=const.XPATH,
+ )
+ self.ln = doc.xpath(
+ "/db:article/db:info/db:author/db:personname/db:surname",
+ namespaces=const.XPATH,
+ )
+ self.title = doc.xpath(
+ "/db:article/db:info/db:title", namespaces=const.XPATH
+ )
self.photo = doc.xpath(
- u'/db:article/db:info/db:mediaobject/db:imageobject/db:imagedata',
- namespaces=const.XPATH)
+ "/db:article/db:info/db:mediaobject/db:imageobject/db:imagedata",
+ namespaces=const.XPATH,
+ )
self.link = link
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
lst = db.listitem(
- db.link(db.inlinemediaobject(
+ db.link(
+ db.inlinemediaobject(
db.imageobject(
- db.imagedata(fileref=self.link+'/'+self.photo[0].attrib['fileref'],
- width="198", depth="111"))),
- **{const.XLINK+"href": self.link}),
- db.para("Project: "+self.title[0].text,role="project"),
- db.para("Author: "+self.fn[0].text+" "+self.ln[0].text,role="author"),
- db.link("Read Details",**{const.XLINK+"href": self.link})
- )
+ db.imagedata(
+ fileref=self.link
+ + "/"
+ + self.photo[0].attrib["fileref"],
+ width="198",
+ depth="111",
+ )
+ )
+ ),
+ **{const.XLINK + "href": self.link}
+ ),
+ db.para("Project: " + self.title[0].text, role="project"),
+ db.para(
+ "Author: " + self.fn[0].text + " " + self.ln[0].text,
+ role="author",
+ ),
+ db.link("Read Details", **{const.XLINK + "href": self.link}),
+ )
return lst
+
if __name__ == "__main__":
for arg in sys.argv[1:]:
al = arg.split("=")
latest = Latest(argument)
exml = latest.db_xml()
- sys.stdout.write(etree.tostring(exml,encoding='UTF-8',pretty_print=False))
+ sys.stdout.write(
+ etree.tostring(exml, encoding="UTF-8", pretty_print=False)
+ )
import locale
for arg in sys.argv[1:]:
- al = arg.split("=")
- if al[0] == "lang":
- lang = al[1]
- if al[0] == "xptr":
- argument = al[1]
+ al = arg.split("=")
+ if al[0] == "lang":
+ lang = al[1]
+ if al[0] == "xptr":
+ argument = al[1]
# example input: ./openinghours.py lang=cs 'xptr=1,4;1-2|2,3;3-4:35'
loc_alias = {
- 'sv': 'sv_SE.utf8',
- 'de': 'de_DE.utf8',
- 'en': 'en_US.utf8',
- 'cs': 'cs_CZ.utf8',
+ "sv": "sv_SE.utf8",
+ "de": "de_DE.utf8",
+ "en": "en_US.utf8",
+ "cs": "cs_CZ.utf8",
}
title = {
- 'sv': u'Öppningstider',
- 'de': u'Öffnungszeiten',
- 'en': u'Opening hours',
- 'cs': u'Otevírací doba',
+ "sv": "Öppningstider",
+ "de": "Öffnungszeiten",
+ "en": "Opening hours",
+ "cs": "Otevírací doba",
}
day_header = {
- 'sv': u'Dag',
- 'de': u'Tag',
- 'en': u'Day',
- 'cs': u'Den',
+ "sv": "Dag",
+ "de": "Tag",
+ "en": "Day",
+ "cs": "Den",
}
time_header = {
- 'sv': u'Tid',
- 'de': u'Zeit',
- 'en': u'Time',
- 'cs': u'Čas',
+ "sv": "Tid",
+ "de": "Zeit",
+ "en": "Time",
+ "cs": "Čas",
}
loc = locale.getlocale()
locale.setlocale(locale.LC_ALL, loc_alias[lang])
-day_names = [ locale.nl_langinfo(x)
- for x in (locale.DAY_2, locale.DAY_3, locale.DAY_4,
- locale.DAY_5, locale.DAY_6, locale.DAY_7, locale.DAY_1) ]
+day_names = [
+ locale.nl_langinfo(x)
+ for x in (
+ locale.DAY_2,
+ locale.DAY_3,
+ locale.DAY_4,
+ locale.DAY_5,
+ locale.DAY_6,
+ locale.DAY_7,
+ locale.DAY_1,
+ )
+]
locale.setlocale(locale.LC_ALL, loc)
-times = dict(enumerate('-------',1))
+times = dict(enumerate("-------", 1))
+
+blocks = argument.split("|")
-blocks = argument.split('|')
def tfmt(time):
- if ':' in time:
- (th, tm) = time.split(':')
+ if ":" in time:
+ (th, tm) = time.split(":")
else:
th = time
- tm = '00'
+ tm = "00"
td = datetime.datetime(2000, 1, 1, int(th), int(tm), 0)
- return '{:%H:%M}'.format(td)
+ return "{:%H:%M}".format(td)
+
for b in blocks:
- (days, time) = b.split(';')
- days = days.split(',')
- (ts, te) = time.split('-')
- t = tfmt(ts)+' - '+tfmt(te)
+ (days, time) = b.split(";")
+ days = days.split(",")
+ (ts, te) = time.split("-")
+ t = tfmt(ts) + " - " + tfmt(te)
for d in days:
times[int(d)] = t
-out = u''
-out += '''<table frame='all' xmlns="http://docbook.org/ns/docbook"
+out = ""
+out += (
+ """<table frame='all' xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink">
- <title>'''+title[lang]+'''</title>
+ <title>"""
+ + title[lang]
+ + """</title>
<tgroup cols='2' align='left' colsep='1' rowsep='1'>
<colspec colname='day'/>
<colspec colname='time'/>
<thead>
<row>
- <entry align="center">'''+day_header[lang]+'''</entry>
- <entry align="center">'''+time_header[lang]+'''</entry>
+ <entry align="center">"""
+ + day_header[lang]
+ + """</entry>
+ <entry align="center">"""
+ + time_header[lang]
+ + """</entry>
</row>
</thead>
- <tbody>'''
+ <tbody>"""
+)
-for day,t in zip(day_names,times.values()) :
- out += '''
+for day, t in zip(day_names, times.values()):
+ out += """
<row>
<entry>%s</entry>
<entry>%s</entry>
- </row>''' % (day.decode('utf-8'),t)
-out += '''
+ </row>""" % (
+ day.decode("utf-8"),
+ t,
+ )
+out += """
</tbody>
</tgroup>
</table>
-'''
+"""
-sys.stdout.write(out.encode('utf-8'))
+sys.stdout.write(out.encode("utf-8"))
import os
import sys
import glob
-from shutil import rmtree,copytree
+from shutil import rmtree, copytree
from lxml import etree
from lxml.builder import ElementMaker
from treecutter.image import Image
from treecutter import constants as const
+
class PhotoAlbum(object):
def __init__(self, uri):
self.uri = unicode(uri)
d = self.uri
for root, subdir, files in os.walk(d):
for f in files:
- img = Image(os.path.join(root,f))
+ img = Image(os.path.join(root, f))
if not img.generated():
self.filelist.append(img)
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
- sl = db.itemizedlist(**{const.XML+"id": "slider"})
+ sl = db.itemizedlist(**{const.XML + "id": "slider"})
cnt = 0
for img in self.filelist:
cnt = cnt + 1
caption = db.caption()
- for p in img.caption().split('\n\n'):
+ for p in img.caption().split("\n\n"):
caption.append(db.para(p))
- link = db.para(db.link(img.infostr(),
- **{const.XLINK+"href": img.filename()}))
+ link = db.para(
+ db.link(
+ img.infostr(), **{const.XLINK + "href": img.filename()}
+ )
+ )
caption.append(link)
sl.append(
- db.listitem(db.mediaobject(
- db.imageobject(db.imagedata(fileref=img.slider())),caption),
- **{const.XML+"id": "p%x%d" % (self.albumid,cnt)}))
+ db.listitem(
+ db.mediaobject(
+ db.imageobject(db.imagedata(fileref=img.slider())),
+ caption,
+ ),
+ **{const.XML + "id": "p%x%d" % (self.albumid, cnt)}
+ )
+ )
- th = db.itemizedlist(**{const.XML+"id": "thumb"})
+ th = db.itemizedlist(**{const.XML + "id": "thumb"})
cnt = 0
for img in self.filelist:
cnt = cnt + 1
- th.append(db.listitem(db.para(db.link(db.inlinemediaobject(
- db.imageobject(db.imagedata(fileref=img.thumbnail()))),**{const.XLINK+"href": "#p%x%d" % (self.albumid, cnt)}))))
- return db.informalfigure(sl,th,**{const.XML+"id": "box"})
+ th.append(
+ db.listitem(
+ db.para(
+ db.link(
+ db.inlinemediaobject(
+ db.imageobject(
+ db.imagedata(fileref=img.thumbnail())
+ )
+ ),
+ **{
+ const.XLINK
+ + "href": "#p%x%d" % (self.albumid, cnt)
+ }
+ )
+ )
+ )
+ )
+ return db.informalfigure(sl, th, **{const.XML + "id": "box"})
+
def recursively_empty(e):
if e.text:
return False
return all((recursively_empty(c) for c in e.iterchildren()))
+
def clean_db(xml):
context = etree.iterwalk(xml)
for action, elem in context:
if recursively_empty(elem):
parent.remove(elem)
+
if __name__ == "__main__":
for arg in sys.argv[1:]:
- al = arg.split("=",1)
+ al = arg.split("=", 1)
if al[0] == "lang":
lang = al[1]
if al[0] == "xptr":
album = PhotoAlbum(argument)
album.files()
axml = album.db_xml()
-# clean_db(axml)
+ # clean_db(axml)
- #print(etree.tostring(cxml, pretty_print=True))
- #sys.stderr.write(axml.encode('utf-8'))
- #sys.stderr.write(etree.tostring(axml,encoding='UTF-8',pretty_print=True))
- sys.stdout.write(etree.tostring(axml,encoding='UTF-8',pretty_print=True))
+ # print(etree.tostring(cxml, pretty_print=True))
+ # sys.stderr.write(axml.encode('utf-8'))
+ # sys.stderr.write(etree.tostring(axml,encoding='UTF-8',pretty_print=True))
+ sys.stdout.write(etree.tostring(axml, encoding="UTF-8", pretty_print=True))
from treecutter import constants as const
+
class Pic(object):
def __init__(self, picture, caption):
self.picture = picture
self.caption = caption
- self.sizes = [(500,500),(800,800),(1024,1024)]
+ self.sizes = [(500, 500), (800, 800), (1024, 1024)]
self.outfiles = []
def thumbnails(self):
infile = os.path.splitext(self.picture)
for size in self.sizes:
- outfile = infile[0]+"-"+str(size[0])+infile[1]
+ outfile = infile[0] + "-" + str(size[0]) + infile[1]
if infile != outfile:
try:
im = Image.open(self.picture)
def db_xml(self):
webpic = self.outfiles[0]
imw = Image.open(webpic)
- ww,dw = imw.size
+ ww, dw = imw.size
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
pics = db.para()
for f in self.outfiles:
im = Image.open(f)
- w,d = im.size
- pics.append(db.link(str(w)+"x"+str(d)+" ",**{const.XLINK+"href": f}))
- #pics = list(', '.join(pics))
- #pics.append(')')
+ w, d = im.size
+ pics.append(
+ db.link(
+ str(w) + "x" + str(d) + " ", **{const.XLINK + "href": f}
+ )
+ )
+ # pics = list(', '.join(pics))
+ # pics.append(')')
# pics.insert(0,'Sizes (')
lst = db.mediaobject(
db.imageobject(
- db.imagedata(fileref=webpic, width=str(ww), depth=str(dw))),
- db.caption(db.para(self.caption),pics))
+ db.imagedata(fileref=webpic, width=str(ww), depth=str(dw))
+ ),
+ db.caption(db.para(self.caption), pics),
+ )
return lst
+
if __name__ == "__main__":
for arg in sys.argv[1:]:
al = arg.split("=")
lang = al[1]
if al[0] == "xptr":
argument = al[1]
- p,c = argument.split("|")
- pic = Pic(p,c)
+ p, c = argument.split("|")
+ pic = Pic(p, c)
pic.thumbnails()
pxml = pic.db_xml()
- sys.stdout.write(etree.tostring(pxml,encoding='UTF-8',pretty_print=False))
+ sys.stdout.write(
+ etree.tostring(pxml, encoding="UTF-8", pretty_print=False)
+ )
import os
import sys
-from shutil import rmtree,copytree
+from shutil import rmtree, copytree
from lxml import etree
from lxml.builder import ElementMaker
from treecutter import constants as const
+
class Reprepro(object):
def __init__(self, uri):
self.uri = uri
self.filelist = []
def files(self):
- dirs = ['dists','pool']
- rootdirs = [self.uri+'/'+d for d in dirs]
- for rootdir,d in zip(rootdirs,dirs):
+ dirs = ["dists", "pool"]
+ rootdirs = [self.uri + "/" + d for d in dirs]
+ for rootdir, d in zip(rootdirs, dirs):
if os.path.exists(d):
rmtree(d)
copytree(rootdir, d)
for d in dirs:
for root, subdir, files in os.walk(d):
for file in files:
- self.filelist.append(os.path.join(root,file))
+ self.filelist.append(os.path.join(root, file))
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
co = db.computeroutput()
for f in self.filelist:
- co.append(db.filename(db.link(f,**{const.XLINK+"href": f})))
+ co.append(db.filename(db.link(f, **{const.XLINK + "href": f})))
return db.para(co)
+
def recursively_empty(e):
if e.text:
return False
return all((recursively_empty(c) for c in e.iterchildren()))
+
def clean_db(xml):
context = etree.iterwalk(xml)
for action, elem in context:
if recursively_empty(elem):
parent.remove(elem)
+
if __name__ == "__main__":
for arg in sys.argv[1:]:
- al = arg.split("=",1)
+ al = arg.split("=", 1)
if al[0] == "lang":
lang = al[1]
if al[0] == "xptr":
rxml = repo.db_xml()
clean_db(rxml)
- #print(etree.tostring(cxml, pretty_print=True))
- #sys.stdout.write(out.encode('utf-8'))
- sys.stdout.write(etree.tostring(rxml,encoding='UTF-8',pretty_print=True))
+ # print(etree.tostring(cxml, pretty_print=True))
+ # sys.stdout.write(out.encode('utf-8'))
+ sys.stdout.write(etree.tostring(rxml, encoding="UTF-8", pretty_print=True))
from lxml.builder import ElementMaker
from treecutter import constants as const
+
def append_text(tree, text):
children = tree.getchildren()
if children:
tree.text += text
return tree
+
def linkify(text):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
ent = db.entry(align="center")
r = re.search(r"(?P<url>https?://[^ ]+)\|(?P<title>[\w\-\.]+)", text)
if r:
rep = r.groups(r.group(1))
- ent.append(db.link(rep[1],**{const.XLINK+"href": rep[0]}))
- ts = text.split(',')
+ ent.append(db.link(rep[1], **{const.XLINK + "href": rep[0]}))
+ ts = text.split(",")
c = 0
for t in ts:
c = c + 1
n = parseaddr(t)
- if n[0] != '' and n[1] != '':
- ent.append(db.address(db.personname(db.firstname(n[0].split(' ')[0]), db.surname(n[0].split(' ')[1])),db.email(n[1])))
+ if n[0] != "" and n[1] != "":
+ ent.append(
+ db.address(
+ db.personname(
+ db.firstname(n[0].split(" ")[0]),
+ db.surname(n[0].split(" ")[1]),
+ ),
+ db.email(n[1]),
+ )
+ )
else:
- append_text(ent,t)
- if c<len(ts):
- append_text(ent,',')
+ append_text(ent, t)
+ if c < len(ts):
+ append_text(ent, ",")
return ent
+
class Table(object):
def __init__(self, tablefile, title):
- self.tablefile = tablefile
+ self.tablefile = tablefile
self.title = title
self.cols = []
def parse(self):
- f = codecs.open(self.tablefile, encoding='utf-8')
+ f = codecs.open(self.tablefile, encoding="utf-8")
for line in f:
- c = re.split(r'\t+', line.rstrip())
+ c = re.split(r"\t+", line.rstrip())
self.cols.append(c)
def db_xml(self):
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
cols = self.cols
nrcol = str(len(cols[0]))
- if cols[0][0][0] == '*':
+ if cols[0][0][0] == "*":
cols[0][0] = cols[0][0][1:]
h = cols.pop(0)
row = db.row()
body.append(row)
for e in r:
row.append(linkify(e))
- tab = db.table(db.title(self.title),
- db.tgroup(head,body,cols=nrcol,
- colsep='1',rowsep='1',align='left'),
- frame='all')
+ tab = db.table(
+ db.title(self.title),
+ db.tgroup(
+ head, body, cols=nrcol, colsep="1", rowsep="1", align="left"
+ ),
+ frame="all",
+ )
return tab
+
if __name__ == "__main__":
for arg in sys.argv[1:]:
al = arg.split("=")
if al[0] == "lang":
lang = al[1]
if al[0] == "xptr":
- argument = al[1].decode('utf-8')
+ argument = al[1].decode("utf-8")
- (tablefile, title) = argument.split('|')
- tab = Table(tablefile,title)
+ (tablefile, title) = argument.split("|")
+ tab = Table(tablefile, title)
tab.parse()
txml = tab.db_xml()
- sys.stdout.write(etree.tostring(txml,encoding='UTF-8',pretty_print=False))
+ sys.stdout.write(
+ etree.tostring(txml, encoding="UTF-8", pretty_print=False)
+ )