sitemap: support for draft, levels and templates
author    Fredrik Unger <fred@tree.se>
Tue, 22 Jan 2019 20:10:26 +0000 (21:10 +0100)
committer Fredrik Unger <fred@tree.se>
Tue, 22 Jan 2019 20:10:26 +0000 (21:10 +0100)
Adding support for using the article status attribute to indicate
whether a document is just a draft. Adding the article attribute
userlevel to create 5 access levels. Access control itself is not
implemented, but the different sites can be uploaded to different
areas which are protected by, for example, Apache access control.
This makes sure that one content source is used for each site.

Adding template support by using the article attribute role to
select the style template to generate the page.

treecutter/directory.py
treecutter/docbook.py
treecutter/main.py
treecutter/page.py
treecutter/sitemap.py

index 1b7a3e9f3ee6562ff9bfedd51ea0bdac409b1d7c..d4a0973cbd6cb3eee76906a7f6babf56585a5955 100644 (file)
@@ -3,6 +3,7 @@ import os
 import fnmatch
 from lxml import etree
 import treecutter.constants as const
+from treecutter.docbook import Docbook
 import re
 
 class Directory():
@@ -17,10 +18,18 @@ class Directory():
             for filename in filenames:
                 if fnmatch.fnmatch(filename, '*.xml'):
                     file_ = os.path.join(dirname,filename)
-                    doc = etree.parse(file_)
-                    title = doc.xpath(u'/db:article/db:info/db:title',namespaces=const.XPATH)
-                    menu  = doc.xpath(u'/db:article/db:info/db:titleabbrev',namespaces=const.XPATH)
-                    if title and menu:
+                    doc = Docbook(file_)
+                    (title, menu) = doc.title()
+                    draft = doc.status() == "draft"
+                    level = doc.userlevel()
+
+#                    doc = etree.parse(file_)
+#                    title = doc.xpath(u'/db:article/db:info/db:title',namespaces=const.XPATH)
+#                    menu  = doc.xpath(u'/db:article/db:info/db:titleabbrev',namespaces=const.XPATH)
+#                    draft = doc.xpath(u'/db:article[@status="draft"]',namespaces=const.XPATH)
+                    if draft and draftflag:
+                        draft = False
+                    if title and menu and not draft and level <= levelflag:
                         base = self._basepath.match(file_).group()
                         link = base.replace('index','')[1:]
                         self._tree.append(link)
index 32cff722ed72211e414d265250a72ece77202da0..9e970c98b679b2d8b81146e99e0e3141fac02d65 100644 (file)
@@ -2,6 +2,7 @@
 
 import os
 import subprocess
+import re
 
 from lxml import etree
 from lxml.builder import ElementMaker
@@ -11,7 +12,7 @@ from time import time
 
 import treecutter.constants as const
 from treecutter.image import Image
-from treecutter.tools import warning
+#from treecutter.tools import warning
 
 class Docbook():
     """Class representing a docbook document"""
@@ -29,6 +30,31 @@ class Docbook():
            ta = unicode(ta[0].text)
         return (t, ta)
 
+    def status(self):
+        status = self._doc.xpath(u'/db:article[@status]',namespaces=const.XPATH)
+        if status:
+            return unicode(status[0].get('status'))
+        return None
+
+    def role(self):
+        art = self._doc.xpath(u'/db:article[@role]',namespaces=const.XPATH)
+        if art:
+            return unicode(art[0].get('role'))
+        return 'index'
+
+    def userlevel(self):
+        lvl = self._doc.xpath(u'/db:article[@userlevel]',namespaces=const.XPATH)
+        if lvl:
+            lvlstr = unicode(lvl[0].get('userlevel'))
+            return {
+                'Level 1': 1,
+                'Level 2': 2,
+                'Level 3': 3,
+                'Level 4': 4,
+                'Level 5': 5,
+            }.get(lvlstr, 0)
+        return 0
+
     def expand_imageobjects(self):
         cwd = os.getcwd()
         db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
@@ -63,7 +89,7 @@ class Docbook():
             if ext in const.valid_scripts:
                 exe = []
                 script = os.path.join(os.path.abspath(self._dirname)+'/'+href)
-                if os.path.isfile(script):
+                if os.path.isfile(script) and os.access(script, os.X_OK):
                     exe.append(script)
                 else:
                     if href in resource_listdir('xinclude', ''):
@@ -75,6 +101,8 @@ class Docbook():
                     exe.append("lang="+alang)
                 if xpointer:
                     exe.append("xptr="+xpointer)
+                if exe == []:
+                    continue
                 print "  executing %15s" % (href),
                 ts = time()
                 os.chdir(self._dirname)
@@ -82,10 +110,10 @@ class Docbook():
                                        stderr=subprocess.PIPE)
                 (stdout, stderr) = xml.communicate()
                 #print xml.returnvalue
-                if stderr:
-                    warning("%s : %s" % (" ".join(exe),stderr))
-                    warning(stdout)
-                    exit
+#                if stderr:
+#                    warning("%s : %s" % (" ".join(exe),stderr))
+#                    warning(stdout)
+#                    exit
                 os.chdir(cwd)
                 te = time()
                 print " [%5.2f s]  (%s)" % (round(te-ts,2),xpointer)
@@ -95,6 +123,9 @@ class Docbook():
                 idp.insert(idp.index(c)+1,xstr)
                 idp.remove(c)
 
+    def xinclude(self):
+        self._doc.xinclude()
+
     def collect_links(self):
         res = []
         for r in self._doc.xpath(u"//db:link[@xlink:href]",namespaces=const.XPATH):
index 6d7b7ddcecbd679f4775cebaaa55f54e7a288fce..c685f7d3756951ea9a123c29461a52b49ad8aa06 100644 (file)
@@ -14,37 +14,48 @@ def main():
                         default=os.path.dirname(os.getcwd())+'/htdocs/')
     parser.add_argument('--subdir', nargs='?',
                         default='')
+    parser.add_argument('--draft', action='store_true')
+    parser.add_argument('--level', type=int, choices=[1, 2, 3, 4, 5], default=0)
+
     args = parser.parse_args()
 
     ts = time()
+    print "--= Treecutter =--"
     dir_ = Directory()
     sitemap = Sitemap(args)
 
     # Scanning current directory and subdirectory for docbook articles
-    dir_.scan()
+    dir_.scan(args.draft, args.level)
     # Reading the sitemap.txt building a Trie structure
     sitemap.read_map()
 
     # Comparing the current state of the dir with the sitemap
-    missing = dir_.set() - sitemap.set()
-    removed = sitemap.set() - dir_.set()
+    dirset = dir_.set()
+    missing = dirset - sitemap.set()
+    removed = sitemap.set() - dirset
     for page in removed:
-        print page+' pages missing!!'
+        print page+' page not availible in this config'
     for page in missing:
         print 'adding missing page '+page
         sitemap.add_link(page)
-    if len(missing)+len(removed) != 0:
+    if len(missing) != 0:
         print 'writing new sitemap - please adjust if needed'
         sitemap.write_map()
 
+    dirsitemap = Sitemap(args)
+    for l in sitemap.linklist():
+        if l in dirset:
+            dirsitemap.add_link(l)
+
+
     # Generate a pygraphviz image of the site (TODO: currently not used)
-    sitemap.graph()
+    dirsitemap.graph()
     # Start processing the docbook articles to static html
-    sitemap.process()
+    dirsitemap.process()
 
     # Publish static html and style data (css, images, fonts) to destination dir
     t1 = time()
-    sitemap.publish()
+    dirsitemap.publish()
     t2 = time()
     print "Publish  [%5.2f s]" % (round(t2-t1,2))
     print "Total    [%5.2f s]" % (round(t2-ts,2))
index 3da0eef87674995d46712ec9171b9fbff89ad358..c05ef90b4573efd07aeac697e5f3b44770dbd659 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
 import os
 import subprocess
 import tempfile
@@ -26,6 +27,8 @@ class Page():
         self._title = None
         self._menu = None
         self._rendered_article = None
+        self._template = 'index'
+        self._status = None
 
     def language(self):
         return self._lang
@@ -44,6 +47,9 @@ class Page():
         (self._title, self._menu) = self._doc.title()
         self._doc.expand_imageobjects()
         self._doc.parse_xincludes()
+        self._doc.xinclude()
+        self._template = self._doc.role()
+        self._status = self._doc.status()
         doc = self._doc.collect_links()
         img = self._doc.collect_images()
         vid = self._doc.collect_videos()
@@ -54,7 +60,7 @@ class Page():
         self._rendered_article = self._doc.xslt(transform['xhtml5'])
 
     def template(self,sitemap,style,tdir,subdir):
-        htmlmenu =  sitemap.gen_menu(self._lang,None,"menu")
+        htmlmenu =  sitemap.gen_menu(self._lang,None,"links")
         levelmenu = sitemap.gen_menu(self._lang,self,"tree")
         langmenu = sitemap.lang_menu(self._lang,self._link)
         article = self._rendered_article
@@ -69,7 +75,7 @@ class Page():
                         'subdir':subdir}
 
         s = unicode(style, "utf-8")
-        t = s+'index.'+self._lang+'.xhtml.tmpl'
+        t = s+self._template+'.'+self._lang+'.xhtml'
         template = templateEnv.get_template( t )
         templateout = template.render( templateVars )
 
index 707067f1f8cf28a284759248ce111ef520bf780a..347440b777eb8418158ed90d21ceaa0ac139b4fd 100644 (file)
@@ -57,11 +57,13 @@ class Sitemap():
     def set(self):
         return set(link.link() for link in self._tree)
 
+    def linklist(self):
+        return [link.link() for link in self._tree]
+
     # Main driver in the application processing the documents
     # in the collected sitemap
     def process(self):
         t1 = time()
-        print "Prepareing the input"
         for link in self._tree:
             link.prepare()
         t2 = time()