import fnmatch
from lxml import etree
import treecutter.constants as const
+from treecutter.docbook import Docbook
import re
class Directory():
for filename in filenames:
if fnmatch.fnmatch(filename, '*.xml'):
file_ = os.path.join(dirname,filename)
- doc = etree.parse(file_)
- title = doc.xpath(u'/db:article/db:info/db:title',namespaces=const.XPATH)
- menu = doc.xpath(u'/db:article/db:info/db:titleabbrev',namespaces=const.XPATH)
- if title and menu:
+ doc = Docbook(file_)
+ (title, menu) = doc.title()
+ draft = doc.status() == "draft"
+ level = doc.userlevel()
+
+# doc = etree.parse(file_)
+# title = doc.xpath(u'/db:article/db:info/db:title',namespaces=const.XPATH)
+# menu = doc.xpath(u'/db:article/db:info/db:titleabbrev',namespaces=const.XPATH)
+# draft = doc.xpath(u'/db:article[@status="draft"]',namespaces=const.XPATH)
+ if draft and draftflag:
+ draft = False
+ if title and menu and not draft and level <= levelflag:
base = self._basepath.match(file_).group()
link = base.replace('index','')[1:]
self._tree.append(link)
import os
import subprocess
+import re
from lxml import etree
from lxml.builder import ElementMaker
import treecutter.constants as const
from treecutter.image import Image
-from treecutter.tools import warning
+#from treecutter.tools import warning
class Docbook():
"""Class representing a docbook document"""
ta = unicode(ta[0].text)
return (t, ta)
+    def status(self):
+        # Return the publication status of the document (e.g. "draft"),
+        # read from the status attribute on the root db:article element.
+        # Returns None when the attribute is absent.
+        status = self._doc.xpath(u'/db:article[@status]',namespaces=const.XPATH)
+        if status:
+            return unicode(status[0].get('status'))
+        return None
+
+    def role(self):
+        # Return the role attribute of the root db:article element.
+        # Callers use this as a template name; defaults to 'index'
+        # when no role attribute is present.
+        art = self._doc.xpath(u'/db:article[@role]',namespaces=const.XPATH)
+        if art:
+            return unicode(art[0].get('role'))
+        return 'index'
+
+    def userlevel(self):
+        # Map the userlevel attribute of the root db:article element
+        # ('Level 1' .. 'Level 5') to an integer 1..5. Returns 0 when the
+        # attribute is absent or its value is not one of the known levels.
+        lvl = self._doc.xpath(u'/db:article[@userlevel]',namespaces=const.XPATH)
+        if lvl:
+            lvlstr = unicode(lvl[0].get('userlevel'))
+            return {
+                'Level 1': 1,
+                'Level 2': 2,
+                'Level 3': 3,
+                'Level 4': 4,
+                'Level 5': 5,
+                }.get(lvlstr, 0)
+        return 0
+
def expand_imageobjects(self):
cwd = os.getcwd()
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
if ext in const.valid_scripts:
exe = []
script = os.path.join(os.path.abspath(self._dirname)+'/'+href)
- if os.path.isfile(script):
+ if os.path.isfile(script) and os.access(script, os.X_OK):
exe.append(script)
else:
if href in resource_listdir('xinclude', ''):
exe.append("lang="+alang)
if xpointer:
exe.append("xptr="+xpointer)
+ if exe == []:
+ continue
print " executing %15s" % (href),
ts = time()
os.chdir(self._dirname)
stderr=subprocess.PIPE)
(stdout, stderr) = xml.communicate()
#print xml.returnvalue
- if stderr:
- warning("%s : %s" % (" ".join(exe),stderr))
- warning(stdout)
- exit
+# if stderr:
+# warning("%s : %s" % (" ".join(exe),stderr))
+# warning(stdout)
+# exit
os.chdir(cwd)
te = time()
print " [%5.2f s] (%s)" % (round(te-ts,2),xpointer)
idp.insert(idp.index(c)+1,xstr)
idp.remove(c)
+    def xinclude(self):
+        # Resolve XInclude references in the parsed document in place,
+        # delegating to lxml's ElementTree.xinclude().
+        self._doc.xinclude()
+
def collect_links(self):
res = []
for r in self._doc.xpath(u"//db:link[@xlink:href]",namespaces=const.XPATH):
default=os.path.dirname(os.getcwd())+'/htdocs/')
parser.add_argument('--subdir', nargs='?',
default='')
+ parser.add_argument('--draft', action='store_true')
+ parser.add_argument('--level', type=int, choices=[1, 2, 3, 4, 5], default=0)
+
args = parser.parse_args()
ts = time()
+ print "--= Treecutter =--"
dir_ = Directory()
sitemap = Sitemap(args)
# Scanning current directory and subdirectory for docbook articles
- dir_.scan()
+ dir_.scan(args.draft, args.level)
# Reading the sitemap.txt building a Trie structure
sitemap.read_map()
# Comparing the current state of the dir with the sitemap
- missing = dir_.set() - sitemap.set()
- removed = sitemap.set() - dir_.set()
+ dirset = dir_.set()
+ missing = dirset - sitemap.set()
+ removed = sitemap.set() - dirset
for page in removed:
- print page+' pages missing!!'
+        print page+' page not available in this config'
for page in missing:
print 'adding missing page '+page
sitemap.add_link(page)
- if len(missing)+len(removed) != 0:
+ if len(missing) != 0:
print 'writing new sitemap - please adjust if needed'
sitemap.write_map()
+ dirsitemap = Sitemap(args)
+ for l in sitemap.linklist():
+ if l in dirset:
+ dirsitemap.add_link(l)
+
+
# Generate a pygraphviz image of the site (TODO: currently not used)
- sitemap.graph()
+ dirsitemap.graph()
# Start processing the docbook articles to static html
- sitemap.process()
+ dirsitemap.process()
# Publish static html and style data (css, images, fonts) to destination dir
t1 = time()
- sitemap.publish()
+ dirsitemap.publish()
t2 = time()
print "Publish [%5.2f s]" % (round(t2-t1,2))
print "Total [%5.2f s]" % (round(t2-ts,2))
#!/usr/bin/python
+# -*- coding: utf-8 -*-
import os
import subprocess
import tempfile
self._title = None
self._menu = None
self._rendered_article = None
+ self._template = 'index'
+ self._status = None
def language(self):
return self._lang
(self._title, self._menu) = self._doc.title()
self._doc.expand_imageobjects()
self._doc.parse_xincludes()
+ self._doc.xinclude()
+ self._template = self._doc.role()
+ self._status = self._doc.status()
doc = self._doc.collect_links()
img = self._doc.collect_images()
vid = self._doc.collect_videos()
self._rendered_article = self._doc.xslt(transform['xhtml5'])
def template(self,sitemap,style,tdir,subdir):
- htmlmenu = sitemap.gen_menu(self._lang,None,"menu")
+ htmlmenu = sitemap.gen_menu(self._lang,None,"links")
levelmenu = sitemap.gen_menu(self._lang,self,"tree")
langmenu = sitemap.lang_menu(self._lang,self._link)
article = self._rendered_article
'subdir':subdir}
s = unicode(style, "utf-8")
- t = s+'index.'+self._lang+'.xhtml.tmpl'
+ t = s+self._template+'.'+self._lang+'.xhtml'
template = templateEnv.get_template( t )
templateout = template.render( templateVars )
def set(self):
return set(link.link() for link in self._tree)
+    def linklist(self):
+        # Like set() above, but returns the links as a list, preserving
+        # the tree's iteration order (and any duplicate entries).
+        return [link.link() for link in self._tree]
+
# Main driver in the application processing the documents
# in the collected sitemap
def process(self):
t1 = time()
- print "Prepareing the input"
for link in self._tree:
link.prepare()
t2 = time()