Add support for using the article 'status' attribute to indicate
that a document is just a draft. Add the article attribute
'userlevel' to create five access levels. Access control is not
implemented, but the different sites can be uploaded to different
areas that are protected by, for example, Apache access control.
This ensures that a single content source is used for each site.
Add template support by using the article attribute 'role' to
select the style template used to generate the page.
import fnmatch
from lxml import etree
import treecutter.constants as const
import fnmatch
from lxml import etree
import treecutter.constants as const
+from treecutter.docbook import Docbook
import re
class Directory():
import re
class Directory():
for filename in filenames:
if fnmatch.fnmatch(filename, '*.xml'):
file_ = os.path.join(dirname,filename)
for filename in filenames:
if fnmatch.fnmatch(filename, '*.xml'):
file_ = os.path.join(dirname,filename)
- doc = etree.parse(file_)
- title = doc.xpath(u'/db:article/db:info/db:title',namespaces=const.XPATH)
- menu = doc.xpath(u'/db:article/db:info/db:titleabbrev',namespaces=const.XPATH)
- if title and menu:
+ doc = Docbook(file_)
+ (title, menu) = doc.title()
+ draft = doc.status() == "draft"
+ level = doc.userlevel()
+
+# doc = etree.parse(file_)
+# title = doc.xpath(u'/db:article/db:info/db:title',namespaces=const.XPATH)
+# menu = doc.xpath(u'/db:article/db:info/db:titleabbrev',namespaces=const.XPATH)
+# draft = doc.xpath(u'/db:article[@status="draft"]',namespaces=const.XPATH)
+ if draft and draftflag:
+ draft = False
+ if title and menu and not draft and level <= levelflag:
base = self._basepath.match(file_).group()
link = base.replace('index','')[1:]
self._tree.append(link)
base = self._basepath.match(file_).group()
link = base.replace('index','')[1:]
self._tree.append(link)
import os
import subprocess
import os
import subprocess
from lxml import etree
from lxml.builder import ElementMaker
from lxml import etree
from lxml.builder import ElementMaker
import treecutter.constants as const
from treecutter.image import Image
import treecutter.constants as const
from treecutter.image import Image
-from treecutter.tools import warning
+#from treecutter.tools import warning
class Docbook():
"""Class representing a docbook document"""
class Docbook():
"""Class representing a docbook document"""
ta = unicode(ta[0].text)
return (t, ta)
ta = unicode(ta[0].text)
return (t, ta)
+ def status(self):
+ status = self._doc.xpath(u'/db:article[@status]',namespaces=const.XPATH)
+ if status:
+ return unicode(status[0].get('status'))
+ return None
+
+ def role(self):
+ art = self._doc.xpath(u'/db:article[@role]',namespaces=const.XPATH)
+ if art:
+ return unicode(art[0].get('role'))
+ return 'index'
+
+ def userlevel(self):
+ lvl = self._doc.xpath(u'/db:article[@userlevel]',namespaces=const.XPATH)
+ if lvl:
+ lvlstr = unicode(lvl[0].get('userlevel'))
+ return {
+ 'Level 1': 1,
+ 'Level 2': 2,
+ 'Level 3': 3,
+ 'Level 4': 4,
+ 'Level 5': 5,
+ }.get(lvlstr, 0)
+ return 0
+
def expand_imageobjects(self):
cwd = os.getcwd()
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
def expand_imageobjects(self):
cwd = os.getcwd()
db = ElementMaker(namespace=const.DB_NS, nsmap=const.NSMAP)
if ext in const.valid_scripts:
exe = []
script = os.path.join(os.path.abspath(self._dirname)+'/'+href)
if ext in const.valid_scripts:
exe = []
script = os.path.join(os.path.abspath(self._dirname)+'/'+href)
- if os.path.isfile(script):
+ if os.path.isfile(script) and os.access(script, os.X_OK):
exe.append(script)
else:
if href in resource_listdir('xinclude', ''):
exe.append(script)
else:
if href in resource_listdir('xinclude', ''):
exe.append("lang="+alang)
if xpointer:
exe.append("xptr="+xpointer)
exe.append("lang="+alang)
if xpointer:
exe.append("xptr="+xpointer)
+ if exe == []:
+ continue
print " executing %15s" % (href),
ts = time()
os.chdir(self._dirname)
print " executing %15s" % (href),
ts = time()
os.chdir(self._dirname)
stderr=subprocess.PIPE)
(stdout, stderr) = xml.communicate()
#print xml.returnvalue
stderr=subprocess.PIPE)
(stdout, stderr) = xml.communicate()
#print xml.returnvalue
- if stderr:
- warning("%s : %s" % (" ".join(exe),stderr))
- warning(stdout)
- exit
+# if stderr:
+# warning("%s : %s" % (" ".join(exe),stderr))
+# warning(stdout)
+# exit
os.chdir(cwd)
te = time()
print " [%5.2f s] (%s)" % (round(te-ts,2),xpointer)
os.chdir(cwd)
te = time()
print " [%5.2f s] (%s)" % (round(te-ts,2),xpointer)
idp.insert(idp.index(c)+1,xstr)
idp.remove(c)
idp.insert(idp.index(c)+1,xstr)
idp.remove(c)
    def xinclude(self):
        """Resolve XInclude directives in the parsed lxml document in place."""
        self._doc.xinclude()
+
def collect_links(self):
res = []
for r in self._doc.xpath(u"//db:link[@xlink:href]",namespaces=const.XPATH):
def collect_links(self):
res = []
for r in self._doc.xpath(u"//db:link[@xlink:href]",namespaces=const.XPATH):
default=os.path.dirname(os.getcwd())+'/htdocs/')
parser.add_argument('--subdir', nargs='?',
default='')
default=os.path.dirname(os.getcwd())+'/htdocs/')
parser.add_argument('--subdir', nargs='?',
default='')
+ parser.add_argument('--draft', action='store_true')
+ parser.add_argument('--level', type=int, choices=[1, 2, 3, 4, 5], default=0)
+
args = parser.parse_args()
ts = time()
args = parser.parse_args()
ts = time()
+ print "--= Treecutter =--"
dir_ = Directory()
sitemap = Sitemap(args)
# Scanning current directory and subdirectory for docbook articles
dir_ = Directory()
sitemap = Sitemap(args)
# Scanning current directory and subdirectory for docbook articles
+ dir_.scan(args.draft, args.level)
# Reading the sitemap.txt building a Trie structure
sitemap.read_map()
# Comparing the current state of the dir with the sitemap
# Reading the sitemap.txt building a Trie structure
sitemap.read_map()
# Comparing the current state of the dir with the sitemap
- missing = dir_.set() - sitemap.set()
- removed = sitemap.set() - dir_.set()
+ dirset = dir_.set()
+ missing = dirset - sitemap.set()
+ removed = sitemap.set() - dirset
- print page+' pages missing!!'
+ print page+' page not availible in this config'
for page in missing:
print 'adding missing page '+page
sitemap.add_link(page)
for page in missing:
print 'adding missing page '+page
sitemap.add_link(page)
- if len(missing)+len(removed) != 0:
print 'writing new sitemap - please adjust if needed'
sitemap.write_map()
print 'writing new sitemap - please adjust if needed'
sitemap.write_map()
+ dirsitemap = Sitemap(args)
+ for l in sitemap.linklist():
+ if l in dirset:
+ dirsitemap.add_link(l)
+
+
# Generate a pygraphviz image of the site (TODO: currently not used)
# Generate a pygraphviz image of the site (TODO: currently not used)
# Start processing the docbook articles to static html
# Start processing the docbook articles to static html
# Publish static html and style data (css, images, fonts) to destination dir
t1 = time()
# Publish static html and style data (css, images, fonts) to destination dir
t1 = time()
t2 = time()
print "Publish [%5.2f s]" % (round(t2-t1,2))
print "Total [%5.2f s]" % (round(t2-ts,2))
t2 = time()
print "Publish [%5.2f s]" % (round(t2-t1,2))
print "Total [%5.2f s]" % (round(t2-ts,2))
import os
import subprocess
import tempfile
import os
import subprocess
import tempfile
self._title = None
self._menu = None
self._rendered_article = None
self._title = None
self._menu = None
self._rendered_article = None
+ self._template = 'index'
+ self._status = None
def language(self):
return self._lang
def language(self):
return self._lang
(self._title, self._menu) = self._doc.title()
self._doc.expand_imageobjects()
self._doc.parse_xincludes()
(self._title, self._menu) = self._doc.title()
self._doc.expand_imageobjects()
self._doc.parse_xincludes()
+ self._doc.xinclude()
+ self._template = self._doc.role()
+ self._status = self._doc.status()
doc = self._doc.collect_links()
img = self._doc.collect_images()
vid = self._doc.collect_videos()
doc = self._doc.collect_links()
img = self._doc.collect_images()
vid = self._doc.collect_videos()
self._rendered_article = self._doc.xslt(transform['xhtml5'])
def template(self,sitemap,style,tdir,subdir):
self._rendered_article = self._doc.xslt(transform['xhtml5'])
def template(self,sitemap,style,tdir,subdir):
- htmlmenu = sitemap.gen_menu(self._lang,None,"menu")
+ htmlmenu = sitemap.gen_menu(self._lang,None,"links")
levelmenu = sitemap.gen_menu(self._lang,self,"tree")
langmenu = sitemap.lang_menu(self._lang,self._link)
article = self._rendered_article
levelmenu = sitemap.gen_menu(self._lang,self,"tree")
langmenu = sitemap.lang_menu(self._lang,self._link)
article = self._rendered_article
'subdir':subdir}
s = unicode(style, "utf-8")
'subdir':subdir}
s = unicode(style, "utf-8")
- t = s+'index.'+self._lang+'.xhtml.tmpl'
+ t = s+self._template+'.'+self._lang+'.xhtml'
template = templateEnv.get_template( t )
templateout = template.render( templateVars )
template = templateEnv.get_template( t )
templateout = template.render( templateVars )
def set(self):
return set(link.link() for link in self._tree)
def set(self):
return set(link.link() for link in self._tree)
+ def linklist(self):
+ return [link.link() for link in self._tree]
+
# Main driver in the application processing the documents
# in the collected sitemap
def process(self):
t1 = time()
# Main driver in the application processing the documents
# in the collected sitemap
def process(self):
t1 = time()
- print "Prepareing the input"
for link in self._tree:
link.prepare()
t2 = time()
for link in self._tree:
link.prepare()
t2 = time()