#!/usr/bin/python
import os
-import time
+import sys
+from time import time
import argparse
from treecutter.directory import Directory
from treecutter.sitemap import Sitemap
+from treecutter.tools import translate
-parser = argparse.ArgumentParser(description='Process docbook article tree.')
-parser.add_argument('--style', nargs='?',
- default=os.path.dirname(os.getcwd())+'/style/default/')
-parser.add_argument('--output', nargs='?',
- default=os.path.dirname(os.getcwd())+'/htdocs/')
-args = parser.parse_args()
-
-ts = time.time()
-dir_ = Directory()
-sitemap = Sitemap()
-
-dir_.scan()
-sitemap.read_map()
-
-missing = dir_.set() - sitemap.set()
-removed = sitemap.set() - dir_.set()
-for page in removed:
- print page+' pages missing!!'
-for page in missing:
- print 'adding missing page '+page
- sitemap.add_link(page)
-if len(missing)+len(removed) != 0:
- print 'writing new sitemap - please adjust if needed'
- sitemap.write_map()
-sitemap.graph()
-
-sitemap.process(args.style)
-
-t1 = time.time()
-sitemap.publish(args.output,args.style)
-t2 = time.time()
-print "Publish [%5.2f s]" % (round(t2-t1,2))
-print "Total [%5.2f s]" % (round(t2-ts,2))
+def main():
+
+ parser = argparse.ArgumentParser(description='Process docbook article tree.')
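+ # --style and --output default to ../style/default/ and ../htdocs/
+ # relative to the current working directory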
+ parser.add_argument('--style', nargs='?',
+ default=os.path.dirname(os.getcwd())+'/style/default/')
+ parser.add_argument('--output', nargs='?',
+ default=os.path.dirname(os.getcwd())+'/htdocs/')
+ parser.add_argument('--subdir', nargs='?',
+ default='')
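+ # --draft and --level are forwarded to Directory.scan() below; presumably
+ # --draft includes draft articles and --level caps the scan depth
+ # (0 meaning no limit is an assumption; treecutter.directory defines it)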
+ parser.add_argument('--draft', action='store_true')
+ parser.add_argument('--level', type=int, choices=[1, 2, 3, 4, 5], default=0)
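+ # Note: default=0 sits outside choices; argparse validates choices only for
+ # values supplied on the command line, so the default is accepted as-is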
+
+ args = parser.parse_args()
+
+ ts = time()
+ print "--= Treecutter =--"
+ dir_ = Directory()
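+ # Gather the sources needing translation for this style and run them
+ # through treecutter.tools.translate, timing the step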
+ t1 = time()
+ totrans = dir_.translations(args.style)
+ print "Translate [%d] : [" % (len(totrans)),
+ translate(totrans)
+ print "]"
+ t2 = time()
+ print "Translate [%5.2f s]" % (round(t2-t1,2))
+
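+ # Sitemap takes the parsed arguments directly, so process() and publish()
+ # no longer need style/output passed in explicitly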
+ sitemap = Sitemap(args)
+
+ # Scan the current directory and its subdirectories for docbook articles
+ dir_.scan(args.draft, args.level)
+ # Read sitemap.txt and build a trie structure from it
+ sitemap.read_map()
+
+ # Compare the current state of the directory tree with the sitemap
+ dirset = dir_.set()
+ missing = dirset - sitemap.set()
+ removed = sitemap.set() - dirset
+ for page in removed:
+     print page+' page not available in this config'
+ for page in missing:
+     print 'adding missing page '+page
+     sitemap.add_link(page)
+ if missing:
+     print 'writing new sitemap - please adjust if needed'
+     sitemap.write_map()
+
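+ # Build a second sitemap restricted to pages actually present on disk,
+ # preserving the link order from sitemap.txt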
+ dirsitemap = Sitemap(args)
+ for link in sitemap.linklist():
+     if link in dirset:
+         dirsitemap.add_link(link)
+
+ # Generate a pygraphviz image of the site (TODO: currently not used)
+ dirsitemap.graph()
+ # Process the docbook articles into static HTML
+ dirsitemap.process()
+
+ # Publish static html and style data (css, images, fonts) to destination dir
+ t1 = time()
+ dirsitemap.publish()
+ t2 = time()
+ print "Publish [%5.2f s]" % (round(t2-t1,2))
+ print "Total [%5.2f s]" % (round(t2-ts,2))
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
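+
+# Typical invocation, assuming the script is saved as treecutter.py and run
+# from a directory whose parent holds style/ and htdocs/ (the script name
+# and flag values here are illustrative):
+#   python treecutter.py --draft --level 2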