# Copyright (C) 2016 Parsons Government Services ("PARSONS")
# Portions copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2012 Internet Systems Consortium ("ISC")
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notices and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND PARSONS, DRL, AND ISC DISCLAIM
# ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# PARSONS, DRL, OR ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Trac Wiki -> Markdown converter, hacked from old Trac Wiki -> PDF/flat
text converter.
Pull HTML pages from a Trac Wiki, feed the useful bits to
html2text to generate Markdown.
Assumes you're using the TracNav plugin for the Wiki pages, and uses
the same list as the TracNav plugin does to determine the set of pages
to convert.
"""
# Dependencies, at least on Ubuntu Xenial:
#
# apt-get install python-lxml python-html2text
#
# Be warned that there are many unrelated packages named "html2text",
# installed under various names on various platforms. This one
# happens to be a useful HTML-to-Markdown converter.
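#
# A quick sanity check that the right html2text is installed (exact
# output may vary with the html2text version; nothing in this script
# depends on it looking exactly like this):
#
#   $ echo '<p>Hello, <b>world</b>!</p>' | html2markdown
#   Hello, **world**!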

# Most of the work of massaging the HTML is done using XSL transforms,
# because the template-driven style makes that easy.  There's probably
# some clever way to use lxml's XPath code to do the same thing in a
# more pythonic way with ElementTrees (see the sketch after the
# imports below), but I already had the XSL transforms and there's a
# point of diminishing returns on this sort of thing.

import sys
import os
import argparse
import lxml.etree
import urllib
import urlparse
import subprocess
import zipfile
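
# A rough sketch of the more pythonic ElementTree/XPath approach
# mentioned above.  Unused here, and illustrative only: the div id is
# an assumption about Trac's generated HTML, and the real extraction
# below is done by the xsl_get_toc transform instead.

def xpath_get_toc_urls(tree, base_url):
    # Resolve every link found under Trac's main content div against
    # the site's base URL.
    return [urlparse.urljoin(base_url, a.get("href"))
            for a in tree.xpath("//div[@id = 'content']//a[@href]")]
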
# Main program, up front so it doesn't get lost under all the XSL
def main():
base = "https://trac.rpki.net"
parser = argparse.ArgumentParser(description = __doc__, formatter_class = argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-b", "--base_url",
default = base,
help = "base URL for documentation web site")
parser.add_argument("-t", "--toc",
default = base + "/wiki/doc/RPKI/TOC",
help = "table of contents URL")
parser.add_argument("-d", "--directory",
default = ".",
help = "output directory")
parser.add_argument("-p", "--prefix",
default = "/wiki/doc",
help = "page name prefix on wiki")
args = parser.parse_args()
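
    # Fetch the TOC page and run it through the XSLT that extracts the
    # wiki page URLs, one per line.  repr() is a cheap way of quoting a
    # Python string as an XPath string literal for the stylesheet
    # parameters; the assertion is a sanity check that every page lives
    # under the expected wiki prefix.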
urls = str(xsl_get_toc(lxml.etree.parse(urllib.urlopen(args.toc)).getroot(),
basename = repr(args.base_url))).splitlines()
assert all(urlparse.urlparse(url).path.startswith(args.prefix) for url in urls)
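
    # Convert the pages in TOC order.  The two-digit sequence number
    # keeps the generated files sorted the same way, and the page's
    # wiki path (minus the common prefix) is flattened into a
    # dot-separated file name.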
for pagenum, url in enumerate(urls):
path = urlparse.urlparse(url).path
page = xsl_get_page(lxml.etree.parse(urllib.urlopen(url)).getroot(),
basename = repr(args.base_url),
path = repr(path))
fn_base = os.path.join(args.directory, "{:02d}{}".format(pagenum, path[len(args.prefix):].replace("/", ".")))

        # Grab the page's attachments, if any, as a single zip file via
        # Trac's zip-attachment view.  A page with no attachments can
        # yield an empty zip or something that isn't a valid zip at
        # all, so treat either case as nothing worth keeping.
        fn = fn_base + ".zip"
        zip_url = urlparse.urljoin(url, "/zip-attachment{}/".format(path))
        urllib.urlretrieve(zip_url, fn)
        try:
            with zipfile.ZipFile(fn, "r") as z:
                empty = len(z.namelist()) == 0
        except zipfile.BadZipfile:
            empty = True
        if empty:
            os.unlink(fn)
        else:
            sys.stderr.write("Wrote {}\n".format(fn))
for imgnum, img in enumerate(page.xpath("//img | //object | //embed")):
img_url = img.get("data" if img.tag == "object" else "src")
img_url = urlparse.urljoin(url, img_url)
fn = "{}.{:02d}{}".format(fn_base, imgnum, os.path.splitext(img_url)[1])
urllib.urlretrieve(img_url, fn)
sys.stderr.write("Wrote {}\n".format(fn))
html2markdown = subprocess.Popen(("html2markdown", "--no-skip-internal-links", "--reference-links"),
stdin = subprocess.PIPE, stdout = subprocess.PIPE)
page.write(html2markdown.stdin)
html2markdown.stdin.close()
lines = html2markdown.stdout.readlines()
html2markdown.stdout.close()
html2markdown.wait()
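
        # Strip leading blank lines from the converter's output.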
while lines and lines[0].isspace():
del lines[0]
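
        # Write the Markdown, collapsing each run of blank lines to a
        # single blank line and dropping trailing blank lines.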
fn = fn_base + ".md"
with open(fn, "w") as f:
want_blank = False
for line in lines:
blank = line.isspace()
if want_blank and not blank:
f.write("\n")
if not blank:
f.write(line)
want_blank = blank
sys.stderr.write("Wrote {}\n".format(fn))
        fn = fn_base + ".wiki"
urllib.urlretrieve(url + "?format=txt", fn)
sys.stderr.write("Wrote {}\n".format(fn))
# XSL transform to extract list of Wiki page URLs from the TOC Wiki page
xsl_get_toc = lxml.etree.XSLT(lxml.etree.XML('''\