#!/bin/sh

set -e

unset SKIP_FOOTER
unset SKIP_HEADER
unset SKIP_LIST
unset VERBOSE
unset PRINT_HELP
unset SKIP_SITEMAP

usage() {
    printf "Usage: rivet [-o destdir] [-hvelfsu] srcdir domain\n"
    printf "\t-h: prints this message\n"
    printf "\t-o [destdir]: specifies the output directory to be [destdir]\n"
    printf "\t-v: makes the script verbose\n"
    printf "\t-e: skips prepending _header.html to .html files\n"
    printf "\t-f: skips appending _footer.html to .html files\n"
    printf "\t-l: skips the generation of the .html file list\n"
    printf "\t-s: skips sitemap.xml creation\n"
    printf "\t-u: makes all references to the url 'http' instead of 'https'\n"
    exit 2
}

convert() {
    tmpfile="tmpconvfile.tmp"
    infile="$1"
    # Strip the .md extension (three characters) and replace it with .html
    outfile="${infile%???}.html"
    cp "$1" "$tmpfile"
    # TODO: convert links to .md to .html
    lowdown -s -Thtml -o "$outfile" "$tmpfile"
    rm -f "$tmpfile" "$1"
}

# Check dependencies
if ! command -v lowdown > /dev/null; then
    echo "lowdown is not installed"
    exit 1
fi

destdir='dst'
prefix='https'

# Get arguments using getopts
# rivet [-o dir] [-vhelfsu] srcdir domain
# -o [dir]: set the destination directory, defaults to "dst"
# -v: verbose, sets x so it prints every line
# -h: prints usage
# -e: skip prepending the header
# -l: skip the article list
# -f: skip appending the footer
# -s: skip sitemap creation
# -u: use 'http' instead of 'https' in generated links
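# For example, a hypothetical invocation that builds ./site into ./public
# for example.com while skipping the sitemap:
#   rivet -o public -s site example.com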

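# Note: getopts stops at the first non-option argument,
# so all flags must come before srcdir and domain.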
while getopts 'o:vhelfsu' c
do
    case "$c" in
        o) destdir=${OPTARG%%\/} ;;
        v) VERBOSE=true ;;
        h) PRINT_HELP=true ;;
        e) SKIP_HEADER=true ;;
        l) SKIP_LIST=true ;;
        f) SKIP_FOOTER=true ;;
        s) SKIP_SITEMAP=true ;;
        u) prefix='http' ;;
        *) ;;
    esac
done
shift $((OPTIND - 1))

if ! [ "$1" ] || ! [ "$2" ]; then
    echo "Not enough arguments"
    usage
fi

src="$1"
srcdir=${src%%\/}
unset src

# Remove any junk from the domain eg. [https://]domain.com[/]
url="$(echo "$2" | sed -e 's/^https*:\/\///' | sed -e 's/\/$//' | sed -e 's/[]\/$*.^[]/\\&/g')"
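# e.g. "https://example.com/" becomes "example\.com": the scheme and trailing
# slash are stripped, and sed metacharacters are escaped so the domain can be
# embedded in the sed expressions used later.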

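# _header.html and _footer.html are taken from the top level of srcdir and
# spliced into every generated page below with sed's 'r' (read file) command.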
headerfile=$srcdir/_header.html
footerfile=$srcdir/_footer.html

if [ "$PRINT_HELP" ]; then
    usage
fi

if [ "$VERBOSE" ]; then
    set -x
fi

if ! [ -d "$srcdir" ]; then
    echo "Error: missing source directory"
    usage
fi

mkdir -p "$destdir"
cp -r "$srcdir"/* "$destdir"
rm -f "$destdir"/_header.html "$destdir"/_footer.html

# Convert markdown files
find "$destdir" -name "*.md" | while IFS="" read -r file; do
    convert "$file"
done

# Prepare the header: read _header.html in after the line matching <body>
if ! [ "$SKIP_HEADER" ]; then
    find "$destdir" -name "*.html" | while IFS="" read -r file; do
        sed -i "/<body>/r $headerfile" "$file"
    done
fi

# Prepare the footer: wrap it in <footer> tags, then read it in after </body>
if ! [ "$SKIP_FOOTER" ]; then
    tmpfoot="tmpfootfile.tmp"
    cp "$footerfile" "$tmpfoot"
    sed -i '1s/^/<footer>/' "$tmpfoot"
    echo '</footer>' >> "$tmpfoot"
    find "$destdir" -name "*.html" | while IFS="" read -r file; do
        sed -i "/<\/body>/r $tmpfoot" "$file"
    done
    rm -f "$tmpfoot"
fi

# Prepare the sitemap & file list
if ! [ "$SKIP_SITEMAP" ] || ! [ "$SKIP_LIST" ]; then
    linklist="linklist.tmp"
    # echo "" > "$linklist"
    rm -f "$linklist" "$destdir"/sitemap.xml
    find "$destdir" -name "*.html" | while IFS="" read -r file; do
        echo "${file#$destdir/}" >> "$linklist"
    done

    if ! [ "$SKIP_LIST" ]; then
        tmpfile="linkindex.tmp"
        rm -f "$tmpfile"
        cat << EOF >> "$tmpfile"
<div>
<h2>Pages</h2>
EOF
        while IFS="" read -r line; do
            if echo "$line" | grep -q 'index\.html'; then
                continue
            fi
            title="$(grep -e '^.*<\/title>' "$destdir"/"$line" | sed -e 's/<title>//' -e 's/<\/title>//')"
            if ! [ "$title" ]; then
                # Fall back to the file name without the .html extension
                title=${line%?????}
            fi
            printf "<p><a href=\"%s\">%s</a></p>\n" "$line" "$title" >> "$tmpfile"
        done < "$linklist"
        echo '</div>' >> "$tmpfile"

        # Splice the list into index.html just before </body> via a placeholder line
        sed -i '/<\/body>/i REPLACE' "$destdir"/index.html
        sed -i "/^REPLACE/r $tmpfile" "$destdir"/index.html
        sed -i 's/^REPLACE//' "$destdir"/index.html
        rm -f "$tmpfile"
    fi

    if ! [ "$SKIP_SITEMAP" ]; then
        sed -i -e "s/^/$prefix:\/\/$url\//" "$linklist"
        cat << EOF >> "$destdir"/sitemap.xml
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
</urlset>
EOF
        while IFS="" read -r line; do
            # The inserted text must start at column 0 so the indentation pass below matches it
            sed -i "/<\/urlset>/i\\
<url><loc>$line<\/loc><\/url>" "$destdir"/sitemap.xml
        done < "$linklist"
        sed -i 's/^<url>/\t<url>/' "$destdir"/sitemap.xml
    fi

    rm -f "$linklist"
fi

exit 0