diff --git a/src/Tools/offlinedoc/buildpdf.py b/src/Tools/offlinedoc/buildpdf.py index 24d65a2158..8469886268 100755 --- a/src/Tools/offlinedoc/buildpdf.py +++ b/src/Tools/offlinedoc/buildpdf.py @@ -70,9 +70,8 @@ Embedding_FreeCAD Embedding_FreeCADGui Code_snippets""" -import sys, os, re, tempfile, getopt, shutil, time +import sys, os, shutil, time from urllib.request import urlopen -from urllib.error import HTTPError # CONFIGURATION ################################################# @@ -99,7 +98,7 @@ def crawl(): if PDFCONVERTOR == 'pisa': try: import ho.pisa as pisa - except: + except Exception: "Error: Python-pisa not installed, exiting." return 1 elif PDFCONVERTOR == 'htmldoc': @@ -108,7 +107,7 @@ def crawl(): return 1 try: from PyPDF2 import PdfFileReader,PdfFileWriter - except: + except Exception: print("Error: Python-pypdf2 not installed, exiting.") # run ######################################################## @@ -229,7 +228,7 @@ def joinpdf(): if page == "end": parent = False continue - if VERBOSE: print('Appending',page, "at position",count) + if VERBOSE: print('Appending', page, "at position", count) title = page.replace("_"," ") pdffile = page + ".pdf" if exists(pdffile,True): @@ -245,7 +244,7 @@ def joinpdf(): result.addBookmark(title,count,parent) count += numpages else: - print("page",pdffile,"not found, aborting.") + print("page", pdffile, "not found, aborting.") sys.exit() if VERBOSE: print("Writing...") diff --git a/src/Tools/offlinedoc/buildqhelp.py b/src/Tools/offlinedoc/buildqhelp.py index de90461b5d..5608154d7b 100755 --- a/src/Tools/offlinedoc/buildqhelp.py +++ b/src/Tools/offlinedoc/buildqhelp.py @@ -30,8 +30,7 @@ __url__ = "http://www.freecadweb.org" This script builds qhrlp files from a local copy of the wiki """ -import sys, os, re, tempfile, getopt, shutil -from urllib.request import urlopen, HTTPError +import os, re, shutil # CONFIGURATION ################################################# diff --git 
a/src/Tools/offlinedoc/buildwikiindex.py b/src/Tools/offlinedoc/buildwikiindex.py index 5af78f0fa4..34f7730110 100755 --- a/src/Tools/offlinedoc/buildwikiindex.py +++ b/src/Tools/offlinedoc/buildwikiindex.py @@ -33,7 +33,7 @@ This script parses the contents of a wiki site and saves a file containing names of pages and images to be downloaded. """ -import sys, os, re, tempfile, getopt +import sys, os, re from urllib2 import urlopen, HTTPError # CONFIGURATION ################################################# @@ -155,7 +155,7 @@ def getlinks(html): if not rg: rg = re.findall('href="\/wiki\/(.*?)"',l) if not rg: - rg = re.findall('href=".*?wiki.freecadweb.org\/(.*?)"',l) + rg = re.findall('href=".*?wiki\\.freecadweb\\.org\/(.*?)"',l) if not rg: rg = re.findall('href="\/(.*?)"',l) if "images" in rg: diff --git a/src/Tools/offlinedoc/downloadwiki.py b/src/Tools/offlinedoc/downloadwiki.py index 727445ff42..f6e20bec55 100755 --- a/src/Tools/offlinedoc/downloadwiki.py +++ b/src/Tools/offlinedoc/downloadwiki.py @@ -32,7 +32,7 @@ __url__ = "http://www.freecadweb.org" This script retrieves the contents of a wiki site from a pages list """ -import sys, os, re, tempfile, getopt +import os, re from urllib2 import urlopen, HTTPError # CONFIGURATION ################################################# @@ -159,9 +159,9 @@ def crawl(): for l in lfile: locallist.append(l.replace("\n","")) lfile.close() todolist = locallist[:] - print ("getting",len(todolist),"files...") + print("getting", len(todolist), "files...") count = 1 - indexpages = get(INDEX) + get(INDEX) while todolist: targetpage = todolist.pop() if VERBOSE: print (count,(3-len(str(count)))*" ", ": Fetching ", targetpage) @@ -172,7 +172,7 @@ def crawl(): return 0 def get(page): - "downloads a single page, returns the other pages it links to" + "downloads a single page" localpage = page if "Command_Reference" in localpage: localpage = localpage.replace("Category:","") @@ -188,7 +188,7 @@ def get(page): html = 
cleanimagelinks(html) output(html,page) else: - if VERBOSE: print (" skipping",page) + if VERBOSE: print(" skipping", page) def getlinks(html): "returns a list of wikipage links in html file" @@ -302,15 +302,15 @@ def fetchimage(imagelink): file = open(path,'wb') file.write(data) file.close() - except: + except Exception: failcount += 1 else: processed.append(filename) - if VERBOSE: print (" saving",local(filename,image=True)) + if VERBOSE: print(" saving", local(filename,image=True)) return print ('Error: unable to fetch file ' + filename) else: - if VERBOSE: print (" skipping",filename) + if VERBOSE: print(" skipping", filename) def local(page,image=False): "returns a local path for a given page/image" @@ -345,7 +345,7 @@ def output(html,page): filename = filename.replace("&pagefrom=","+") filename = filename.replace("#mw-pages","") filename = filename.replace(".html.html",".html") - print (" saving",filename) + print(" saving", filename) file = open(filename,'wb') file.write(html) file.close() diff --git a/src/Tools/offlinedoc/update.py b/src/Tools/offlinedoc/update.py index fd3b8ac729..0ff3eae205 100755 --- a/src/Tools/offlinedoc/update.py +++ b/src/Tools/offlinedoc/update.py @@ -40,7 +40,7 @@ This script needs to be run after the wiki has been fully downloaded. It has thr files downloaded. Revision.txt and wikifiles.txt get also updated. """ -import sys, os, re, tempfile, getopt +import sys, os, re from urllib.request import urlopen from urllib.error import HTTPError