rem (stringlengths 0–322k) | add (stringlengths 0–2.05M) | context (stringlengths 8–228k) |
---|---|---|
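Each row below pairs a removed line (`rem`) with its replacement (`add`) and the surrounding function (`context`, typically shown in its pre-fix state). As a minimal sketch of how a row can be consumed, assuming Python 3 and that the three cells are available as plain strings (the function and variable names here are illustrative, not part of the dataset):

```python
# Illustrative helper: apply a row's one-line fix to its context string.
def apply_row_fix(rem: str, add: str, context: str) -> str:
    """Return the context with the removed snippet replaced by the added one.

    Assumes the removed snippet occurs verbatim in the context, which holds for
    simple one-line renames such as match.group(1) -> interwikiMatch.group(1)
    in the first row; rows with an empty rem cell are returned unchanged.
    """
    if rem and rem in context:
        return context.replace(rem, add, 1)
    return context
```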
print "WARNING: Link to unknown language %s" % (match.group(1))
|
print "WARNING: Link to unknown language %s" % (interwikiMatch.group(1))
|
def removeLanguageLinks(text, site = None): """Given the wiki-text of a page, return that page with all interwiki links removed. If a link to an unknown language is encountered, a warning is printed.""" if site == None: site = getSite() # This regular expression will find every link that is possibly an # interwiki link, plus trailing whitespace. The language code is grouped. # NOTE: This assumes that language codes only consist of non-capital # ASCII letters and hyphens. interwikiR = re.compile(r'\[\[([a-z\-]+):[^\]]*\]\][\s]*') nowikiOrHtmlCommentR = re.compile(r'<nowiki>.*?</nowiki>|<!--.*?-->', re.IGNORECASE | re.DOTALL) # How much of the text we have looked at so far index = 0 while True: interwikiMatch = interwikiR.search(text, index) if interwikiMatch: nextTagMatch = interwikiMatch else: break nowikiOrHtmlCommentMatch = nowikiOrHtmlCommentR.search(text, index) if nowikiOrHtmlCommentMatch and nowikiOrHtmlCommentMatch.start() < nextTagMatch.start(): # an HTML comment or text in nowiki tags stands before the next interwiki link. Skip. index = nowikiOrHtmlCommentMatch.end() else: # Extract what would be the language code code = interwikiMatch.group(1) if code in site.family.langs: # We found a valid interwiki link. Remove it. text = text[:interwikiMatch.start()] + text[interwikiMatch.end():] # continue the search on the remaining text index = interwikiMatch.start() else: index = interwikiMatch.end() if len(code) == 2 or len(code) == 3: print "WARNING: Link to unknown language %s" % (match.group(1)) return normalWhitespace(text)
|
print "OK. I'm not uploading"
|
def convert(text): ''' Converts all HTML tables in text to wiki syntax. If text contains wiki tables, tries to beautify them. Returns converted text if page was successfully changed, otherwise returns None. ''' warnings = 0 # this array will contain strings that will be shown in case of possible # errors, before the user is asked if he wants to accept the changes. warning_messages = [] newText = text ################## # bring every <tag> into one single line. num = 1 while num != 0: newText, num = re.subn("([^\r\n]{1})(<[tT]{1}[dDhHrR]{1})", r"\1\r\n\2", newText) ################## # every open-tag gets a new line. ################## # <table> tag with attributes, with more text on the same line newText = re.sub("[\r\n]*?<(?i)(table) ([\w\W]*?)>([\w\W]*?)[\r\n ]*", r"\r\n{| \2\r\n\3", newText) # <table> tag without attributes, with more text on the same line newText = re.sub("[\r\n]*?<(TABLE|table)>([\w\W]*?)[\r\n ]*", r"\r\n{|\n\2\r\n", newText) # <table> tag with attributes, without more text on the same line newText = re.sub("[\r\n]*?<(TABLE|table) ([\w\W]*?)>[\r\n ]*", r"\r\n{| \2\r\n", newText) # <table> tag without attributes, without more text on the same line newText = re.sub("[\r\n]*?<(TABLE|table)>[\r\n ]*", "\r\n{|\r\n", newText) # end </table> newText = re.sub("[\s]*<\/(TABLE|table)>", "\r\n|}", newText) ################## # captions newText = re.sub("<caption ([\w\W]*?)>([\w\W]*?)<\/caption>", r"\r\n|+\1 | \2", newText) newText = re.sub("<caption>([\w\W]*?)<\/caption>", r"\r\n|+ \1", newText) ################## # <th> often people don't write them within <tr>, be warned! newText = re.sub("[\r\n]+<(TH|th)([^>]*?)>([\w\W]*?)<\/(th|TH)>", r"\r\n!\2 | \3\r\n", newText) # fail save. sometimes people forget </th> # <th> without attributes newText, n = re.subn("[\r\n]+<(th|TH)>([\w\W]*?)[\r\n]+", r"\r\n! \2\r\n", newText) if n>0: warning_messages.append('WARNING: found <th> without </th>. (%d occurences)' % n) warnings += n # <th> with attributes newText, n = re.subn("[\r\n]+<(th|TH)([^>]*?)>([\w\W]*?)[\r\n]+", r"\n!\2 | \3\r\n", newText) if n>0: warning_messages.append('WARNING: found <th> without </th>. (%d occurences)' % n) warnings += n ################## # very simple <tr> newText = re.sub("[\r\n]*<(tr|TR)([^>]*?)>[\r\n]*", r"\r\n|-----\2\r\n", newText) newText = re.sub("[\r\n]*<(tr|TR)>[\r\n]*", r"\r\n|-----\r\n", newText) ################## # normal <td> without arguments newText = re.sub("[\r\n]+<(td|TD)>([\w\W]*?)<\/(TD|td)>", r"\r\n| \2\r\n", newText) ################## # normal <td> with arguments newText = re.sub("[\r\n]+<(td|TD)([^>]*?)>([\w\W]*?)<\/(TD|td)>", r"\r\n|\2 | \3", newText) # WARNING: this sub might eat cells of bad HTML, but most likely it # will correct errors # TODO: some more docu please newText, n = re.subn("[\r\n]+<(td|TD)>([^\r\n]*?)<(td|TD)>", r"\r\n| \2\r\n", newText) if n>0: warning_messages.append('WARNING: (sorry, bot code unreadable (1). I don\'t know why this warning is given.) (%d occurences)' % n) warnings += n # fail save, sometimes it's a <td><td></tr> # newText, n = re.subn("[\r\n]+<(td|TD)>([^<]*?)<(td|TD)><\/(tr|TR)>", # "\r\n| \\2\r\n", newText) # newText, n = re.subn("[\r\n]+<(td|TD)([^>]*?)>([^<]*?)<(td|TD)><\/(tr|TR)>", # "\r\n|\\2| \\3\r\n", newText) # newText, n = re.subn("[\r\n]+<(td|TD)([^>]+?)>([^\r\n]*?)<\/(td|TD)>", r"\r\n|\2 | \3\r\n", newText) if n>0: warning_messages.append('WARNING: found <td><td></tr>, but no </td>. (%d occurences)' % n) warnings += n # fail save. 
sometimes people forget </td> # <td> without arguments, with missing </td> newText, n = re.subn("<(td|TD)>([^<]*?)[\r\n]+", r"\r\n| \2\r\n", newText) if n>0: warning_messages.append('WARNING: found <td> without </td>. (%d occurences)' % n) warnings += n # <td> with arguments, with missing </td> newText, n = re.subn("[\r\n]*<(td|TD)([^>]*?)>([\w\W]*?)[\r\n]+", r"\r\n|\2 | \3\r\n", newText) if n > 0: warning_messages.append('NOTE: Found <td> without </td>. This shouldn\'t cause problems.') # TODO: some docu please newText, n = re.subn("<(td|TD)>([\w\W]*?)[\r\n]+", r"\r\n| \2\r\n", newText) if n>0: warning_messages.append('WARNING: (sorry, bot code unreadable (2). I don\'t know why this warning is given.) (%d occurences)' % n) warnings += n ################## # Garbage collecting ;-) newText = re.sub("<td>[\r\n]*<\/tr>", "", newText) newText = re.sub("[\r\n]*<\/[Tt][rRdDhH]>", "", newText) ################## # OK, that's only theory but works most times. # Most browsers assume that <th> gets a new row and we do the same # newText, n = re.subn("([\r\n]+\|\ [^\r\n]*?)([\r\n]+\!)", # "\\1\r\n|-----\\2", newText) # warnings = warnings + n # adds a |---- below for the case the new <tr> is missing # newText, n = re.subn("([\r\n]+\!\ [^\r\n]*?[\r\n]+)(\|\ )", # "\\1|-----\r\n\\2", newText) # warnings = warnings + n ################## # most <th> come with '''title'''. Senseless in my eyes cuz # <th> should be bold anyways. newText = re.sub("[\r\n]+\!([^'\n\r]*)'''([^'\r\n]*)'''", r"\r\n!\1\2", newText) ################## # kills indention within tables. Be warned, it might seldom bring # bad results. # True by default. Set 'deIndentTables = False' in user-config.py if config.deIndentTables: num = 1 while num != 0: newText, num = re.subn("(\{\|[\w\W]*?)\n[ \t]+([\w\W]*?\|\})", r"\1\r\n\2", newText) ################## # kills additional spaces after | or ! or {| # This line was creating problems, so I commented it out --Daniel # newText = re.sub("[\r\n]+\|[\t ]+?[\r\n]+", "\r\n| ", newText) # kills trailing spaces and tabs newText = re.sub("\r\n(.*)[\t\ ]+[\r\n]+", r"\r\n\1\r\n", newText) # kill extra new-lines newText = re.sub("[\r\n]{4,}(\!|\|)", r"\r\n\1", newText); ################## # shortening if <table> had no arguments/parameters newText = re.sub("[\r\n]+\{\|[\ ]+\| ", "\r\n\{| ", newText) # shortening if <td> had no articles newText = re.sub("[\r\n]+\|[\ ]+\| ", "\r\n| ", newText) # shortening if <th> had no articles newText = re.sub("\n\|\+[\ ]+\|", "\n|+ ", newText) # shortening of <caption> had no articles newText = re.sub("[\r\n]+\![\ ]+\| ", "\r\n! ", newText) ################## # proper attributes. attribute values need to be in quotation marks. num = 1 while num != 0: # group 1 starts with newlines, followed by a table tag # (either !, |, {|, or |---), then zero or more attribute key-value # pairs where the value already has correct quotation marks, and # finally the key of the attribute we want to fix here. # group 3 is the value of the attribute we want to fix here. # We recognize it by searching for a string of non-whitespace characters # - [^\s]+? - which is not embraced by quotation marks - [^"] # group 4 is a whitespace character and probably unnecessary.. 
newText, num = re.subn(r'([\r\n]+(\!|\||\{\|)[^\r\n\|]+)[ ]*=[ ]*([^"][^\s]+?[^"])(\s)', r'\1="\3"\4', newText, 1) ################## # merge two short <td>s num = 1 while num != 0: newText, num = re.subn("[\r\n]+(\|[^\|\-\}]{1}[^\n\r]{0,35})" + "[\r\n]+(\|[^\|\-\}]{1}[^\r\n]{0,35})[\r\n]+", r"\r\n\1 |\2\r\n", newText) #### # add a new line if first is * or # newText = re.sub("[\r\n]+\| ([*#]{1})", r"\r\n|\r\n\1", newText) ################## # strip <center> from <th> newText = re.sub("([\r\n]+\![^\r\n]+?)<center>([\w\W]+?)<\/center>", r"\1 \2", newText) # strip align="center" from <th> because the .css does it # if there are no other attributes than align, we don't need that | either newText = re.sub("([\r\n]+\! +)align\=\"center\" +\|", r"\1", newText) # if there are other attributes, simply strip the align="center" newText = re.sub("([\r\n]+\![^\r\n\|]+?)align\=\"center\"([^\n\r\|]+?\|)", r"\1 \2", newText) ################## # kill additional spaces within arguments num = 1 while num != 0: newText, num = re.subn("[\r\n]+(\||\!)([^|\r\n]*?)[ \t]{2,}([^\r\n]+?)", r"\r\n\1\2 \3", newText) ################## # I hate those long lines because they make a wall of letters # Off by default, set 'splitLongParagraphs = True' in user-config.py if config.splitLongParagraphs: num = 1 while num != 0: # TODO: how does this work? docu please. # why are only äöüß used, but not other special characters? newText, num = re.subn("(\r\n[A-Z]{1}[^\n\r]{200,}?[a-zäöüß]\.)\ ([A-ZÄÖÜ]{1}[^\n\r]{200,})", r"\1\r\n\2", newText) ################## if newText!=text: import difflib if debug: print text print newText elif not quietMode: for line in difflib.ndiff(text.split('\n'), newText.split('\n')): if line[0] == '-': wikipedia.output(line) for line in difflib.ndiff(text.split('\n'), newText.split('\n')): if line[0] == '+': wikipedia.output(line) if config.table2wikiAskOnlyWarnings and warnings == 0: doUpload="y" else: for warning_message in warning_messages: print warning_message if config.table2wikiSkipWarnings: doUpload="n" else: print "There were " + str(warnings) + " replacement(s) that might lead to bad output" doUpload = wikipedia.input(u'Is it correct? [y|N]') if doUpload == 'y': warn = "" if warnings == 0: # get edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.mylang, msg_no_warnings)) elif warnings == 1: wikipedia.setAction(wikipedia.translate(wikipedia.mylang, msg_one_warning) % warnings) else: wikipedia.setAction(wikipedia.translate(wikipedia.mylang, msg_multiple_warnings) % warnings) return newText else: print "OK. I'm not uploading" return None else: print "No changes were necessary in " + article return None
|
|
print "No changes were necessary in " + article
|
print "No changes were necessary"
|
def convert(text): ''' Converts all HTML tables in text to wiki syntax. If text contains wiki tables, tries to beautify them. Returns converted text if page was successfully changed, otherwise returns None. ''' warnings = 0 # this array will contain strings that will be shown in case of possible # errors, before the user is asked if he wants to accept the changes. warning_messages = [] newText = text ################## # bring every <tag> into one single line. num = 1 while num != 0: newText, num = re.subn("([^\r\n]{1})(<[tT]{1}[dDhHrR]{1})", r"\1\r\n\2", newText) ################## # every open-tag gets a new line. ################## # <table> tag with attributes, with more text on the same line newText = re.sub("[\r\n]*?<(?i)(table) ([\w\W]*?)>([\w\W]*?)[\r\n ]*", r"\r\n{| \2\r\n\3", newText) # <table> tag without attributes, with more text on the same line newText = re.sub("[\r\n]*?<(TABLE|table)>([\w\W]*?)[\r\n ]*", r"\r\n{|\n\2\r\n", newText) # <table> tag with attributes, without more text on the same line newText = re.sub("[\r\n]*?<(TABLE|table) ([\w\W]*?)>[\r\n ]*", r"\r\n{| \2\r\n", newText) # <table> tag without attributes, without more text on the same line newText = re.sub("[\r\n]*?<(TABLE|table)>[\r\n ]*", "\r\n{|\r\n", newText) # end </table> newText = re.sub("[\s]*<\/(TABLE|table)>", "\r\n|}", newText) ################## # captions newText = re.sub("<caption ([\w\W]*?)>([\w\W]*?)<\/caption>", r"\r\n|+\1 | \2", newText) newText = re.sub("<caption>([\w\W]*?)<\/caption>", r"\r\n|+ \1", newText) ################## # <th> often people don't write them within <tr>, be warned! newText = re.sub("[\r\n]+<(TH|th)([^>]*?)>([\w\W]*?)<\/(th|TH)>", r"\r\n!\2 | \3\r\n", newText) # fail save. sometimes people forget </th> # <th> without attributes newText, n = re.subn("[\r\n]+<(th|TH)>([\w\W]*?)[\r\n]+", r"\r\n! \2\r\n", newText) if n>0: warning_messages.append('WARNING: found <th> without </th>. (%d occurences)' % n) warnings += n # <th> with attributes newText, n = re.subn("[\r\n]+<(th|TH)([^>]*?)>([\w\W]*?)[\r\n]+", r"\n!\2 | \3\r\n", newText) if n>0: warning_messages.append('WARNING: found <th> without </th>. (%d occurences)' % n) warnings += n ################## # very simple <tr> newText = re.sub("[\r\n]*<(tr|TR)([^>]*?)>[\r\n]*", r"\r\n|-----\2\r\n", newText) newText = re.sub("[\r\n]*<(tr|TR)>[\r\n]*", r"\r\n|-----\r\n", newText) ################## # normal <td> without arguments newText = re.sub("[\r\n]+<(td|TD)>([\w\W]*?)<\/(TD|td)>", r"\r\n| \2\r\n", newText) ################## # normal <td> with arguments newText = re.sub("[\r\n]+<(td|TD)([^>]*?)>([\w\W]*?)<\/(TD|td)>", r"\r\n|\2 | \3", newText) # WARNING: this sub might eat cells of bad HTML, but most likely it # will correct errors # TODO: some more docu please newText, n = re.subn("[\r\n]+<(td|TD)>([^\r\n]*?)<(td|TD)>", r"\r\n| \2\r\n", newText) if n>0: warning_messages.append('WARNING: (sorry, bot code unreadable (1). I don\'t know why this warning is given.) (%d occurences)' % n) warnings += n # fail save, sometimes it's a <td><td></tr> # newText, n = re.subn("[\r\n]+<(td|TD)>([^<]*?)<(td|TD)><\/(tr|TR)>", # "\r\n| \\2\r\n", newText) # newText, n = re.subn("[\r\n]+<(td|TD)([^>]*?)>([^<]*?)<(td|TD)><\/(tr|TR)>", # "\r\n|\\2| \\3\r\n", newText) # newText, n = re.subn("[\r\n]+<(td|TD)([^>]+?)>([^\r\n]*?)<\/(td|TD)>", r"\r\n|\2 | \3\r\n", newText) if n>0: warning_messages.append('WARNING: found <td><td></tr>, but no </td>. (%d occurences)' % n) warnings += n # fail save. 
sometimes people forget </td> # <td> without arguments, with missing </td> newText, n = re.subn("<(td|TD)>([^<]*?)[\r\n]+", r"\r\n| \2\r\n", newText) if n>0: warning_messages.append('WARNING: found <td> without </td>. (%d occurences)' % n) warnings += n # <td> with arguments, with missing </td> newText, n = re.subn("[\r\n]*<(td|TD)([^>]*?)>([\w\W]*?)[\r\n]+", r"\r\n|\2 | \3\r\n", newText) if n > 0: warning_messages.append('NOTE: Found <td> without </td>. This shouldn\'t cause problems.') # TODO: some docu please newText, n = re.subn("<(td|TD)>([\w\W]*?)[\r\n]+", r"\r\n| \2\r\n", newText) if n>0: warning_messages.append('WARNING: (sorry, bot code unreadable (2). I don\'t know why this warning is given.) (%d occurences)' % n) warnings += n ################## # Garbage collecting ;-) newText = re.sub("<td>[\r\n]*<\/tr>", "", newText) newText = re.sub("[\r\n]*<\/[Tt][rRdDhH]>", "", newText) ################## # OK, that's only theory but works most times. # Most browsers assume that <th> gets a new row and we do the same # newText, n = re.subn("([\r\n]+\|\ [^\r\n]*?)([\r\n]+\!)", # "\\1\r\n|-----\\2", newText) # warnings = warnings + n # adds a |---- below for the case the new <tr> is missing # newText, n = re.subn("([\r\n]+\!\ [^\r\n]*?[\r\n]+)(\|\ )", # "\\1|-----\r\n\\2", newText) # warnings = warnings + n ################## # most <th> come with '''title'''. Senseless in my eyes cuz # <th> should be bold anyways. newText = re.sub("[\r\n]+\!([^'\n\r]*)'''([^'\r\n]*)'''", r"\r\n!\1\2", newText) ################## # kills indention within tables. Be warned, it might seldom bring # bad results. # True by default. Set 'deIndentTables = False' in user-config.py if config.deIndentTables: num = 1 while num != 0: newText, num = re.subn("(\{\|[\w\W]*?)\n[ \t]+([\w\W]*?\|\})", r"\1\r\n\2", newText) ################## # kills additional spaces after | or ! or {| # This line was creating problems, so I commented it out --Daniel # newText = re.sub("[\r\n]+\|[\t ]+?[\r\n]+", "\r\n| ", newText) # kills trailing spaces and tabs newText = re.sub("\r\n(.*)[\t\ ]+[\r\n]+", r"\r\n\1\r\n", newText) # kill extra new-lines newText = re.sub("[\r\n]{4,}(\!|\|)", r"\r\n\1", newText); ################## # shortening if <table> had no arguments/parameters newText = re.sub("[\r\n]+\{\|[\ ]+\| ", "\r\n\{| ", newText) # shortening if <td> had no articles newText = re.sub("[\r\n]+\|[\ ]+\| ", "\r\n| ", newText) # shortening if <th> had no articles newText = re.sub("\n\|\+[\ ]+\|", "\n|+ ", newText) # shortening of <caption> had no articles newText = re.sub("[\r\n]+\![\ ]+\| ", "\r\n! ", newText) ################## # proper attributes. attribute values need to be in quotation marks. num = 1 while num != 0: # group 1 starts with newlines, followed by a table tag # (either !, |, {|, or |---), then zero or more attribute key-value # pairs where the value already has correct quotation marks, and # finally the key of the attribute we want to fix here. # group 3 is the value of the attribute we want to fix here. # We recognize it by searching for a string of non-whitespace characters # - [^\s]+? - which is not embraced by quotation marks - [^"] # group 4 is a whitespace character and probably unnecessary.. 
newText, num = re.subn(r'([\r\n]+(\!|\||\{\|)[^\r\n\|]+)[ ]*=[ ]*([^"][^\s]+?[^"])(\s)', r'\1="\3"\4', newText, 1) ################## # merge two short <td>s num = 1 while num != 0: newText, num = re.subn("[\r\n]+(\|[^\|\-\}]{1}[^\n\r]{0,35})" + "[\r\n]+(\|[^\|\-\}]{1}[^\r\n]{0,35})[\r\n]+", r"\r\n\1 |\2\r\n", newText) #### # add a new line if first is * or # newText = re.sub("[\r\n]+\| ([*#]{1})", r"\r\n|\r\n\1", newText) ################## # strip <center> from <th> newText = re.sub("([\r\n]+\![^\r\n]+?)<center>([\w\W]+?)<\/center>", r"\1 \2", newText) # strip align="center" from <th> because the .css does it # if there are no other attributes than align, we don't need that | either newText = re.sub("([\r\n]+\! +)align\=\"center\" +\|", r"\1", newText) # if there are other attributes, simply strip the align="center" newText = re.sub("([\r\n]+\![^\r\n\|]+?)align\=\"center\"([^\n\r\|]+?\|)", r"\1 \2", newText) ################## # kill additional spaces within arguments num = 1 while num != 0: newText, num = re.subn("[\r\n]+(\||\!)([^|\r\n]*?)[ \t]{2,}([^\r\n]+?)", r"\r\n\1\2 \3", newText) ################## # I hate those long lines because they make a wall of letters # Off by default, set 'splitLongParagraphs = True' in user-config.py if config.splitLongParagraphs: num = 1 while num != 0: # TODO: how does this work? docu please. # why are only äöüß used, but not other special characters? newText, num = re.subn("(\r\n[A-Z]{1}[^\n\r]{200,}?[a-zäöüß]\.)\ ([A-ZÄÖÜ]{1}[^\n\r]{200,})", r"\1\r\n\2", newText) ################## if newText!=text: import difflib if debug: print text print newText elif not quietMode: for line in difflib.ndiff(text.split('\n'), newText.split('\n')): if line[0] == '-': wikipedia.output(line) for line in difflib.ndiff(text.split('\n'), newText.split('\n')): if line[0] == '+': wikipedia.output(line) if config.table2wikiAskOnlyWarnings and warnings == 0: doUpload="y" else: for warning_message in warning_messages: print warning_message if config.table2wikiSkipWarnings: doUpload="n" else: print "There were " + str(warnings) + " replacement(s) that might lead to bad output" doUpload = wikipedia.input(u'Is it correct? [y|N]') if doUpload == 'y': warn = "" if warnings == 0: # get edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.mylang, msg_no_warnings)) elif warnings == 1: wikipedia.setAction(wikipedia.translate(wikipedia.mylang, msg_one_warning) % warnings) else: wikipedia.setAction(wikipedia.translate(wikipedia.mylang, msg_multiple_warnings) % warnings) return newText else: print "OK. I'm not uploading" return None else: print "No changes were necessary in " + article return None
|
self.langs[lang] = lang+'.wikipedia.org'
|
if lang not in self.langs: self.langs[lang] = lang+'.wikipedia.org'
|
def __init__(self): family.Family.__init__(self) self.name = 'wikipedia'
|
pass try: thistxt = removeCategoryLinks(self.get(), self.code()) except IsRedirectPage: pass
|
return thistxt = removeCategoryLinks(thistxt, self.code())
|
def links(self): # Gives the normal (not-interwiki, non-category) pages the page # directs to, as strings result = [] try: thistxt = removeLanguageLinks(self.get()) except IsRedirectPage: pass try: thistxt = removeCategoryLinks(self.get(), self.code()) except IsRedirectPage: pass w=r'([^\]\|]*)' Rlink = re.compile(r'\[\['+w+r'(\|'+w+r')?\]\]') for l in Rlink.findall(thistxt): result.append(l[0]) return result
|
return getPage(self.code(),self.urlname())
|
if not hasattr(self,'_contents'): self._contents=getPage(self.code(),self.urlname()) return self._contents
|
def get(self): return getPage(self.code(),self.urlname())
|
('wpEdittime', edittime[code,space2underline(name)]),
|
('wpEdittime', edittime[code,link2url(name,code)]),
|
def putPage(code, name, text, comment=None): """Upload 'text' on page 'name' to the 'code' language wikipedia.""" import httplib host = langs[code] if host[-4:] == '.com': raise Error("Cannot put pages on a .com wikipedia") address = '/w/wiki.phtml?title=%s&action=submit'%space2underline(name) if comment is None: comment=action try: data = urlencode(( ('wpSummary', comment), ('wpMinoredit', '1'), ('wpSave', '1'), ('wpEdittime', edittime[code,space2underline(name)]), ('wpTextbox1', text))) except KeyError: print edittime raise if debug: print text print address print data #return None, None, None headers = {"Content-type": "application/x-www-form-urlencoded"} conn = httplib.HTTPConnection(host) conn.request("POST", address, data, headers) response = conn.getresponse() data = response.read() conn.close() return response.status, response.reason, data
|
uo=urllib.FancyURLopener()
|
uo=MyURLopener()
|
def getUrl(host,address): uo=urllib.FancyURLopener() f=uo.open('http://%s%s'%(host,address)) text=f.read() ct=f.info()['Content-Type'] R=re.compile('charset=([^\'\"]+)') m=R.search(ct) if m: charset=m.group(1) else: charset=None return text,charset
|
edittime[code,space2underline(name)]=m.group(1)
|
edittime[code,link2url(name,code)]=m.group(1)
|
def getPage(code, name, do_edit=1, do_quote=1): """Get the contents of page 'name' from the 'code' language wikipedia""" host = langs[code] if host[-4:]=='.com': # Old algorithm name = re.sub('_', ' ', name) n=[] for x in name.split(): n.append(x[0].capitalize()+x[1:]) name='_'.join(n) #print name else: name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name!=urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if host[-4:] == '.org': # New software address = '/w/wiki.phtml?title='+name if do_edit: address += '&action=edit' elif host[-4:]=='.com': # Old software if not do_edit: raise "can not skip edit on old-software wikipedia" address = '/wiki.cgi?action=edit&id='+name if debug: print host,address text,charset = getUrl(host,address) if do_edit: if debug: print "Raw:",len(text),type(text),text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code].lower()==charset.lower(),"charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code]=charset if code2encoding(code).lower()!=charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code,space2underline(name)]=m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code,name]=m.group(1) else: edittime[code,name]=0 try: i1 = re.search('<textarea[^>]*>',text).end() except AttributeError: #print "No text area.",host,address #print repr(text) raise LockedPage(text) i2 = re.search('</textarea>',text).start() if i2-i1 < 2: # new software raise NoPage() if debug: print text[i1:i2] if text[i1:i2] == 'Describe the new page here.\n': # old software raise NoPage() Rredirect=re.compile(r'\#redirect:? *\[\[(.*?)\]\]',re.I) m=Rredirect.match(text[i1:i2]) if m: raise IsRedirectPage(m.group(1)) assert edittime[code,name]!=0 or host[-4:]=='.com', "No edittime on non-empty page?! %s:%s\n%s"%(code,name,text) x=text[i1:i2] x=unescape(x) else: x=text # If not editing if charset=='utf-8': # Make it to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup('utf-8') try: x,l=decode_func(x) except UnicodeError: print code,name print repr(x) raise # Convert the unicode characters to &# references, and make it ascii. x=str(UnicodeToAsciiHtml(x)) return x
|
edittime[code,name]=m.group(1)
|
edittime[code,link2url(name,code)]=m.group(1)
|
def getPage(code, name, do_edit=1, do_quote=1): """Get the contents of page 'name' from the 'code' language wikipedia""" host = langs[code] if host[-4:]=='.com': # Old algorithm name = re.sub('_', ' ', name) n=[] for x in name.split(): n.append(x[0].capitalize()+x[1:]) name='_'.join(n) #print name else: name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name!=urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if host[-4:] == '.org': # New software address = '/w/wiki.phtml?title='+name if do_edit: address += '&action=edit' elif host[-4:]=='.com': # Old software if not do_edit: raise "can not skip edit on old-software wikipedia" address = '/wiki.cgi?action=edit&id='+name if debug: print host,address text,charset = getUrl(host,address) if do_edit: if debug: print "Raw:",len(text),type(text),text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code].lower()==charset.lower(),"charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code]=charset if code2encoding(code).lower()!=charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code,space2underline(name)]=m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code,name]=m.group(1) else: edittime[code,name]=0 try: i1 = re.search('<textarea[^>]*>',text).end() except AttributeError: #print "No text area.",host,address #print repr(text) raise LockedPage(text) i2 = re.search('</textarea>',text).start() if i2-i1 < 2: # new software raise NoPage() if debug: print text[i1:i2] if text[i1:i2] == 'Describe the new page here.\n': # old software raise NoPage() Rredirect=re.compile(r'\#redirect:? *\[\[(.*?)\]\]',re.I) m=Rredirect.match(text[i1:i2]) if m: raise IsRedirectPage(m.group(1)) assert edittime[code,name]!=0 or host[-4:]=='.com', "No edittime on non-empty page?! %s:%s\n%s"%(code,name,text) x=text[i1:i2] x=unescape(x) else: x=text # If not editing if charset=='utf-8': # Make it to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup('utf-8') try: x,l=decode_func(x) except UnicodeError: print code,name print repr(x) raise # Convert the unicode characters to &# references, and make it ascii. x=str(UnicodeToAsciiHtml(x)) return x
|
edittime[code,name]=0
|
edittime[code,link2url(name,code)]=0
|
def getPage(code, name, do_edit=1, do_quote=1): """Get the contents of page 'name' from the 'code' language wikipedia""" host = langs[code] if host[-4:]=='.com': # Old algorithm name = re.sub('_', ' ', name) n=[] for x in name.split(): n.append(x[0].capitalize()+x[1:]) name='_'.join(n) #print name else: name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name!=urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if host[-4:] == '.org': # New software address = '/w/wiki.phtml?title='+name if do_edit: address += '&action=edit' elif host[-4:]=='.com': # Old software if not do_edit: raise "can not skip edit on old-software wikipedia" address = '/wiki.cgi?action=edit&id='+name if debug: print host,address text,charset = getUrl(host,address) if do_edit: if debug: print "Raw:",len(text),type(text),text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code].lower()==charset.lower(),"charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code]=charset if code2encoding(code).lower()!=charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code,space2underline(name)]=m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code,name]=m.group(1) else: edittime[code,name]=0 try: i1 = re.search('<textarea[^>]*>',text).end() except AttributeError: #print "No text area.",host,address #print repr(text) raise LockedPage(text) i2 = re.search('</textarea>',text).start() if i2-i1 < 2: # new software raise NoPage() if debug: print text[i1:i2] if text[i1:i2] == 'Describe the new page here.\n': # old software raise NoPage() Rredirect=re.compile(r'\#redirect:? *\[\[(.*?)\]\]',re.I) m=Rredirect.match(text[i1:i2]) if m: raise IsRedirectPage(m.group(1)) assert edittime[code,name]!=0 or host[-4:]=='.com', "No edittime on non-empty page?! %s:%s\n%s"%(code,name,text) x=text[i1:i2] x=unescape(x) else: x=text # If not editing if charset=='utf-8': # Make it to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup('utf-8') try: x,l=decode_func(x) except UnicodeError: print code,name print repr(x) raise # Convert the unicode characters to &# references, and make it ascii. x=str(UnicodeToAsciiHtml(x)) return x
|
description = raw_input('Description : ')
|
description = description.encode('utf-8') if description=='': description = raw_input('Description : ')
|
def get_image(fn,target,description): # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] print "The filename on wikipedia will default to:",fn newfn = raw_input("Better name : ") if newfn: fn = newfn # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # A proper description for the submission # What I would have _liked_ to put here: # if description=='': # description = raw_input('Description : ') # Unfortunately, the result is not ASCII then. I assume # but am not sure that the problem is newlines. description = raw_input('Description : ') data = post_multipart(wikipedia.langs[wikipedia.mylang], uploadaddr, (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) return fn
|
pls.append(pl) i += 1 if i >= 60: self.start = pl.urlname() + '!' break wikipedia.getall(wikipedia.getSite(), pls) for pl in pls: if not pl.isRedirectPage(): yield pl.linkname(), pl.get()
|
pls.append(pl) i += 1 if i >= 60: wikipedia.getall(wikipedia.getSite(), pls) for pl in pls: if not pl.isRedirectPage(): yield pl.linkname(), pl.get() i = 0 pls = []
|
def generate(self): while True: i = 0 pls = [] for pl in wikipedia.allpages(start = self.start): pls.append(pl) i += 1 if i >= 60: self.start = pl.urlname() + '!' break wikipedia.getall(wikipedia.getSite(), pls) for pl in pls: if not pl.isRedirectPage(): yield pl.linkname(), pl.get()
|
acceptall = True
|
self.acceptall = True
|
def run(self): """ Starts the robot. """ # Run the generator which will yield PageLinks to pages which might need to be # changed. for pl in self.generator(): print '' try: # Load the page's text from the wiki original_text = pl.get() except wikipedia.NoPage: wikipedia.output(u'Page %s not found' % pl.linkname()) continue except wikipedia.LockedPage: wikipedia.output(u'Skipping locked page %s' % pl.linkname()) continue except wikipedia.IsRedirectPage: continue # skip all pages that contain certain texts if not self.exceptionApplies(original_text): new_text = self.doReplacements(original_text) if new_text == original_text: wikipedia.output('No changes were necessary in %s' % pl.linkname()) else: wikipedia.showColorDiff(original_text, new_text) if not self.acceptall: choice = wikipedia.input(u'Do you want to accept these changes? [y|n|a(ll)]') if choice in ['a', 'A']: acceptall = True if self.acceptall or choice in ['y', 'Y']: pl.put(new_text)
|
newTable = re.sub("(CAPTION|caption)([\w\W]*?)<\/caption>",
|
newTable = re.sub("<(CAPTION|caption)([\w\W]*?)<\/caption>",
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
fix = fixes[fix]
|
try: fix = fixes[fix] except KeyError: wikipedia.output(u'Available predefined fixes are: %s' % fixes.keys()) sys.exit()
|
def generator(source, replacements, exceptions, regex, namespace, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * namespace - namespace to process in case of a SQL dump * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex, namespace): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': for pagename in pagenames: yield wikipedia.PageLink(wikipedia.mylang, pagename)
|
yearBCfmt = {'da':'%d f.Kr.', 'de':'%d v. Chr.', 'en':'%d BC', 'fr':'-%d', 'pl':'%d p.n.e.', 'es':'%d adC', 'eo':'-%d', 'nl':'%d v. Chr.'}
|
yearBCfmt = { 'da':'%d f.Kr.', 'de':'%d v. Chr.', 'en':'%d BC', 'fr':'-%d', 'pl':'%d p.n.e.', 'es':'%d adC', 'eo':'-%d', 'nl':'%d v. Chr.', 'is':'%d f. Kr.' }
|
def __call__(self, m, d): import wikipedia return wikipedia.html2unicode((date_format[m][self.code]) % d, language = self.code)
|
datetable = {'nl': { 'januari':{ 'sl':'%d. januar', 'it':'%d gennaio', 'en':'January %d', 'de':'%d. Januar', 'fr':'%d janvier', 'af':'01-%02d', 'ca':'%d de gener', 'oc':'%d de geni%%C3%%A8r', }, 'februari':{ 'sl':'%d. februar', 'it':'%d febbraio', 'en':'February %d', 'de':'%d. Februar', 'fr':'%d fevrier', 'af':'02-%02d', 'ca':'%d de febrer', 'oc':'%d de febri%%C3%%A8r', }, 'maart':{ 'sl':'%d. marec', 'it':'%d marzo', 'en':'March %d', 'de':'%d. März', 'fr':'%d mars', 'af':'03-%02d', 'ca':'%d de_mar%%C3%%A7', 'oc':'%d de_mar%%C3%%A7', }, 'april':{ 'sl':'%d. april', 'it':'%d aprile', 'en':'April %d', 'de':'%d. April', 'fr':'%d avril', 'af':'04-%02d', 'ca':'%d d\'abril', 'oc':'%d d\'abril', }, 'mei':{ 'sl':'%d. maj', 'it':'%d maggio', 'en':'May %d', 'de':'%d. Mai', 'fr':'%d mai', 'af':'05-%02d', 'ca':'%d de maig', 'oc':'%d de mai', }, 'juni':{ 'sl':'%d. junij', 'it':'%d giugno', 'en':'June %d', 'de':'%d. Juni', 'fr':'%d juin', 'af':'06-%02d', 'ca':'%d de juny', 'oc':'%d de junh', }, 'juli':{ 'sl':'%d. julij', 'it':'%d luglio', 'en':'July %d', 'de':'%d. Juli', 'fr':'%d juillet', 'af':'07-%02d', 'ca':'%d de juliol', 'oc':'%d de julhet', }, 'augustus':{ 'sl':'%d. avgust', 'it':'%d agosto', 'en':'August %d', 'de':'%d. August', 'fr':'%d aout', 'af':'08-%02d', 'ca':'%d d\'agost', 'oc':'%d d\'agost', }, 'september':{'sl':'%d. september', 'it':'%d settembre', 'en':'September %d', 'de':'%d. September', 'fr':'%d septembre', 'af':'09-%02d', 'ca':'%d de setembre', 'oc':'%d de setembre', }, 'oktober':{ 'sl':'%d. oktober', 'it':'%d ottobre', 'en':'October %d', 'de':'%d. Oktober', 'fr':'%d octobre', 'af':'10-%02d', 'ca':'%d d\'octubre', 'oc':'%d d\'octobre', }, 'november':{ 'sl':'%d. november', 'it':'%d novembre', 'en':'November %d', 'de':'%d. November', 'fr':'%d novembre', 'af':'11-%02d', 'ca':'%d de novembre', 'oc':'%d de novembre', }, 'december':{ 'sl':'%d. december', 'it':'%d dicembre', 'en':'December %d', 'de':'%d. Dezember', 'fr':'%d decembre', 'af':'12-%02d', 'ca':'%d de desembre', 'oc':'%d de decembre', }, }}
|
datetable = { 'nl':{ 'januari':{ 'sl':'%d. januar', 'it':'%d gennaio', 'en':'January %d', 'de':'%d. Januar', 'fr':'%d janvier', 'af':'01-%02d', 'ca':'%d de gener', 'oc':'%d de geni%%C3%%A8r', 'is':'%d. janúar', }, 'februari':{ 'sl':'%d. februar', 'it':'%d febbraio', 'en':'February %d', 'de':'%d. Februar', 'fr':'%d fevrier', 'af':'02-%02d', 'ca':'%d de febrer', 'oc':'%d de febri%%C3%%A8r', 'is':'%d. febrúar', }, 'maart':{ 'sl':'%d. marec', 'it':'%d marzo', 'en':'March %d', 'de':'%d. März', 'fr':'%d mars', 'af':'03-%02d', 'ca':'%d de_mar%%C3%%A7', 'oc':'%d de_mar%%C3%%A7', 'is':'%d. mars', }, 'april':{ 'sl':'%d. april', 'it':'%d aprile', 'en':'April %d', 'de':'%d. April', 'fr':'%d avril', 'af':'04-%02d', 'ca':'%d d\'abril', 'oc':'%d d\'abril', 'is':'%d. apríl', }, 'mei':{ 'sl':'%d. maj', 'it':'%d maggio', 'en':'May %d', 'de':'%d. Mai', 'fr':'%d mai', 'af':'05-%02d', 'ca':'%d de maig', 'oc':'%d de mai', 'is':'%d. maí', }, 'juni':{ 'sl':'%d. junij', 'it':'%d giugno', 'en':'June %d', 'de':'%d. Juni', 'fr':'%d juin', 'af':'06-%02d', 'ca':'%d de juny', 'oc':'%d de junh', 'is':'%d. júní', }, 'juli':{ 'sl':'%d. julij', 'it':'%d luglio', 'en':'July %d', 'de':'%d. Juli', 'fr':'%d juillet', 'af':'07-%02d', 'ca':'%d de juliol', 'oc':'%d de julhet', 'is':'%d. júlí', }, 'augustus':{ 'sl':'%d. avgust', 'it':'%d agosto', 'en':'August %d', 'de':'%d. August', 'fr':'%d aout', 'af':'08-%02d', 'ca':'%d d\'agost', 'oc':'%d d\'agost', 'is':'%d. ágúst', }, 'september':{ 'sl':'%d. september', 'it':'%d settembre', 'en':'September %d', 'de':'%d. September', 'fr':'%d septembre', 'af':'09-%02d', 'ca':'%d de setembre', 'oc':'%d de setembre', 'is':'%d. september', }, 'oktober':{ 'sl':'%d. oktober', 'it':'%d ottobre', 'en':'October %d', 'de':'%d. Oktober', 'fr':'%d octobre', 'af':'10-%02d', 'ca':'%d d\'octubre', 'oc':'%d d\'octobre', 'is':'%d. október', }, 'november':{ 'sl':'%d. november', 'it':'%d novembre', 'en':'November %d', 'de':'%d. November', 'fr':'%d novembre', 'af':'11-%02d', 'ca':'%d de novembre', 'oc':'%d de novembre', 'is':'%d. nóvember', }, 'december':{ 'sl':'%d. december', 'it':'%d dicembre', 'en':'December %d', 'de':'%d. Dezember', 'fr':'%d decembre', 'af':'12-%02d', 'ca':'%d de desembre', 'oc':'%d de decembre', 'is':'%d. desember', }, } }
|
def __call__(self, m, d): import wikipedia return wikipedia.html2unicode((date_format[m][self.code]) % d, language = self.code)
|
isoDate = time.strftime('%Y-%m-%d %H:%M:%S', date)
|
isoDate = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(date))
|
def log(self, url, error, containingPage): """ Logs an error report to a text file in the deadlinks subdirectory. """ site = wikipedia.getSite() errorReport = u'* %s\n' % url for (pageTitle, date, error) in self.historyDict[url]: # ISO 8601 formulation isoDate = time.strftime('%Y-%m-%d %H:%M:%S', date) errorReport += "** In [[%s]] on %s, %s\n" % (pageTitle, isoDate, error) wikipedia.output(u"** Logging link for deletion.") txtfilename = 'deadlinks/results-%s-%s.txt' % (site.family.name, site.lang) txtfile = codecs.open(txtfilename, 'a', 'utf-8') self.logCount += 1 if self.logCount % 30 == 0: # insert a caption txtfile.write('=== %s ===\n' % containingPage.title()[:3]) txtfile.write(errorReport) txtfile.close() if self.reportThread and not containingPage.isTalkPage(): self.reportThread.report(url, errorReport, containingPage)
|
elif pl.linkname().lower() == pl.linkname():
|
elif pl.linkname()[0].upper() == pl.linkname()[0]:
|
def sametranslate(pl, arr, same): site = pl.site() for newcode in site.family.seriouslangs: # Put as suggestion into array newname = pl.linkname() if newcode in ['eo','cs'] and same == 'name': newname = newname.split(' ') newname[-1] = newname[-1].upper() newname = ' '.join(newname) x=wikipedia.PageLink(wikipedia.getSite(code=newcode, fam=site.family), newname) if x not in arr: if same == "wiktionary": if site.language() in site.family.nocapitalize: if newcode in site.family.nocapitalize: arr[x] = None elif pl.linkname().lower() == pl.linkname(): arr[x] = None else: arr[x] = None
|
start = None
|
start = u'!'
|
def main(): start = None pageTitle = [] for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'weblinkchecker') if arg: if arg.startswith('-start:'): start = arg[7:] else: pageTitle.append(arg) if start: gen = pagegenerators.AllpagesPageGenerator(start) else: pageTitle = ' '.join(pageTitle) page = wikipedia.Page(wikipedia.getSite(), pageTitle) gen = iter([page]) gen = pagegenerators.PreloadingGenerator(gen) gen = pagegenerators.RedirectFilterPageGenerator(gen) bot = WeblinkCheckerRobot(gen) try: bot.run() finally: i = 0 # Don't wait longer than 30 seconds for threads to finish. while threading.activeCount() > 1 and i < 30: wikipedia.output(u"Waiting for remaining %i threads to finish, please wait..." % (threading.activeCount() - 1)) # don't count the main thread # wait 1 second time.sleep(1) i += 1 if threading.activeCount() > 1: wikipedia.output(u"Killing remaining %i threads..." % (threading.activeCount() - 1)) # Threads will die automatically because they are daemonic bot.history.save()
|
if start:
|
if pageTitle == []:
|
def main(): start = None pageTitle = [] for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'weblinkchecker') if arg: if arg.startswith('-start:'): start = arg[7:] else: pageTitle.append(arg) if start: gen = pagegenerators.AllpagesPageGenerator(start) else: pageTitle = ' '.join(pageTitle) page = wikipedia.Page(wikipedia.getSite(), pageTitle) gen = iter([page]) gen = pagegenerators.PreloadingGenerator(gen) gen = pagegenerators.RedirectFilterPageGenerator(gen) bot = WeblinkCheckerRobot(gen) try: bot.run() finally: i = 0 # Don't wait longer than 30 seconds for threads to finish. while threading.activeCount() > 1 and i < 30: wikipedia.output(u"Waiting for remaining %i threads to finish, please wait..." % (threading.activeCount() - 1)) # don't count the main thread # wait 1 second time.sleep(1) i += 1 if threading.activeCount() > 1: wikipedia.output(u"Killing remaining %i threads..." % (threading.activeCount() - 1)) # Threads will die automatically because they are daemonic bot.history.save()
|
wikipedia.output(u"* %s" % pl.aslink())
|
wikipedia.output(u"* %s" % curpl.aslink())
|
def add_category(sort_by_last_name = False): ''' A robot to mass-add a category to a list of pages. ''' print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input(u'Wikipedia page with list of pages to change:') if listpage: try: pl = wikipedia.Page(wikipedia.getSite(), listpage) except NoPage: wikipedia.output(u'The page ' + listpage + ' could not be loaded from the server.') sys.exit() pagenames = pl.links() else: refpage = wikipedia.input(u'Wikipedia page that is now linked to:') pl = wikipedia.Page(wikipedia.getSite(), refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input(u'Category to add (do not give namespace):') newcat = newcat[:1].capitalize() + newcat[1:] # get edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_add) % newcat) cat_namespace = wikipedia.getSite().category_namespaces()[0] answer = '' for nm in pagenames: pl2 = wikipedia.Page(wikipedia.getSite(), nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input(u'%s [y/n/a(ll)]:' % (pl2.aslink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input(u'This should be used if and only if you are sure that your links are correct! Are you sure? [y/n]:') if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: wikipedia.output(u"%s doesn't exist yet. Ignoring."%(pl2.aslocallink())) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.Page(wikipedia.getSite(),arg.args[0]) wikipedia.output(u"WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink())) else: wikipedia.output(u"Current categories:") for curpl in cats: wikipedia.output(u"* %s" % pl.aslink()) catpl = wikipedia.Page(wikipedia.getSite(), cat_namespace + ':' + newcat) if sort_by_last_name: catpl = sorted_by_last_name(catpl, pl2) if catpl in cats: wikipedia.output(u"%s already has %s"%(pl2.aslocallink(), catpl.aslocallink())) else: wikipedia.output(u'Adding %s' % catpl.aslocallink()) cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text)
|
cats.append(catpl)
|
rawcats.append(catpl)
|
def add_category(sort_by_last_name = False): ''' A robot to mass-add a category to a list of pages. ''' print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input(u'Wikipedia page with list of pages to change:') if listpage: try: pl = wikipedia.Page(wikipedia.getSite(), listpage) except NoPage: wikipedia.output(u'The page ' + listpage + ' could not be loaded from the server.') sys.exit() pagenames = pl.links() else: refpage = wikipedia.input(u'Wikipedia page that is now linked to:') pl = wikipedia.Page(wikipedia.getSite(), refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input(u'Category to add (do not give namespace):') newcat = newcat[:1].capitalize() + newcat[1:] # get edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_add) % newcat) cat_namespace = wikipedia.getSite().category_namespaces()[0] answer = '' for nm in pagenames: pl2 = wikipedia.Page(wikipedia.getSite(), nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input(u'%s [y/n/a(ll)]:' % (pl2.aslink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input(u'This should be used if and only if you are sure that your links are correct! Are you sure? [y/n]:') if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: wikipedia.output(u"%s doesn't exist yet. Ignoring."%(pl2.aslocallink())) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.Page(wikipedia.getSite(),arg.args[0]) wikipedia.output(u"WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink())) else: wikipedia.output(u"Current categories:") for curpl in cats: wikipedia.output(u"* %s" % pl.aslink()) catpl = wikipedia.Page(wikipedia.getSite(), cat_namespace + ':' + newcat) if sort_by_last_name: catpl = sorted_by_last_name(catpl, pl2) if catpl in cats: wikipedia.output(u"%s already has %s"%(pl2.aslocallink(), catpl.aslocallink())) else: wikipedia.output(u'Adding %s' % catpl.aslocallink()) cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text)
|
text = wikipedia.replaceCategoryLinks(text, cats)
|
text = wikipedia.replaceCategoryLinks(text, rawcats)
|
def add_category(sort_by_last_name = False): ''' A robot to mass-add a category to a list of pages. ''' print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input(u'Wikipedia page with list of pages to change:') if listpage: try: pl = wikipedia.Page(wikipedia.getSite(), listpage) except NoPage: wikipedia.output(u'The page ' + listpage + ' could not be loaded from the server.') sys.exit() pagenames = pl.links() else: refpage = wikipedia.input(u'Wikipedia page that is now linked to:') pl = wikipedia.Page(wikipedia.getSite(), refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input(u'Category to add (do not give namespace):') newcat = newcat[:1].capitalize() + newcat[1:] # get edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_add) % newcat) cat_namespace = wikipedia.getSite().category_namespaces()[0] answer = '' for nm in pagenames: pl2 = wikipedia.Page(wikipedia.getSite(), nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input(u'%s [y/n/a(ll)]:' % (pl2.aslink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input(u'This should be used if and only if you are sure that your links are correct! Are you sure? [y/n]:') if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: wikipedia.output(u"%s doesn't exist yet. Ignoring."%(pl2.aslocallink())) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.Page(wikipedia.getSite(),arg.args[0]) wikipedia.output(u"WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink())) else: wikipedia.output(u"Current categories:") for curpl in cats: wikipedia.output(u"* %s" % pl.aslink()) catpl = wikipedia.Page(wikipedia.getSite(), cat_namespace + ':' + newcat) if sort_by_last_name: catpl = sorted_by_last_name(catpl, pl2) if catpl in cats: wikipedia.output(u"%s already has %s"%(pl2.aslocallink(), catpl.aslocallink())) else: wikipedia.output(u'Adding %s' % catpl.aslocallink()) cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text)
|
newURL = None
|
def resolveRedirect(self): ''' Requests the header from the server. If the page is an HTTP redirect, returns the redirect target URL as a string. Otherwise returns None. ''' # TODO: Malconfigured HTTP servers might give a redirect loop. A # recursion counter and limit would help. conn = httplib.HTTPConnection(self.host) conn.request('HEAD', '%s%s' % (self.path, self.query), None, self.header) response = conn.getresponse()
|
|
if redirTarget.startswith('http://') or redirTarget.startswith('https://'): newURL = redirTarget elif redirTarget.startswith('/'): newURL = '%s://%s%s' % (self.protocol, self.host, redirTarget) else: newURL = '%s://%s/%s' % (self.protocol, self.host, redirTarget)
|
if redirTarget: if redirTarget.startswith('http://') or redirTarget.startswith('https://'): newURL = redirTarget elif redirTarget.startswith('/'): newURL = '%s://%s%s' % (self.protocol, self.host, redirTarget) else: newURL = '%s://%s/%s' % (self.protocol, self.host, redirTarget)
|
def resolveRedirect(self): ''' Requests the header from the server. If the page is an HTTP redirect, returns the redirect target URL as a string. Otherwise returns None. ''' # TODO: Malconfigured HTTP servers might give a redirect loop. A # recursion counter and limit would help. conn = httplib.HTTPConnection(self.host) conn.request('HEAD', '%s%s' % (self.path, self.query), None, self.header) response = conn.getresponse()
|
result = {}
|
result = []
|
def translate(page, hints = None, auto = True): """ Please comment your source code! --Daniel Does some magic stuff. Returns a list of pages. """ result = {} site = page.site() if hints: for h in hints: if h.find(':') == -1: # argument given as -hint:xy where xy is a language code codes = h newname = '' else: codes, newname = h.split(':', 1) if newname == '': # if given as -hint:xy or -hint:xy:, assume that there should # be a page in language xy with the same title as the page # we're currently working on newname = page.title() try: number = int(codes) codes = site.family.languages_by_size[:number] except ValueError: if codes == 'all': codes = site.family.languages_by_size elif codes == 'cyril': codes = site.family.cyrilliclangs else: codes = codes.split(',') for newcode in codes: if newcode in site.languages(): if newcode != site.language(): x = wikipedia.Page(site.getSite(code=newcode), newname) if x not in result: result.append(x) else: wikipedia.output(u"Ignoring unknown language code %s"%newcode) # Autotranslate dates into all other languages, the rest will come from existing interwiki links. if auto: # search inside all dictionaries for this link dictName, value = date.getAutoFormat( page.site().language(), page.title() ) if dictName: if not (dictName == 'yearsBC' and date.maxyearBC.has_key(page.site().language()) and value > date.maxyearBC[page.site().language()]) or (dictName == 'yearsAD' and date.maxyearAD.has_key(page.site().language()) and value > date.maxyearAD[page.site().language()]): wikipedia.output(u'TitleTranslate: %s was recognized as %s with value %d' % (page.title(),dictName,value)) for entryLang, entry in date.formats[dictName].iteritems(): if entryLang != page.site().language(): if dictName == 'yearsBC' and date.maxyearBC.has_key(entryLang) and value > date.maxyearBC[entryLang]: pass elif dictName == 'yearsAD' and date.maxyearAD.has_key(entryLang) and value > date.maxyearAD[entryLang]: pass else: newname = entry(value) x = wikipedia.Page( wikipedia.getSite(code=entryLang, fam=site.family), newname ) if x not in result: result.append(x) # add new page return result
|
newtext = replaceLanguageLinks(newtext, interwiki_links)
|
newtext = replaceLanguageLinks(newtext, interwiki_links, site = site)
|
def replaceCategoryLinks(oldtext, new, site = None): """Replace the category links given in the wikitext given in oldtext by the new links given in new. 'new' should be a list of Category objects. """ if site is None: site = getSite() # first remove interwiki links and add them later, so that # interwiki tags appear below category tags if both are set # to appear at the bottom of the article if not site.lang in site.family.categories_last: interwiki_links = getLanguageLinks(oldtext, insite = site, getPageObjects = True) oldtext = removeLanguageLinks(oldtext, site = site) s = categoryFormat(new, insite = site) s2 = removeCategoryLinks(oldtext, site = site) if s: if site.language() in site.family.category_attop: newtext = s + site.family.category_text_separator + s2 else: newtext = s2 + site.family.category_text_separator + s else: newtext = s2 # now re-add interwiki links if not site.lang in site.family.categories_last: newtext = replaceLanguageLinks(newtext, interwiki_links) return newtext
|
if not disambPage.isRedirectPage(): disambigText = editor.edit(disambPage.get(), jumpIndex = m.start())
|
if disambPage.isRedirectPage(): disambredir1 = disambPage.getRedirectTarget() disambredir2 = wikipedia.Page(wikipedia.getSite(), disambredir1) disambigText = editor.edit(disambredir2.get(), jumpIndex = m.start(), highlight = disambredir2.title()) else: disambigText = editor.edit(disambPage.get(), jumpIndex = m.start(), highlight = disambPage.title())
|
def treat(self, refPage, disambPage): """ Parameters: disambPage - The disambiguation page or redirect we don't want anything to link on refPage - A page linking to disambPage Returns False if the user pressed q to completely quit the program. Otherwise, returns True. """ # TODO: break this function up into subroutines!
|
dictionary = cache[site]
|
watchlist = cache[site]
|
def get(site = None): if site is None: site = wikipedia.getSite() if cache.has_key(site): # Use cached copy if it exists. dictionary = cache[site] else: fn = 'watchlists/watchlist-%s-%s.dat' % (site.family.name, site.lang) try: # find out how old our saved dump is (in seconds) file_age = time.time() - os.path.getmtime(fn) # if it's older than 1 month, reload it if file_age > 30 * 24 * 60 * 60: print 'Copy of watchlist is one month old, reloading' refresh(site) except OSError: # no saved watchlist exists yet, retrieve one refresh(site) f = open(fn, 'r') watchlist = pickle.load(f) f.close() # create cached copy cache[site] = watchlist return watchlist
|
wikipedia.output(u"Current categories: ", cats)
|
wikipedia.output(u"Current categories: %s" % cats)
|
def add_category(sort_by_last_name = False): ''' A robot to mass-add a category to a list of pages. ''' print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input(u'Wikipedia page with list of pages to change:') if listpage: try: pl = wikipedia.PageLink(wikipedia.mylang, listpage) except NoPage: wikipedia.output(u'The page ' + listpage + ' could not be loaded from the server.') sys.exit() pagenames = pl.links() else: refpage = wikipedia.input(u'Wikipedia page that is now linked to:') pl = wikipedia.PageLink(wikipedia.mylang, refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input(u'Category to add (do not give namespace):') newcat = newcat[:1].capitalize() + newcat[1:] # get edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.mylang, msg_add) % newcat) cat_namespace = wikipedia.family.category_namespace(wikipedia.mylang) answer = '' for nm in pagenames: pl2 = wikipedia.PageLink(wikipedia.mylang, nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input(u'%s [y/n/a(ll)]:' % (pl2.aslink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input(u'This should be used if and only if you are sure that your links are correct! Are you sure? [y/n]:') if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: wikipedia.output(u"%s doesn't exist yet. Ignoring."%(pl2.aslocallink())) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(wikipedia.mylang,arg.args[0]) wikipedia.output(u"WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink())) else: wikipedia.output(u"Current categories: ", cats) catpl = wikipedia.PageLink(wikipedia.mylang, cat_namespace + ':' + newcat) if sort_by_last_name: catpl = sorted_by_last_name(catpl, pl2) if catpl in cats: wikipedia.output(u"%s already has %s"%(pl2.aslocallink(), catpl.aslocallink())) else: wikipedia.output(u'Adding %s' % catpl.aslocallink()) cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text)
|
Rredir = re.compile('\<li\>\<a href=\"\/w\/wiki.phtml\?title=(.*?)&redirect=no\"')
|
Rredir = re.compile('\<li\>\<a href=".+?" title="(.*?)"')
|
def retrieve_broken_redirects(self): if self.source == None: # retrieve information from the live wiki's maintenance page mysite = wikipedia.getSite() # broken redirect maintenance page's URL path = mysite.broken_redirects_address(default_limit = False) print 'Retrieving special page...' maintenance_txt = wikipedia.getUrl(mysite, path) # regular expression which finds redirects which point to a non-existing page inside the HTML Rredir = re.compile('\<li\>\<a href=\"\/w\/wiki.phtml\?title=(.*?)&redirect=no\"') redir_names = Rredir.findall(maintenance_txt) print 'Retrieved %d redirects from special page.\n' % len(redir_names) for redir_name in redir_names: yield redir_name else: print 'Step 1: Getting a list of all redirects' redirs = self.get_redirects_from_dump() print 'Step 2: Getting a list of all page titles' dump = sqldump.SQLdump(self.source, wikipedia.myencoding()) # We save page titles in a dictionary where all values are None, so we # use it as a list. "dict.has_key(x)" is much faster than "x in list" # because "dict.has_key(x)" uses a hashtable while "x in list" compares # x with each list element pagetitles = {} for entry in dump.entries(): pagetitles[entry.full_title()] = None print 'Step 3: Comparing.' brokenredirs = [] for (key, value) in redirs.iteritems(): if not pagetitles.has_key(value): yield key
|
for hit in R.findall(text):
|
for hit in R.findall(tet):
|
def allpages(start = '%21%200'): """Iterate over all Wikipedia pages in the home language, starting at the given page. This will raise an exception if the home language does not have a translation of 'Special' listed above.""" import sys start = link2url(start, code = mylang) m=0 while 1: text = getPage(mylang, '%s:Allpages&printable=yes&from=%s'%(special[mylang],start),do_quote=0,do_edit=0) #print text R = re.compile('/wiki/(.*?)" *class=[\'\"]printable') n = 0 for hit in R.findall(text): if not ':' in hit: # Some dutch exceptions. if not hit in ['Hoofdpagina','In_het_nieuws']: n = n + 1 yield PageLink(mylang, url2link(hit, code = mylang, incode = mylang)) start = hit + '%20%200' if n < 100: break m += n sys.stderr.write('AllPages: %d done; continuing from "%s";\n'%(m,url2link(start,code='nl',incode='ascii')))
|
Rref = re.compile('<li><a href.* title="([^"]*)"')
|
Rref = re.compile('<li><a href.*="([^"]*)"')
|
def getReferences(pl): host = langs[pl.code()] url = "/w/wiki.phtml?title=%s:Whatlinkshere&target=%s"%(special[mylang], pl.urlname()) txt, charset = getUrl(host,url) Rref = re.compile('<li><a href.* title="([^"]*)"') x = Rref.findall(txt) x.sort() # Remove duplicates for i in range(len(x)-1, 0, -1): if x[i] == x[i-1]: del x[i] return x
|
self.problem("Found link to %s" % pl.aslink() )
|
self.problem("Found link to %s" % pl.aslink(forceInterwiki = True) )
|
def assemble(self): # No errors have been seen so far nerr = 0 # Build up a dictionary of all links found, with the site as key. # Each value will be a list. mysite = wikipedia.getSite() new = {} for pl in self.done.keys(): site = pl.site() if site == mysite and pl.exists() and not pl.isRedirectPage(): if pl != self.inpl: self.problem("Found link to %s" % pl.aslink() ) self.whereReport(pl) nerr += 1 elif pl.exists() and not pl.isRedirectPage(): if site in new: new[site].append(pl) else: new[site] = [pl] # See if new{} contains any problematic values result = {} for k, v in new.items(): if len(v) > 1: nerr += 1 self.problem("Found more than one link for %s"%k) if nerr == 0 and len( self.foundin[self.inpl] ) == 0 and len(new) != 0: self.problem(u'None of %i other languages refers back to %s' % (len(new), self.inpl.aslink())) # If there are any errors, we need to go through all # items manually. if nerr > 0:
|
for pl2 in v:
|
for page2 in v:
|
def assemble(self): # No errors have been seen so far nerr = 0 # Build up a dictionary of all links found, with the site as key. # Each value will be a list. mysite = wikipedia.getSite() new = {} for pl in self.done.keys(): site = pl.site() if site == mysite and pl.exists() and not pl.isRedirectPage(): if pl != self.inpl: self.problem("Found link to %s" % pl.aslink() ) self.whereReport(pl) nerr += 1 elif pl.exists() and not pl.isRedirectPage(): if site in new: new[site].append(pl) else: new[site] = [pl] # See if new{} contains any problematic values result = {} for k, v in new.items(): if len(v) > 1: nerr += 1 self.problem("Found more than one link for %s"%k) if nerr == 0 and len( self.foundin[self.inpl] ) == 0 and len(new) != 0: self.problem(u'None of %i other languages refers back to %s' % (len(new), self.inpl.aslink())) # If there are any errors, we need to go through all # items manually. if nerr > 0:
|
wikipedia.output(u" (%d) Found link to %s in:" % (i, pl2.aslink())) self.whereReport(pl2, indent=8)
|
wikipedia.output(u" (%d) Found link to %s in:" % (i, page2.aslink(forceInterwiki = True))) self.whereReport(page2, indent=8)
|
def assemble(self): # No errors have been seen so far nerr = 0 # Build up a dictionary of all links found, with the site as key. # Each value will be a list. mysite = wikipedia.getSite() new = {} for pl in self.done.keys(): site = pl.site() if site == mysite and pl.exists() and not pl.isRedirectPage(): if pl != self.inpl: self.problem("Found link to %s" % pl.aslink() ) self.whereReport(pl) nerr += 1 elif pl.exists() and not pl.isRedirectPage(): if site in new: new[site].append(pl) else: new[site] = [pl] # See if new{} contains any problematic values result = {} for k, v in new.items(): if len(v) > 1: nerr += 1 self.problem("Found more than one link for %s"%k) if nerr == 0 and len( self.foundin[self.inpl] ) == 0 and len(new) != 0: self.problem(u'None of %i other languages refers back to %s' % (len(new), self.inpl.aslink())) # If there are any errors, we need to go through all # items manually. if nerr > 0:
|
updatedSites = []
|
def finish(self, sa = None): """Round up the subject, making any necessary changes. This method should be called exactly once after the todo list has gone empty.
|
|
self.replaceLinks(page, new, sa)
|
if self.replaceLinks(page, new, sa): updatedSites.append(site) for site in updatedSites: del new[site]
|
def finish(self, sa = None): """Round up the subject, making any necessary changes. This method should be called exactly once after the todo list has gone empty.
|
try: wikipedia.showDiff(oldtext, newtext) except: wikipedia.output(u'Error executing showDiff')
|
wikipedia.showDiff(oldtext, newtext)
|
def replaceLinks(self, pl, new, sa): wikipedia.output(u"Updating links on page %s." % pl.aslink(forceInterwiki = True))
|
if globalvar.forreal:
|
ask = False if removing: self.problem('removing: %s'%(",".join([x.lang for x in removing]))) ask = True if globalvar.force:
|
def replaceLinks(self, pl, new, sa): wikipedia.output(u"Updating links on page %s." % pl.aslink(forceInterwiki = True))
|
if removing: self.problem('removing: %s'%(",".join([x.lang for x in removing]))) ask = True if globalvar.force: ask = False if globalvar.confirm: ask = True if ask: if globalvar.autonomous: answer = 'n'
|
if globalvar.confirm: ask = True if ask: if globalvar.autonomous: answer = 'n' else: answer = wikipedia.inputChoice(u'Submit?', ['Yes', 'No'], ['y', 'N'], 'N') else: answer = 'y' if answer == 'y': if sa: while wikipedia.get_throttle.waittime() + 2.0 < wikipedia.put_throttle.waittime(): print "NOTE: Performing a recursive query first to save time...." qdone = sa.oneQuery() if not qdone: break print "NOTE: Updating live wiki..." timeout=60 while 1: try: status, reason, data = pl.put(newtext, comment=u'robot '+mods) except (socket.error, IOError): if timeout>3600: raise wikipedia.output(u"ERROR putting page. Sleeping %i seconds before trying again" % timeout) timeout *= 2 time.sleep(timeout)
|
def replaceLinks(self, pl, new, sa): wikipedia.output(u"Updating links on page %s." % pl.aslink(forceInterwiki = True))
|
answer = wikipedia.inputChoice(u'Submit?', ['Yes', 'No'], ['y', 'N'], 'N')
|
break if str(status) == '302': return True
|
def replaceLinks(self, pl, new, sa): wikipedia.output(u"Updating links on page %s." % pl.aslink(forceInterwiki = True))
|
answer = 'y' if answer == 'y': if sa: while wikipedia.get_throttle.waittime() + 2.0 < wikipedia.put_throttle.waittime(): print "NOTE: Performing a recursive query first to save time...." qdone = sa.oneQuery() if not qdone: break print "NOTE: Updating live wiki..." timeout=60 while 1: try: print "DBG> updating ", pl status, reason, data = pl.put(newtext, comment=u'robot '+mods) except (socket.error, IOError): if timeout>3600: raise print "ERROR putting page. Sleeping %d seconds before trying again"%timeout timeout=timeout*2 time.sleep(timeout) else: break if str(status) != '302': print status, reason
|
print status, reason return False
|
def replaceLinks(self, pl, new, sa): wikipedia.output(u"Updating links on page %s." % pl.aslink(forceInterwiki = True))
|
for site in new.keys(): pl = new[site] if not pl.section():
|
for site, page in new.iteritems(): if not page.section():
|
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" try: for site in new.keys(): pl = new[site] if not pl.section(): shouldlink = new.values() + [self.inpl] linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked: for l in linked: if l.site() == xpl.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True))) break else: wikipedia.output(u"WARNING: %s: %s does not link to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) # Check for superfluous links for xpl in linked: if not xpl in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpl.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) except (socket.error, IOError): wikipedia.output(u'ERROR: could not report backlinks')
|
linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked:
|
linked = page.interwiki() for xpage in shouldlink: if xpage != page and not xpage in linked:
|
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" try: for site in new.keys(): pl = new[site] if not pl.section(): shouldlink = new.values() + [self.inpl] linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked: for l in linked: if l.site() == xpl.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True))) break else: wikipedia.output(u"WARNING: %s: %s does not link to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) # Check for superfluous links for xpl in linked: if not xpl in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpl.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) except (socket.error, IOError): wikipedia.output(u'ERROR: could not report backlinks')
|
if l.site() == xpl.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True)))
|
if l.site() == xpage.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (page.site().family.name, page.aslink(forceInterwiki = True), xpage.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True)))
|
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" try: for site in new.keys(): pl = new[site] if not pl.section(): shouldlink = new.values() + [self.inpl] linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked: for l in linked: if l.site() == xpl.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True))) break else: wikipedia.output(u"WARNING: %s: %s does not link to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) # Check for superfluous links for xpl in linked: if not xpl in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpl.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) except (socket.error, IOError): wikipedia.output(u'ERROR: could not report backlinks')
|
wikipedia.output(u"WARNING: %s: %s does not link to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True)))
|
wikipedia.output(u"WARNING: %s: %s does not link to %s" % (page.site().family.name, page.aslink(forceInterwiki = True), xpage.aslink(forceInterwiki = True)))
|
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" try: for site in new.keys(): pl = new[site] if not pl.section(): shouldlink = new.values() + [self.inpl] linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked: for l in linked: if l.site() == xpl.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True))) break else: wikipedia.output(u"WARNING: %s: %s does not link to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) # Check for superfluous links for xpl in linked: if not xpl in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpl.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) except (socket.error, IOError): wikipedia.output(u'ERROR: could not report backlinks')
|
for xpl in linked: if not xpl in shouldlink:
|
for xpage in linked: if not xpage in shouldlink:
|
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" try: for site in new.keys(): pl = new[site] if not pl.section(): shouldlink = new.values() + [self.inpl] linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked: for l in linked: if l.site() == xpl.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True))) break else: wikipedia.output(u"WARNING: %s: %s does not link to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) # Check for superfluous links for xpl in linked: if not xpl in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpl.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) except (socket.error, IOError): wikipedia.output(u'ERROR: could not report backlinks')
|
if l.site() == xpl.site():
|
if l.site() == xpage.site():
|
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" try: for site in new.keys(): pl = new[site] if not pl.section(): shouldlink = new.values() + [self.inpl] linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked: for l in linked: if l.site() == xpl.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True))) break else: wikipedia.output(u"WARNING: %s: %s does not link to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) # Check for superfluous links for xpl in linked: if not xpl in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpl.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) except (socket.error, IOError): wikipedia.output(u'ERROR: could not report backlinks')
|
wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True)))
|
wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (page.site().family.name, page.aslink(forceInterwiki = True), xpage.aslink(forceInterwiki = True)))
|
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" try: for site in new.keys(): pl = new[site] if not pl.section(): shouldlink = new.values() + [self.inpl] linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked: for l in linked: if l.site() == xpl.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True))) break else: wikipedia.output(u"WARNING: %s: %s does not link to %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) # Check for superfluous links for xpl in linked: if not xpl in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpl.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (pl.site().family.name, pl.aslink(forceInterwiki = True), xpl.aslink(forceInterwiki = True))) except (socket.error, IOError): wikipedia.output(u'ERROR: could not report backlinks')
|
print pl.site().hostname() print pl.site().delete_address(pl.urlname())
|
def encode_multipart_formdata(fields): """ fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body
|
|
print arg
|
def main(args): filename = '' description = [] keep = False wiki = '' for arg in args: if wikipedia.argHandler(arg, 'upload'): print arg if arg.startswith('-keep'): keep = True elif arg.startswith('-wiki:'): wiki=arg[6:] elif filename == '': filename = arg else: description.append(arg) description = ' '.join(description) bot = UploadRobot(filename, description, wiki, keep) bot.run()
|
|
except IsRedirectPage:
|
except wikipedia.IsRedirectPage:
|
def handlebadpage(self): try: self.content = self.page.get() except IsRedirectPage: wikipedia.output(u'Already redirected, skipping.') return
|
wikipedia.output(u'Found: "',d, '" in content, nothing necessary')
|
wikipedia.output(u'Found: "%s" in content, nothing necessary'%d)
|
def handlebadpage(self): try: self.content = self.page.get() except IsRedirectPage: wikipedia.output(u'Already redirected, skipping.') return
|
output(url2unicode("Creating page [[%s:%s]]" % site.lang, name, site = site))
|
output(url2unicode("Creating page [[%s:%s]]" % (site.lang, name), site = site))
|
def putPage(site, name, text, comment = None, watchArticle = False, minorEdit = True, newPage = False, token = None, gettoken = False): """Upload 'text' on page 'name' to the 'site' wiki. Use of this routine can normally be avoided; use Page.put instead. """ safetuple = () # safetuple keeps the old value, but only if we did not get a token yet could if site.version() >= "1.4": if gettoken or not token: token = site.getToken(getagain = gettoken) else: safetuple = (site,name,text,comment,watchArticle,minorEdit,newPage) # Check whether we are not too quickly after the previous putPage, and # wait a bit until the interval is acceptable put_throttle() # Which web-site host are we submitting to? host = site.hostname() # Get the address of the page on that host. address = site.put_address(space2underline(name)) # If no comment is given for the change, use the default if comment is None: comment=action # Use the proper encoding for the comment comment = comment.encode(site.encoding()) try: # Encode the text into the right encoding for the wiki if type(text) != type(u''): print 'Warning: wikipedia.putPage() got non-unicode page content. Please report this.' print text text = text.encode(site.encoding()) predata = [ ('wpSave', '1'), ('wpSummary', comment), ('wpTextbox1', text)] # Except if the page is new, we need to supply the time of the # previous version to the wiki to prevent edit collisions if newPage: predata.append(('wpEdittime', '')) else: predata.append(('wpEdittime', edittime[repr(site), link2url(name, site = site)])) # Pass the minorEdit and watchArticle arguments to the Wiki. if minorEdit and minorEdit != '0': predata.append(('wpMinoredit', '1')) if watchArticle and watchArticle != '0': predata.append(('wpWatchthis', '1')) # Give the token, but only if one is supplied. if token: predata.append(('wpEditToken', token)) # Encode all of this into a HTTP request data = urlencode(tuple(predata)) except KeyError: print edittime raise if newPage: output(url2unicode("Creating page [[%s:%s]]" % site.lang, name, site = site)) else: output(url2unicode("Changing page [[%s:%s]]" % site.lang, name, site = site)) # Submit the prepared information conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "PythonWikipediaBot/1.0") if site.cookies(): conn.putheader('Cookie',site.cookies()) conn.endheaders() conn.send(data) # Prepare the return values response = conn.getresponse() data = response.read().decode(myencoding()) conn.close() if data != u'': editconflict = mediawiki_messages.get('editconflict').replace('$1', '') if '<title>%s' % editconflict in data: raise EditConflict() elif safetuple and "<" in data: # We might have been using an outdated token print "Changing page has failed. Retrying." putPage(safetuple[0], safetuple[1], safetuple[2], comment=safetuple[3], watchArticle=safetuple[4], minorEdit=safetuple[5], newPage=safetuple[6], token=None,gettoken=True) else: output(data) return response.status, response.reason, data
|
output(url2unicode("Changing page [[%s:%s]]" % site.lang, name, site = site))
|
output(url2unicode("Changing page [[%s:%s]]" % (site.lang, name), site = site))
|
def putPage(site, name, text, comment = None, watchArticle = False, minorEdit = True, newPage = False, token = None, gettoken = False): """Upload 'text' on page 'name' to the 'site' wiki. Use of this routine can normally be avoided; use Page.put instead. """ safetuple = () # safetuple keeps the old value, but only if we did not get a token yet could if site.version() >= "1.4": if gettoken or not token: token = site.getToken(getagain = gettoken) else: safetuple = (site,name,text,comment,watchArticle,minorEdit,newPage) # Check whether we are not too quickly after the previous putPage, and # wait a bit until the interval is acceptable put_throttle() # Which web-site host are we submitting to? host = site.hostname() # Get the address of the page on that host. address = site.put_address(space2underline(name)) # If no comment is given for the change, use the default if comment is None: comment=action # Use the proper encoding for the comment comment = comment.encode(site.encoding()) try: # Encode the text into the right encoding for the wiki if type(text) != type(u''): print 'Warning: wikipedia.putPage() got non-unicode page content. Please report this.' print text text = text.encode(site.encoding()) predata = [ ('wpSave', '1'), ('wpSummary', comment), ('wpTextbox1', text)] # Except if the page is new, we need to supply the time of the # previous version to the wiki to prevent edit collisions if newPage: predata.append(('wpEdittime', '')) else: predata.append(('wpEdittime', edittime[repr(site), link2url(name, site = site)])) # Pass the minorEdit and watchArticle arguments to the Wiki. if minorEdit and minorEdit != '0': predata.append(('wpMinoredit', '1')) if watchArticle and watchArticle != '0': predata.append(('wpWatchthis', '1')) # Give the token, but only if one is supplied. if token: predata.append(('wpEditToken', token)) # Encode all of this into a HTTP request data = urlencode(tuple(predata)) except KeyError: print edittime raise if newPage: output(url2unicode("Creating page [[%s:%s]]" % site.lang, name, site = site)) else: output(url2unicode("Changing page [[%s:%s]]" % site.lang, name, site = site)) # Submit the prepared information conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "PythonWikipediaBot/1.0") if site.cookies(): conn.putheader('Cookie',site.cookies()) conn.endheaders() conn.send(data) # Prepare the return values response = conn.getresponse() data = response.read().decode(myencoding()) conn.close() if data != u'': editconflict = mediawiki_messages.get('editconflict').replace('$1', '') if '<title>%s' % editconflict in data: raise EditConflict() elif safetuple and "<" in data: # We might have been using an outdated token print "Changing page has failed. Retrying." putPage(safetuple[0], safetuple[1], safetuple[2], comment=safetuple[3], watchArticle=safetuple[4], minorEdit=safetuple[5], newPage=safetuple[6], token=None,gettoken=True) else: output(data) return response.status, response.reason, data
|
output(url2unicode(u'Getting page [[%s:%s]]' % site.lang, name, site = site))
|
output(url2unicode(u'Getting page [[%s:%s]]' % (site.lang, name), site = site))
|
def getEditPage(site, name, read_only = False, do_quote = True, get_redirect=False, throttle = True): """ Get the contents of page 'name' from the 'site' wiki Do not use this directly; for 99% of the possible ideas you can use the Page object instead. Arguments: site - the wiki site name - the page name read_only - If true, doesn't raise LockedPage exceptions. do_quote - ??? (TODO: what is this for?) get_redirect - Get the contents, even if it is a redirect page This routine returns a unicode string containing the wiki text. """ isWatched = False name = re.sub(' ', '_', name) output(url2unicode(u'Getting page [[%s:%s]]' % site.lang, name, site = site)) # A heuristic to encode the URL into %XX for characters that are not # allowed in a URL. if not '%' in name and do_quote: # It should not have been done yet if name != urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) path = site.edit_address(name) # Make sure Brion doesn't get angry by waiting if the last time a page # was retrieved was not long enough ago. if throttle: get_throttle() # Try to retrieve the page until it was successfully loaded (just in case # the server is down or overloaded) # wait for retry_idle_time minutes (growing!) between retries. retry_idle_time = 1 while True: starttime = time.time() text = getUrl(site, path) get_throttle.setDelay(time.time() - starttime)\ # Look for the edit token R = re.compile(r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"") tokenloc = R.search(text) if tokenloc: site.puttoken(tokenloc.group(1)) elif not site.getToken(getalways = False): site.puttoken('') # Look if the page is on our watchlist R = re.compile(r"\<input tabindex='[\d]+' type='checkbox' name='wpWatchthis' checked='checked'") matchWatching = R.search(text) if matchWatching: isWatched = True if not read_only: # check if we're logged in p=re.compile('userlogin') if p.search(text) != None: output(u'Warning: You\'re probably not logged in on %s:' % repr(site)) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[repr(site), link2url(name, site = site)] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[repr(site), link2url(name, site = site)] = m.group(1) else: edittime[repr(site), link2url(name, site = site)] = "0" # Extract the actual text from the textedit field try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %i minutes..." % (site.hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(site, name) m = redirectRe(site).match(text[i1:i2]) if m and not get_redirect: output(u"DBG> %s is redirect to %s" % (url2unicode(name, site = site), m.group(1))) raise IsRedirectPage(m.group(1)) if edittime[repr(site), link2url(name, site = site)] == "0" and not read_only: output(u"DBG> page may be locked?!") raise LockedPage() x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] return x, isWatched
|
wikipedia.getall(wikipedia.getSite(), pages, throttle=False)
|
site = pages[0].site() wikipedia.getall(site, pages, throttle=False) except IndexError: pass
|
def preload(self, pages): try: wikipedia.getall(wikipedia.getSite(), pages, throttle=False) except wikipedia.SaxError: # Ignore this error, and get the pages the traditional way later. pass
|
output("WARNING: Section does not exist: %s" % self.linkname())
|
output(u"WARNING: Section does not exist: %s" % self.linkname())
|
def get(self, read_only = False, force = False, get_redirect=False, throttle = True): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code:
|
output("WARNING: Section does not exist: %s" % self)
|
output(u"WARNING: Section does not exist: %s" %pl2.linkname())
|
def oneDone(self, title, timestamp, text): pl = Page(self.site, title) for pl2 in self.pages: if Page(self.site, pl2.sectionFreeLinkname()) == pl: if not hasattr(pl2,'_contents') and not hasattr(pl2,'_getexception'): break else: print repr(title) print repr(pl) print repr(self.pages) print "BUG> bug, page not found in list" raise PageNotFound m = redirectRe(self.site).match(text) if m: edittime[repr(self.site), link2url(title, site = self.site)] = timestamp redirectto=m.group(1) if pl.site().lang=="eo": for c in 'CGHJSU': for c2 in c,c.lower(): for x in 'Xx': redirectto = redirectto.replace(c2+x,c2+x+x+x+x) pl2._getexception = IsRedirectPage pl2._redirarg = redirectto else: if len(text)<50: output(u"DBG> short text in %s:" % pl2.aslink()) output(text) hn = pl2.section() if hn: m = re.search("== *%s *==" % hn, text) if not m: output("WARNING: Section does not exist: %s" % self) else: # Store the content pl2._contents = text # Store the time stamp edittime[repr(self.site), link2url(title, site = self.site)] = timestamp else: # Store the content pl2._contents = text # Store the time stamp edittime[repr(self.site), link2url(title, site = self.site)] = timestamp
|
txt = "
|
txt = redir.get(get_redirect=True).replace('[['+target,'[['+second_target)
|
def fix_double_redirects(self): mysite = wikipedia.getSite() for redir_name in self.generator.retrieve_double_redirects(): print '' redir = wikipedia.Page(mysite, redir_name) try: target = redir.getRedirectTarget() except wikipedia.IsNotRedirectPage: wikipedia.output(u'%s is not a redirect.' % redir.title()) except wikipedia.NoPage: wikipedia.output(u'%s doesn\'t exist.' % redir.title()) else: try: second_redir = wikipedia.Page(mysite, target) second_target = second_redir.getRedirectTarget() except wikipedia.IsNotRedirectPage: wikipedia.output(u'%s is not a redirect.' % second_redir.title()) except wikipedia.NoPage: wikipedia.output(u'%s doesn\'t exist.' % second_redir.title()) else: txt = "#REDIRECT [[%s]]" % second_target try: status, reason, data = redir.put(txt) except wikipedia.LockedPage: wikipedia.output(u'%s is locked.' % redir.title()) print status, reason
|
wikipedia.link2url(start)
|
start=link2url(start)
|
def allnlpages(start='%20%200'): wikipedia.link2url(start) m=0 while 1: text=wikipedia.getPage('nl','Speciaal:Allpages&printable=yes&from=%s'%start,do_quote=0,do_edit=0) #print text R=re.compile('/wiki/(.*?)" *class=[\'\"]printable') n=0 for hit in R.findall(text): if not ':' in hit: if not hit in ['Hoofdpagina','In_het_nieuws']: n=n+1 yield wikipedia.url2link(hit) start=hit+'%20%200' if n<100: break m=m+n sys.stderr.write('AllNLPages: %d done; continuing from "%s";\n'%(m,wikipedia.link2url(start)))
|
text=wikipedia.getPage('nl','Speciaal:Allpages&printable=yes&from=%s'%start,do_quote=0,do_edit=0)
|
text=getPage('nl','Speciaal:Allpages&printable=yes&from=%s'%start,do_quote=0,do_edit=0)
|
def allnlpages(start='%20%200'): wikipedia.link2url(start) m=0 while 1: text=wikipedia.getPage('nl','Speciaal:Allpages&printable=yes&from=%s'%start,do_quote=0,do_edit=0) #print text R=re.compile('/wiki/(.*?)" *class=[\'\"]printable') n=0 for hit in R.findall(text): if not ':' in hit: if not hit in ['Hoofdpagina','In_het_nieuws']: n=n+1 yield wikipedia.url2link(hit) start=hit+'%20%200' if n<100: break m=m+n sys.stderr.write('AllNLPages: %d done; continuing from "%s";\n'%(m,wikipedia.link2url(start)))
|
yield wikipedia.url2link(hit)
|
yield url2link(hit)
|
def allnlpages(start='%20%200'): wikipedia.link2url(start) m=0 while 1: text=wikipedia.getPage('nl','Speciaal:Allpages&printable=yes&from=%s'%start,do_quote=0,do_edit=0) #print text R=re.compile('/wiki/(.*?)" *class=[\'\"]printable') n=0 for hit in R.findall(text): if not ':' in hit: if not hit in ['Hoofdpagina','In_het_nieuws']: n=n+1 yield wikipedia.url2link(hit) start=hit+'%20%200' if n<100: break m=m+n sys.stderr.write('AllNLPages: %d done; continuing from "%s";\n'%(m,wikipedia.link2url(start)))
|
sys.stderr.write('AllNLPages: %d done; continuing from "%s";\n'%(m,wikipedia.link2url(start)))
|
sys.stderr.write('AllNLPages: %d done; continuing from "%s";\n'%(m,url2link(start)))
|
def allnlpages(start='%20%200'): wikipedia.link2url(start) m=0 while 1: text=wikipedia.getPage('nl','Speciaal:Allpages&printable=yes&from=%s'%start,do_quote=0,do_edit=0) #print text R=re.compile('/wiki/(.*?)" *class=[\'\"]printable') n=0 for hit in R.findall(text): if not ':' in hit: if not hit in ['Hoofdpagina','In_het_nieuws']: n=n+1 yield wikipedia.url2link(hit) start=hit+'%20%200' if n<100: break m=m+n sys.stderr.write('AllNLPages: %d done; continuing from "%s";\n'%(m,wikipedia.link2url(start)))
|
result[code] = m.group(1)
|
t=m.group(1) if '|' in t: t.replace('|','') result[code] = t
|
def getLanguageLinks(text): """Returns a dictionary of other language links mentioned in the text in the form {code:pagename}""" result = {} for code in langs: m=re.search(r'\[\['+code+':([^\]]*)\]\]', text) if m: if m.group(1): result[code] = m.group(1) else: print "ERROR: empty link to %s:"%(code) return result
|
elif arg.startswith('-page:'):
|
elif arg.startswith('-page'):
|
def main(): template_names = [] resolve = False remove = False namespaces = [] editSummary = '' acceptAll = False pageTitles = [] extras = False # If xmlfilename is None, references will be loaded from the live wiki. xmlfilename = None new = None # read command line parameters for arg in wikipedia.handleArgs(): if arg == '-remove': remove = True elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] elif arg.startswith('-namespace:'): namespaces.append(int(arg[len('-namespace:'):])) elif arg.startswith('-summary:'): editSummary = arg[len('-summary:'):] elif arg.startswith('-always'): acceptAll = True elif arg.startswith('-page:'): if len(arg) == len('-page'): pageTitles.append(wikipedia.input(u'Which page do you want to chage?')) else: pageTitles.append(arg[len('-page:'):]) elif arg.startswith('-extras'): extras = True else: template_names.append(arg) if extras: old = template_names elif len(template_names) == 0 or len(template_names) > 2: wikipedia.showHelp() sys.exit() else: old = template_names[0] if len(template_names) == 2: new = template_names[1] mysite = wikipedia.getSite() ns = mysite.template_namespace() if extras: oldTemplate = [] for thisPage in old: oldTemplate.append(wikipedia.Page(mysite, ns + ':' + thisPage)) else: oldTemplate = wikipedia.Page(mysite, ns + ':' + old) if xmlfilename: gen = XmlDumpTemplatePageGenerator(oldTemplate, xmlfilename) elif pageTitles: pages = [wikipedia.Page(wikipedia.getSite(), pageTitle) for pageTitle in pageTitles] gen = iter(pages) elif extras: gen = pagegenerators.ReferringPagesGenerator(oldTemplate, onlyTemplateInclusion = True) else: gen = pagegenerators.ReferringPageGenerator(oldTemplate, onlyTemplateInclusion = True) if namespaces: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen) #At this point, if extras is set to False, old is the name of a single template. #But if extras is set to True, old is a whole list of templates to be replaced. bot = TemplateRobot(preloadingGen, old, new, remove, editSummary, acceptAll, extras) bot.run()
|
m = re.search('value="(\d+)" name=["\']wpEdittime["\']', text) if m: self._editTime = m.group(1) else: self._editTime = "0" m = re.search('value="(\d+)" name=["\']wpStarttime["\']', text) if m: self._startTime = m.group(1) else: self._startTime = "0"
|
def getEditPage(self, get_redirect=False, throttle = True, sysop = False): """ Get the contents of the Page via the edit page. Do not use this directly, use get() instead. Arguments: get_redirect - Get the contents, even if it is a redirect page This routine returns a unicode string containing the wiki text. """ isWatched = False editRestriction = None output(u'Getting page %s' % self.aslink()) path = self.site().edit_address(self.urlname()) # Make sure Brion doesn't get angry by waiting if the last time a page # was retrieved was not long enough ago. if throttle: get_throttle() # Try to retrieve the page until it was successfully loaded (just in case # the server is down or overloaded) # wait for retry_idle_time minutes (growing!) between retries. retry_idle_time = 1 while True: starttime = time.time() try: text = self.site().getUrl(path, sysop = sysop) except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: Could not load %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site().hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue # Extract the actual text from the textedit field try: i1 = re.search('<textarea[^>]*>', text).end() i2 = re.search('</textarea>', text).start() except AttributeError: # find out if the username or IP has been blocked if text.find(mediawiki_messages.get('blockedtitle', self.site())) != -1: raise UserBlocked(self.site(), self.title()) else: # We assume that the server is down. Wait some time, then try again. print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site().hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue # We now know that there is a textarea. # Look for the edit token Rwatch = re.compile(r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"") tokenloc = Rwatch.search(text) if tokenloc: self.site().putToken(tokenloc.group(1), sysop = sysop) elif not self.site().getToken(getalways = False): self.site().putToken('', sysop = sysop) # Find out if page actually exists. Only existing pages have a # version history tab. RversionTab = re.compile(r'<li id="ca-history"><a href=".*title=.*&action=history">.*</a></li>') matchVersionTab = RversionTab.search(text) if not matchVersionTab: raise NoPage(self.site(), self.title()) # Look if the page is on our watchlist R = re.compile(r"\<input tabindex='[\d]+' type='checkbox' name='wpWatchthis' checked='checked'") matchWatching = R.search(text) if matchWatching: isWatched = True # Get timestamps m = re.search('value="(\d+)" name=["\']wpEdittime["\']', text) if m: self._editTime = m.group(1) else: self._editTime = "0" m = re.search('value="(\d+)" name=["\']wpStarttime["\']', text) if m: self._startTime = m.group(1) else: self._startTime = "0" # Now process the contents of the textarea m = self.site().redirectRegex().match(text[i1:i2]) if self._editTime == "0": output(u"DBG> page may be locked?!") editRestriction = 'sysop' if m: if get_redirect: self._redirarg = m.group(1) else: output(u"DBG> %s is redirect to %s" % (self.title(), m.group(1))) raise IsRedirectPage(m.group(1)) x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] return x, isWatched, editRestriction
|
|
m = re.search('value="(\d+)" name=["\']wpEdittime["\']', text) if m: self._editTime = m.group(1) else: self._editTime = "0" m = re.search('value="(\d+)" name=["\']wpStarttime["\']', text) if m: self._startTime = m.group(1) else: self._startTime = "0"
|
def getEditPage(self, get_redirect=False, throttle = True, sysop = False): """ Get the contents of the Page via the edit page. Do not use this directly, use get() instead. Arguments: get_redirect - Get the contents, even if it is a redirect page This routine returns a unicode string containing the wiki text. """ isWatched = False editRestriction = None output(u'Getting page %s' % self.aslink()) path = self.site().edit_address(self.urlname()) # Make sure Brion doesn't get angry by waiting if the last time a page # was retrieved was not long enough ago. if throttle: get_throttle() # Try to retrieve the page until it was successfully loaded (just in case # the server is down or overloaded) # wait for retry_idle_time minutes (growing!) between retries. retry_idle_time = 1 while True: starttime = time.time() try: text = self.site().getUrl(path, sysop = sysop) except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: Could not load %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site().hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue # Extract the actual text from the textedit field try: i1 = re.search('<textarea[^>]*>', text).end() i2 = re.search('</textarea>', text).start() except AttributeError: # find out if the username or IP has been blocked if text.find(mediawiki_messages.get('blockedtitle', self.site())) != -1: raise UserBlocked(self.site(), self.title()) else: # We assume that the server is down. Wait some time, then try again. print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site().hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue # We now know that there is a textarea. # Look for the edit token Rwatch = re.compile(r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"") tokenloc = Rwatch.search(text) if tokenloc: self.site().putToken(tokenloc.group(1), sysop = sysop) elif not self.site().getToken(getalways = False): self.site().putToken('', sysop = sysop) # Find out if page actually exists. Only existing pages have a # version history tab. RversionTab = re.compile(r'<li id="ca-history"><a href=".*title=.*&action=history">.*</a></li>') matchVersionTab = RversionTab.search(text) if not matchVersionTab: raise NoPage(self.site(), self.title()) # Look if the page is on our watchlist R = re.compile(r"\<input tabindex='[\d]+' type='checkbox' name='wpWatchthis' checked='checked'") matchWatching = R.search(text) if matchWatching: isWatched = True # Get timestamps m = re.search('value="(\d+)" name=["\']wpEdittime["\']', text) if m: self._editTime = m.group(1) else: self._editTime = "0" m = re.search('value="(\d+)" name=["\']wpStarttime["\']', text) if m: self._startTime = m.group(1) else: self._startTime = "0" # Now process the contents of the textarea m = self.site().redirectRegex().match(text[i1:i2]) if self._editTime == "0": output(u"DBG> page may be locked?!") editRestriction = 'sysop' if m: if get_redirect: self._redirarg = m.group(1) else: output(u"DBG> %s is redirect to %s" % (self.title(), m.group(1))) raise IsRedirectPage(m.group(1)) x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] return x, isWatched, editRestriction
|
|
def isTalkPage(self): ns = self.namespace() return ns >= 0 and ns % 2 == 1
|
def isEmpty(self): """ True if the page has less than 4 characters, except for language links and category links, False otherwise. Can raise the same exceptions as get() """ txt = self.get() txt = removeLanguageLinks(txt) txt = removeCategoryLinks(txt, site = self.site()) if len(txt) < 4: return True else: return False
|
|
R=re.compile(r'\[\[(.*)]\]')
|
R=re.compile(r'\[\[(.+?)\]\]')
|
def PagesFromFile(fn, site = None): """Read a file of page links between double-square-brackets, and return them as a list of Page objects. 'fn' is the name of the file that should be read.""" if site is None: site = getSite() f=open(fn, 'r') R=re.compile(r'\[\[(.*)]\]') for line in f.readlines(): m=R.match(line) if m: part = m.group(1).split(':') i = 0 try: fam=Family(part[i], fatal = False) i += 1 except ValueError: fam=site.family if part[i] in fam.langs: code = part[i] i += 1 else: code = site.lang pagename = ':'.join(part[i:]) thesite = getSite(code = code, fam = fam) #print "DBG> Pagename", repr(thesite),pagename yield Page(thesite, pagename) else: print "ERROR: Did not understand %s line:\n%s" % (fn, repr(line)) f.close()
|
except ValueError:
|
except:
|
def PagesFromFile(fn, site = None): """Read a file of page links between double-square-brackets, and return them as a list of Page objects. 'fn' is the name of the file that should be read.""" if site is None: site = getSite() f=open(fn, 'r') R=re.compile(r'\[\[(.*)]\]') for line in f.readlines(): m=R.match(line) if m: part = m.group(1).split(':') i = 0 try: fam=Family(part[i], fatal = False) i += 1 except ValueError: fam=site.family if part[i] in fam.langs: code = part[i] i += 1 else: code = site.lang pagename = ':'.join(part[i:]) thesite = getSite(code = code, fam = fam) #print "DBG> Pagename", repr(thesite),pagename yield Page(thesite, pagename) else: print "ERROR: Did not understand %s line:\n%s" % (fn, repr(line)) f.close()
|
if summary == None:
|
if summary_commandline == None:
|
def main(): gen = None # How we want to retrieve information on which pages need to be changed. # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # summary message summary_commandline = None # Array which will collect commandline parameters. # First element is original text, second element is replacement text. commandline_replacements = [] # A list of 2-tuples of original text and replacement text. replacements = [] # Don't edit pages which contain certain texts. exceptions = [] # Should the elements of 'replacements' and 'exceptions' be interpreted # as regular expressions? regex = False # Predefined fixes from dictionary 'fixes' (see above). fix = None # the dump's path, either absolute or relative, which will be used when source # is 'xmldump'. xmlFilename = None useSql = False # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # pages which will be processed when the -page parameter is used PageTitles = [] # a page whose referrers will be processed when the -ref parameter is used referredPageTitle = None # an image page whose file links will be processed when the -filelinks parameter is used fileLinksPageTitle = None # a page whose links will be processed when the -links parameter is used linkingPageTitle = None # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None # Google query googleQuery = None # Load default summary message. wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg)) # Read commandline parameters. for arg in wikipedia.handleArgs(): if arg == '-regex': regex = True elif arg.startswith('-filelinks'): if len(arg) == 10: fileLinksPageTitle = wikipedia.input(u'Links to which image page should be processed?') else: fileLinksPageTitle = arg[11:] #TODO: Replace 'Image:' with something that automatically gets the name of images based on the language. 
fileLinksPage = wikipedia.Page(wikipedia.getSite(), 'Image:' + fileLinksPageTitle) gen = pagegenerators.FileLinksGenerator(fileLinksPage) elif arg.startswith('-file'): if len(arg) >= 6: textfilename = arg[6:] gen = pagegenerators.TextfilePageGenerator(textfilename) elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] cat = catlib.Category(wikipedia.getSite(), 'Category:%s' % categoryname) gen = pagegenerators.CategorizedPageGenerator(cat) elif arg.startswith('-xml'): if len(arg) == 4: xmlFilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlFilename = arg[5:] elif arg =='-sql': useSql = True elif arg.startswith('-page'): if len(arg) == 5: PageTitles.append(wikipedia.input(u'Which page do you want to chage?')) else: PageTitles.append(arg[6:]) source = 'specificPages' elif arg.startswith('-ref'): if len(arg) == 4: referredPageTitle = wikipedia.input(u'Links to which page should be processed?') else: referredPageTitle = arg[5:] referredPage = wikipedia.Page(wikipedia.getSite(), referredPageTitle) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links'): if len(arg) == 6: linkingPageTitle = wikipedia.input(u'Links from which page should be processed?') else: linkingPageTitle = arg[7:] linkingPage = wikipedia.Page(wikipedia.getSite(), linkingPageTitle) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-start'): if len(arg) == 6: firstPageTitle = wikipedia.input(u'Which page do you want to chage?') else: firstPageTitle = arg[7:] namespace = wikipedia.Page(wikipedia.getSite(), firstPageTitle).namespace() firstPageTitle = wikipedia.Page(wikipedia.getSite(), firstPageTitle).titleWithoutNamespace() gen = pagegenerators.AllpagesPageGenerator(firstPageTitle, namespace) elif arg.startswith('-google'): if len(arg) >= 8: googleQuery = arg[8:] gen = pagegenerators.GoogleSearchPageGenerator(googleQuery) elif arg.startswith('-except:'): exceptions.append(arg[8:]) elif arg.startswith('-fix:'): fix = arg[5:] elif arg == '-always': acceptall = True elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) elif arg.startswith('-summary:'): wikipedia.setAction(arg[9:]) summary_commandline = True else: commandline_replacements.append(arg) if (len(commandline_replacements) == 2 and fix == None): replacements.append((commandline_replacements[0], commandline_replacements[1])) if summary == None: wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg ) % ' (-' + commandline_replacements[0] + ' +' + commandline_replacements[1] + ')') elif fix == None: old = wikipedia.input(u'Please enter the text that should be replaced:') new = wikipedia.input(u'Please enter the new text:') change = '(-' + old + ' +' + new replacements.append((old, new)) while True: old = wikipedia.input(u'Please enter another text that should be replaced, or press Enter to start:') if old == '': change = change + ')' break new = wikipedia.input(u'Please enter the new text:') change = change + ' & -' + old + ' +' + new replacements.append((old, new)) if not summary_commandline == True: default_summary_message = wikipedia.translate(wikipedia.getSite(), msg) % change wikipedia.output(u'The summary message will default to: %s' % default_summary_message) summary_message = wikipedia.input(u'Press Enter to use this default message, or enter a description of the changes your bot will make:') if summary_message == '': summary_message = default_summary_message 
wikipedia.setAction(summary_message) else: # Perform one of the predefined actions. try: fix = fixes[fix] except KeyError: wikipedia.output(u'Available predefined fixes are: %s' % fixes.keys()) wikipedia.stopme() sys.exit() if fix.has_key('regex'): regex = fix['regex'] if fix.has_key('msg'): wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), fix['msg'])) if fix.has_key('exceptions'): exceptions = fix['exceptions'] replacements = fix['replacements'] # already compile all regular expressions here to save time later for i in range(len(replacements)): old, new = replacements[i] if not regex: old = re.escape(old) oldR = re.compile(old, re.UNICODE) replacements[i] = oldR, new for i in range(len(exceptions)): exception = exceptions[i] if not regex: exception = re.escape(exception) exceptionR = re.compile(exception, re.UNICODE) exceptions[i] = exceptionR if xmlFilename: gen = XmlDumpReplacePageGenerator(xmlFilename, replacements, exceptions) elif useSql: whereClause = 'WHERE (%s)' % ' OR '.join(["old_text RLIKE '%s'" % prepareRegexForMySQL(old.pattern) for (old, new) in replacements]) if exceptions: exceptClause = 'AND NOT (%s)' % ' OR '.join(["old_text RLIKE '%s'" % prepareRegexForMySQL(exc.pattern) for exc in exceptions]) else: exceptClause = '' query = u"""
|
Rnowiki = re.compile("<nowiki>*?</nowiki>")
|
Rnowiki = re.compile("<nowiki>.*?</nowiki>")
|
def linkedPages(self): """Gives the normal (not-interwiki, non-category) pages the page links to, as a list of Page objects """ result = [] try: thistxt = removeLanguageLinks(self.get()) except NoPage: return [] except IsRedirectPage: raise thistxt = removeCategoryLinks(thistxt, self.site())
|
e = code2encoding(incode) if e == 'utf-8':
|
return unicode2html(x, encoding = code2encoding(incode))
|
def url2link(percentname,incode,code): """Convert a url-name of a page into a proper name for an interwiki link the argument 'incode' specifies the encoding of the target wikipedia """ result = underline2space(percentname) x = url2unicode(result, language = code) e = code2encoding(incode) if e == 'utf-8': # utf-8 can handle anything return x elif e == code2encoding(code): #print "url2link", repr(x), "same encoding",incode,code return unicode2html(x, encoding = code2encoding(code)) else: # In all other cases, replace difficult chars by &#; refs. #print "url2link", repr(x), "different encoding" return unicode2html(x, encoding = 'ascii')
|
return x elif e == code2encoding(code): return unicode2html(x, encoding = code2encoding(code)) else: return unicode2html(x, encoding = 'ascii')
|
def url2link(percentname,incode,code): """Convert a url-name of a page into a proper name for an interwiki link the argument 'incode' specifies the encoding of the target wikipedia """ result = underline2space(percentname) x = url2unicode(result, language = code) e = code2encoding(incode) if e == 'utf-8': # utf-8 can handle anything return x elif e == code2encoding(code): #print "url2link", repr(x), "same encoding",incode,code return unicode2html(x, encoding = code2encoding(code)) else: # In all other cases, replace difficult chars by &#; refs. #print "url2link", repr(x), "different encoding" return unicode2html(x, encoding = 'ascii')
|
|
if hasattr(self, '_redirarg'): if not get_redirect: raise IsRedirectPage,self._redirarg elif hasattr(self, '_getexception'):
|
if hasattr(self, '_getexception'):
|
def get(self, read_only = False, force = False, get_redirect=False, throttle = True): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code:
|
except LockedPage: self._getexception = LockedPage raise
|
def get(self, read_only = False, force = False, get_redirect=False, throttle = True): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code:
|
|
self.get(read_only = True)
|
self.get(read_only = read_only)
|
def getRedirectTarget(self, read_only = False): """ If the page is a redirect page, gives the title of the page it redirects to. Otherwise it will raise an IsNotRedirectPage exception. This function can raise a NoPage exception, and unless the argument read_only is True, a LockedPage exception as well. """ try: self.get(read_only = True) except NoPage: raise NoPage(self) except LockedPage: raise LockedPage(self) except IsRedirectPage, arg: if '|' in arg: warnings.warn("%s has a | character, this makes no sense", Warning) return arg[0] else: raise IsNotRedirectPage(self)
|
if edittime[site, name] == "0" and not read_only: output(u"DBG> page may be locked?!") raise LockedPage()
|
def getEditPage(site, name, read_only = False, get_redirect=False, throttle = True): """ Get the contents of page 'name' from the 'site' wiki Do not use this directly; for 99% of the possible ideas you can use the Page object instead. Arguments: site - the wiki site name - the page name read_only - If true, doesn't raise LockedPage exceptions. do_quote - ??? (TODO: what is this for?) get_redirect - Get the contents, even if it is a redirect page This routine returns a unicode string containing the wiki text. """ isWatched = False name = re.sub(' ', '_', name) output(url2unicode(u'Getting page [[%s:%s]]' % (site.lang, name), site = site)) path = site.edit_address(name) # Make sure Brion doesn't get angry by waiting if the last time a page # was retrieved was not long enough ago. if throttle: get_throttle() # Try to retrieve the page until it was successfully loaded (just in case # the server is down or overloaded) # wait for retry_idle_time minutes (growing!) between retries. retry_idle_time = 1 while True: starttime = time.time() try: text = getUrl(site, path) except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: Could not load %s%s. Maybe the server is down. Retrying in %i minutes..." % (site.hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue get_throttle.setDelay(time.time() - starttime)\ # Look for the edit token R = re.compile(r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"") tokenloc = R.search(text) if tokenloc: site.puttoken(tokenloc.group(1)) elif not site.getToken(getalways = False): site.puttoken('') # Look if the page is on our watchlist R = re.compile(r"\<input tabindex='[\d]+' type='checkbox' name='wpWatchthis' checked='checked'") matchWatching = R.search(text) if matchWatching: isWatched = True if not read_only: # check if we're logged in p=re.compile('userlogin') if p.search(text) != None: output(u'Warning: You\'re probably not logged in on %s:' % repr(site)) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[site, name] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[site, name] = m.group(1) else: edittime[site, name] = "0" # Extract the actual text from the textedit field try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %i minutes..." % (site.hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(site, name) m = redirectRe(site).match(text[i1:i2]) if m and not get_redirect: output(u"DBG> %s is redirect to %s" % (url2unicode(name, site = site), m.group(1))) raise IsRedirectPage(m.group(1)) if edittime[site, name] == "0" and not read_only: output(u"DBG> page may be locked?!") raise LockedPage() x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] return x, isWatched
|
|
import mediawiki_messages for i in pages_for_exclusion_database: path = 'copyright/' + i[0] + '/' + i[2] mediawiki_messages.makepath(path) p = wikipedia.Page(wikipedia.getSite(i[0]),i[1]) yield p, path
|
for i in pages_for_exclusion_database: path = 'copyright/' + i[0] + '/' + i[2] mediawiki_messages.makepath(path) p = wikipedia.Page(wikipedia.getSite(i[0]),i[1]) yield p, path
|
def exclusion_file_list(): import mediawiki_messages for i in pages_for_exclusion_database: path = 'copyright/' + i[0] + '/' + i[2] mediawiki_messages.makepath(path) p = wikipedia.Page(wikipedia.getSite(i[0]),i[1]) yield p, path
|
import time, os write = False for page, path in exclusion_file_list(): try: file_age = time.time() - os.path.getmtime(path) if file_age > 24 * 60 * 60: print 'Updating source pages to exclude new URLs...' write = True except OSError: write = True if write: f = codecs.open(path, 'w', 'utf-8') f.write(page.get()) f.close() return
|
write = False for page, path in exclusion_file_list(): try: file_age = time.time() - os.path.getmtime(path) if file_age > 24 * 60 * 60: print 'Updating source pages to exclude new URLs...' write = True except OSError: write = True if write: f = codecs.open(path, 'w', 'utf-8') f.write(page.get()) f.close() return
|
def load_pages(): import time, os write = False for page, path in exclusion_file_list(): try: file_age = time.time() - os.path.getmtime(path) if file_age > 24 * 60 * 60: print 'Updating source pages to exclude new URLs...' write = True except OSError: write = True if write: f = codecs.open(path, 'w', 'utf-8') f.write(page.get()) f.close() return
|
import glob prelist = [] load_pages() for page, path in exclusion_file_list(): f = codecs.open(path, "r", 'utf-8') data = f.read() f.close() prelist += re.findall("(?i)url\s*=\s*<nowiki>(?:http://)?(.*?)</nowiki>", data) prelist += re.findall("(?i)\*\s*Site:\s*\[?(?:http://)?(.*?)\]?", data) if 'copyright/it/Cloni.txt' in path: prelist += re.findall('(?i)^==(?!=)\s*\[?\s*(?:<nowiki>)?(?:http://)?(.*?)(?:</nowiki>)?\s*\]?\s*==', data) list1 = [] for entry in prelist: list1 += entry.split(", ") list2 = [] for entry in list1: list2 += entry.split("and ") list3 = [] for entry in list2: entry = re.sub("http://", "", entry) if entry: if '/' in entry: list3 += [re.sub(" .*", "", entry[:entry.rfind('/')])] else: list3 += [re.sub(" .*", "", entry)] f = codecs.open('copyright/exclusion_list.txt', 'r','utf-8') list3 += re.sub(" ? f.close() return list3
|
prelist = [] load_pages() for page, path in exclusion_file_list(): f = codecs.open(path, "r", 'utf-8') data = f.read() f.close() prelist += re.findall("(?i)url\s*=\s*<nowiki>(?:http://)?(.*?)</nowiki>", data) prelist += re.findall("(?i)\*\s*Site:\s*\[?(?:http://)?(.*?)\]?", data) if 'copyright/it/Cloni.txt' in path: prelist += re.findall('(?i)^==(?!=)\s*\[?\s*(?:<nowiki>)?(?:http://)?(.*?)(?:</nowiki>)?\s*\]?\s*==', data) list1 = [] for entry in prelist: list1 += entry.split(", ") list2 = [] for entry in list1: list2 += entry.split("and ") list3 = [] for entry in list2: entry = re.sub("http://", "", entry) if entry: if '/' in entry: list3 += [re.sub(" .*", "", entry[:entry.rfind('/')])] else: list3 += [re.sub(" .*", "", entry)] f = codecs.open('copyright/exclusion_list.txt', 'r','utf-8') list3 += re.sub(" ? f.close() return list3
|
def exclusion_list(): import glob prelist = [] load_pages() for page, path in exclusion_file_list(): f = codecs.open(path, "r", 'utf-8') data = f.read() f.close() # wikipedia:en:Wikipedia:Mirrors and forks prelist += re.findall("(?i)url\s*=\s*<nowiki>(?:http://)?(.*?)</nowiki>", data) prelist += re.findall("(?i)\*\s*Site:\s*\[?(?:http://)?(.*?)\]?", data) # wikipedia:it:Wikipedia:Cloni if 'copyright/it/Cloni.txt' in path: prelist += re.findall('(?i)^==(?!=)\s*\[?\s*(?:<nowiki>)?(?:http://)?(.*?)(?:</nowiki>)?\s*\]?\s*==', data)
|
text = re.sub("(?i){{quote|.*?}}", "", text) text = re.sub("^:''.*?''\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$", "", text) text = re.sub('^[:*]?["][^"]+["]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[«][^»]+[»]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[“][^”]+[”]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text)
|
text = re.sub("(?i){{quote|.*?}}", "", text) text = re.sub("^:''.*?''\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$", "", text) text = re.sub('^[:*]?["][^"]+["]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[«][^»]+[»]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[“][^â€]+[â€]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text)
|
def cleanwikicode(text): if not text: return ""
|
if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1)
|
def main(give_url,image_url): url = give_url basicdesc = [] if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
|
|
answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image)
|
answer = wikipedia.inputChoice(u'Include image %s?' % image, ['yes', 'no', 'stop'], ['y', 'N', 's'], 'N')
|
def main(give_url,image_url): url = give_url basicdesc = [] if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
|
lib_images.get_image(image, None, desc)
|
uploadBot = upload.UploadRobot(image, desc) uploadBot.run()
|
def main(give_url,image_url): url = give_url basicdesc = [] if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
|
url = ''
|
url = u''
|
def main(give_url,image_url): url = give_url basicdesc = [] if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
|
if wikipedia.argHandler(arg, 'imageharvest'):
|
arg = wikipedia.argHandler(arg, 'imageharvest') if arg:
|
def main(give_url,image_url): url = give_url basicdesc = [] if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
|
elif url == '':
|
elif url == u'':
|
def main(give_url,image_url): url = give_url basicdesc = [] if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
|
self.version = '1.5'
|
def version(self, code): return "1.5"
|
def __init__(self): family.Family.__init__(self) self.name = 'commons' self.langs = { 'commons': 'commons.wikimedia.org', } self.namespaces[4] = { '_default': 'Commons', } self.namespaces[5] = { '_default': 'Commons talk', }
|
print mediawiki_messages.get('spamprotectiontitle', self.site())
|
output(u""+mediawiki_messages.get('spamprotectiontitle', self.site()))
|
def putPage(self, text, comment = None, watchArticle = False, minorEdit = True, newPage = False, token = None, gettoken = False, sysop = False): """ Upload 'text' as new contents for this Page by filling out the edit page.
|
if code in ['eml','lij','lmo','nap','pms','roa-tará','sc','scn','vec']:
|
if code in ['eml','lij','lmo','nap','pms','roa-tara','sc','scn','vec']:
|
def altlang(code): if code=='aa': return ['am'] if code in ['fa','so']: return ['ar'] if code=='ku': return ['ar','tr'] if code=='sk': return ['cs'] if code in ['bar','hsb','ksh']: return ['de'] if code in ['als','lb']: return ['de','fr'] if code=='io': return ['eo'] if code in ['an','ast','ay','ca','gn','nah','qu']: return ['es'] if code == ['cbk-zam']: return ['es','tl'] if code=='eu': return ['es','fr'] if code in ['glk','mzn']: return ['fa','ar'] if code=='gl': return ['es','pt'] if code=='lad': return ['es','he'] if code in ['br','ht','ln','lo','nrm','vi','wa']: return ['fr'] if code in ['ie','oc']: return ['ie','oc','fr'] if code in ['co','frp']: return ['fr','it'] if code=='yi': return ['he'] if code=='sa': return ['hi'] if code in ['eml','lij','lmo','nap','pms','roa-tará','sc','scn','vec']: return ['it'] if code=='rm': return ['it','de','fr'] if code=='bat-smg': return ['lt'] if code=='ia': return ['la','es','fr','it'] if code=='nds': return ['nds-nl','de'] if code=='nds-nl': return ['nds','nl'] if code in ['fy','pap','vls','zea']: return ['nl'] if code=='li': return ['nl','de'] if code=='csb': return ['pl'] if code=='tet': return ['pt'] if code in ['mo','roa-rup']: return ['ro'] if code in ['av','be','bxr','cv','hy','lbe','ru-sib','tt','udm','uk','xal']: return ['ru'] if code=='got': return ['ru','uk'] if code in ['kk','ky','tk','ug','uz']: return ['tr','ru'] if code == 'diq': return ['tr'] if code in ['ja','ko','minnan','zh','zh-cn']: return ['zh','zh-tw','zh-classical','zh-cn'] if code in ['bo','cdo','wuu','za','zh-classical','zh-tw','zh-yue']: return ['zh','zh-cn','zh-classical','zh-tw'] if code=='da': return ['nb','no'] if code in ['is','no','nb','nn']: return ['no','nb','nn','da','sv'] if code=='sv': return ['da','no','nb'] if code=='se': return ['no','nb','sv','nn','fi','da'] if code in ['bug','id','jv','map-bms','ms','su']: return ['id','ms','jv'] if code in ['bs','hr','mk','sh','sr']: return ['sh','hr','sr','bs'] if code in ['ceb','pag','war']: return ['tl'] if code=='bi': return ['tpi'] if code=='tpi': return ['bi'] if code == 'new': return ['ne'] if code == 'nov': return ['io','eo'] return []
|
def showdiff(old, new):
|
def showdiff(self,old, new):
|
def showdiff(old, new): diff = difflib.context_diff(old.splitlines(), new.splitlines()) wikipedia.output(u"\n".join(diff))
|
raise
|
wikipedia.stopme()
|
def main(): # How we want to retrieve information on which pages need to be changed. # Can either be 'sqldump', 'textfile' or 'userinput'. source = None # Array which will collect commandline parameters. # First element is original text, second element is replacement text. commandline_replacements = [] # A dictionary where keys are original texts and values are replacement texts. replacements = {} # Don't edit pages which contain certain texts. exceptions = [] # Should the elements of 'replacements' and 'exceptions' be interpreted # as regular expressions? regex = False # Predefined fixes from dictionary 'fixes' (see above). fix = None # the dump's path, either absolute or relative, which will be used when source # is 'sqldump'. sqlfilename = '' # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = '' # a list of pages which will be used when source is 'userinput'. pagenames = [] # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespace should be processed when using a SQL dump # default to -1 which means all namespaces will be processed namespace = -1 # Load default summary message. wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg)) # Read commandline parameters. for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg) if arg: if arg == '-regex': regex = True elif arg.startswith('-file'): if len(arg) == 5: textfilename = wikipedia.input(u'Please enter the filename:') else: textfilename = arg[6:] source = 'textfile' elif arg.startswith('-sql'): if len(arg) == 4: sqlfilename = wikipedia.input(u'Please enter the SQL dump\'s filename:') else: sqlfilename = arg[5:] source = 'sqldump' elif arg.startswith('-page'): if len(arg) == 5: pagenames.append(wikipedia.input(u'Which page do you want to chage?')) else: pagenames.append(arg[6:]) source = 'userinput' elif arg.startswith('-except:'): exceptions.append(arg[8:]) elif arg.startswith('-fix:'): fix = arg[5:] elif arg == '-always': acceptall = True elif arg.startswith('-namespace:'): namespace = int(arg[11:]) else: commandline_replacements.append(arg) if source == None or len(commandline_replacements) not in [0, 2]: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if (len(commandline_replacements) == 2 and fix == None): replacements[commandline_replacements[0]] = commandline_replacements[1] wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg ) % ' (-' + commandline_replacements[0] + ' +' + commandline_replacements[1] + ')') elif fix == None: old = wikipedia.input(u'Please enter the text that should be replaced:') new = wikipedia.input(u'Please enter the new text:') change = '(-' + old + ' +' + new replacements[old] = new while True: old = wikipedia.input(u'Please enter another text that should be replaced, or press Enter to start:') if old == '': change = change + ')' break new = wikipedia.input(u'Please enter the new text:') change = change + ' & -' + old + ' +' + new replacements[old] = new default_summary_message = wikipedia.translate(wikipedia.getSite(), msg) % change wikipedia.output(u'The summary message will default to: %s' % default_summary_message) summary_message = wikipedia.input(u'Press Enter to use this default message, or enter a description of the changes your bot will make:') if summary_message == '': summary_message = default_summary_message wikipedia.setAction(summary_message) else: # 
Perform one of the predefined actions. try: fix = fixes[fix] except KeyError: wikipedia.output(u'Available predefined fixes are: %s' % fixes.keys()) wikipedia.stopme() sys.exit() if fix.has_key('regex'): regex = fix['regex'] if fix.has_key('msg'): wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), fix['msg'])) if fix.has_key('exceptions'): exceptions = fix['exceptions'] replacements = fix['replacements'] # Run the generator which will yield PageLinks to pages which might need to be # changed. for pl in generator(source, replacements, exceptions, regex, namespace, textfilename, sqlfilename, pagenames): print '' try: # Load the page's text from the wiki original_text = pl.get() except wikipedia.NoPage: wikipedia.output(u'Page %s not found' % pl.linkname()) continue except wikipedia.LockedPage: wikipedia.output(u'Skipping locked page %s' % pl.linkname()) continue except wikipedia.IsRedirectPage: continue skip_page = False # skip all pages that contain certain texts for exception in exceptions: if regex: exception = re.compile(exception) hit = exception.search(original_text) if hit: wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), hit.group(0))) # Does anyone know how to break out of the _outer_ loop? # Then we wouldn't need the skip_page variable. skip_page = True break else: hit = original_text.find(exception) if hit != -1: wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), original_text[hit:hit + len(exception)])) skip_page = True break if not skip_page: # create a copy of the original text to work on, so we can later compare # if any changes were made new_text = original_text for old, new in replacements.items(): if regex: # TODO: compiling the regex each time might be inefficient old = re.compile(old) new_text = old.sub(new, new_text) else: new_text = new_text.replace(old, new) if new_text == original_text: try: # Sometime the bot crashes when it can't decode a character. # Let's not let it crash print 'No changes were necessary in %s' % pl.linkname() except UnicodeEncodeError: print 'Error decoding pl.linkname()' continue else: #wikipedia.showDiff(original_text, new_text) wikipedia.showColorDiff(original_text, new_text, replacements) if not acceptall: choice = wikipedia.input(u'Do you want to accept these changes? [y|n|a(ll)]') if choice in ['a', 'A']: acceptall = True if acceptall or choice in ['y', 'Y']: pl.put(new_text)
|
return "[[user:%s" % username in text.lower()
|
return "[[user:%s" % username.lower() in text.lower()
|
def allowedbot(username, site): """Checks whether the bot is listed on Wikipedia:bots""" pl = wikipedia.Page(site, "Wikipedia:Bots") text = pl.get() return "[[user:%s" % username in text.lower()
|
catlib.change_category(article, original_cat, None)
|
catlib.change_category(article, original_cat.catname(), None)
|
def move_to_subcategory(article, original_cat, current_cat): print print 'Treating page ' + article.ascii_linkname() + ', currently in category ' + current_cat.ascii_linkname() subcatlist = get_subcats(current_cat) print if len(subcatlist) == 0: print 'This category has no subcategories.' print # show subcategories as possible choices (with numbers) for i in range(len(subcatlist)): print '%d - Move to %s' % (i, subcatlist[i]) print 'j - Jump to another category' print 's - Skip this article' print 'r - Remove this category tag' print '? - Read the page' print 'Enter - Save category as ' + current_cat.ascii_linkname()
|
ns = mysite.template_namespace(fallback = None)
|
ns = mysite.template_namespace()
|
def main(): template_names = [] resolve = False remove = False # If xmlfilename is None, references will be loaded from the live wiki. xmlfilename = None new = None # read command line parameters for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'template') if arg: if arg == '-remove': remove = True elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] else: template_names.append(arg) if len(template_names) == 0 or len(template_names) > 2: wikipedia.showHelp('template') sys.exit() old = template_names[0] if len(template_names) == 2: new = template_names[1] mysite = wikipedia.getSite() ns = mysite.template_namespace(fallback = None) oldTemplate = wikipedia.Page(mysite, ns + ':' + old) if xmlfilename: gen = XmlTemplatePageGenerator(oldTemplate, xmlfilename) else: gen = pagegenerators.ReferringPageGenerator(oldTemplate, onlyTemplateInclusion = True) preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = TemplateRobot(preloadingGen, old, new, remove) bot.run()
|