rem | add | context
---|---|---|
choice = wikipedia.input('WARNING: %s doesn\'t seem to be a disambiguation page, but %s is one. Follow it anyway? [y|N]' % (self.inpl.aslink(), pl.aslink()))
|
choice = wikipedia.inputChoice('WARNING: %s doesn\'t seem to be a disambiguation page, but %s is one. Follow it anyway?' % (self.inpl.aslink(), pl.aslink()), ['Yes', 'No'], ['y', 'N'], 'N')
|
def workDone(self, counter): """This is called by a worker to tell us that the promised work was completed as far as possible. The only argument is an instance of a counter class, that has methods minus() and plus() to keep counts of the total work todo.""" # Loop over all the pages that should have been taken care of for pl in self.pending: # Mark the page as done self.done[pl] = pl.site() # Register this fact at the todo-counter. counter.minus(pl.site()) # Assume it's not a redirect isredirect = 0 # Now check whether any interwiki links should be added to the # todo list. if pl.section(): # We have been referred to a part of a page, not the whole page. Do not follow references. pass else: try: iw = pl.interwiki() except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.Page(pl.site(),arg.args[0]) wikipedia.output(u"NOTE: %s is redirect to %s" % (pl.aslink(), pl3.aslink())) if pl == self.inpl: # This is a redirect page itself. We don't need to # follow the redirection. isredirect = 1 # In this case we can also stop all hints! for pl2 in self.todo: counter.minus(pl2.site()) self.todo = {} pass elif not globalvar.followredirect: print "NOTE: not following redirects." else: if self.conditionalAdd(pl3, counter, pl): if globalvar.shownew: wikipedia.output(u"%s: %s gives new redirect %s" % (self.inpl.aslink(), pl.aslink(), pl3.aslink())) except wikipedia.NoPage: wikipedia.output(u"NOTE: %s does not exist" % pl.aslink()) #print "DBG> ",pl.urlname() if pl == self.inpl: # This is the home subject page. # In this case we can stop all hints! for pl2 in self.todo: counter.minus(pl2.site()) self.todo = {} self.done = {} # In some rare cases it might be we already did check some 'automatic' links pass #except wikipedia.SectionError: # wikipedia.output(u"NOTE: section %s does not exist" % pl.aslink()) else: if not globalvar.autonomous: if self.inpl.isDisambig() and not pl.isDisambig(): choice = wikipedia.input('WARNING: %s is a disambiguation page, but %s doesn\'t seem to be one. Follow it anyway? [y|N]' % (self.inpl.aslink(), pl.aslink())) elif not self.inpl.isDisambig() and pl.isDisambig(): choice = wikipedia.input('WARNING: %s doesn\'t seem to be a disambiguation page, but %s is one. Follow it anyway? [y|N]' % (self.inpl.aslink(), pl.aslink())) else: choice = 'y' if choice not in ['y', 'Y']: wikipedia.output(u"NOTE: ignoring %s and its interwiki links" % pl.aslink()) del self.done[pl] iw = () if self.inpl == pl: self.untranslated = (len(iw) == 0) if globalvar.untranslatedonly: # Ignore the interwiki links. iw = () elif pl.isEmpty(): if not pl.isCategory(): wikipedia.output(u"NOTE: %s is empty; ignoring it and its interwiki links" % pl.aslink()) # Ignore the interwiki links iw = () for page2 in iw: if page2.site().language() in globalvar.neverlink: print "Skipping link %s to an ignored language"% page2 continue if globalvar.same=='wiktionary' and page2.linkname().lower()!=self.inpl.linkname().lower(): print "NOTE: Ignoring %s for %s in wiktionary mode"% (page2, self.inpl) continue if not globalvar.autonomous: if self.inpl.namespace() != page2.namespace(): choice = wikipedia.input('WARNING: %s is in namespace %i, but %s is in namespace %i. Follow it anyway? [y|N]' % (self.inpl.aslink(), self.inpl.namespace(), page2.aslink(), page2.namespace())) if choice not in ['y', 'Y']: continue if self.conditionalAdd(page2, counter, pl): if globalvar.shownew: wikipedia.output(u"%s: %s gives new interwiki %s"% (self.inpl.aslink(), pl.aslink(), page2.aslink())) # These pages are no longer 'in progress' del self.pending # Check whether we need hints and the user offered to give them if self.untranslated and not self.hintsasked: wikipedia.output(u"NOTE: %s does not have any interwiki links" % self.inpl.aslink()) if (self.untranslated or globalvar.askhints) and not self.hintsasked and not isredirect: # Only once! self.hintsasked = True if globalvar.untranslated: newhint = None t = globalvar.showtextlink if t: wikipedia.output(pl.get()[:t]) while 1: newhint = wikipedia.input(u'Give a hint (? to see pagetext):') if newhint == '?': t += globalvar.showtextlinkadd wikipedia.output(pl.get()[:t]) elif newhint and not ':' in newhint: print "Please enter a hint like language:pagename" print "or type nothing if you do not have a hint" elif not newhint: break else: arr = {} titletranslate.translate(pl, arr, same = False, hints = [newhint], auto = globalvar.auto) for pl2 in arr.iterkeys(): self.todo[pl2] = pl2.site() counter.plus(pl2.site()) self.foundin[pl2] = [None]
|
choice = wikipedia.input('WARNING: %s is in namespace %i, but %s is in namespace %i. Follow it anyway? [y|N]' % (self.inpl.aslink(), self.inpl.namespace(), page2.aslink(), page2.namespace()))
|
choice = wikipedia.inputChoice('WARNING: %s is in namespace %i, but %s is in namespace %i. Follow it anyway?' % (self.inpl.aslink(), self.inpl.namespace(), page2.aslink(), page2.namespace()), ['Yes', 'No'], ['y', 'N'], 'N')
|
def workDone(self, counter): """This is called by a worker to tell us that the promised work was completed as far as possible. The only argument is an instance of a counter class, that has methods minus() and plus() to keep counts of the total work todo.""" # Loop over all the pages that should have been taken care of for pl in self.pending: # Mark the page as done self.done[pl] = pl.site() # Register this fact at the todo-counter. counter.minus(pl.site()) # Assume it's not a redirect isredirect = 0 # Now check whether any interwiki links should be added to the # todo list. if pl.section(): # We have been referred to a part of a page, not the whole page. Do not follow references. pass else: try: iw = pl.interwiki() except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.Page(pl.site(),arg.args[0]) wikipedia.output(u"NOTE: %s is redirect to %s" % (pl.aslink(), pl3.aslink())) if pl == self.inpl: # This is a redirect page itself. We don't need to # follow the redirection. isredirect = 1 # In this case we can also stop all hints! for pl2 in self.todo: counter.minus(pl2.site()) self.todo = {} pass elif not globalvar.followredirect: print "NOTE: not following redirects." else: if self.conditionalAdd(pl3, counter, pl): if globalvar.shownew: wikipedia.output(u"%s: %s gives new redirect %s" % (self.inpl.aslink(), pl.aslink(), pl3.aslink())) except wikipedia.NoPage: wikipedia.output(u"NOTE: %s does not exist" % pl.aslink()) #print "DBG> ",pl.urlname() if pl == self.inpl: # This is the home subject page. # In this case we can stop all hints! for pl2 in self.todo: counter.minus(pl2.site()) self.todo = {} self.done = {} # In some rare cases it might be we already did check some 'automatic' links pass #except wikipedia.SectionError: # wikipedia.output(u"NOTE: section %s does not exist" % pl.aslink()) else: if not globalvar.autonomous: if self.inpl.isDisambig() and not pl.isDisambig(): choice = wikipedia.input('WARNING: %s is a disambiguation page, but %s doesn\'t seem to be one. Follow it anyway? [y|N]' % (self.inpl.aslink(), pl.aslink())) elif not self.inpl.isDisambig() and pl.isDisambig(): choice = wikipedia.input('WARNING: %s doesn\'t seem to be a disambiguation page, but %s is one. Follow it anyway? [y|N]' % (self.inpl.aslink(), pl.aslink())) else: choice = 'y' if choice not in ['y', 'Y']: wikipedia.output(u"NOTE: ignoring %s and its interwiki links" % pl.aslink()) del self.done[pl] iw = () if self.inpl == pl: self.untranslated = (len(iw) == 0) if globalvar.untranslatedonly: # Ignore the interwiki links. iw = () elif pl.isEmpty(): if not pl.isCategory(): wikipedia.output(u"NOTE: %s is empty; ignoring it and its interwiki links" % pl.aslink()) # Ignore the interwiki links iw = () for page2 in iw: if page2.site().language() in globalvar.neverlink: print "Skipping link %s to an ignored language"% page2 continue if globalvar.same=='wiktionary' and page2.linkname().lower()!=self.inpl.linkname().lower(): print "NOTE: Ignoring %s for %s in wiktionary mode"% (page2, self.inpl) continue if not globalvar.autonomous: if self.inpl.namespace() != page2.namespace(): choice = wikipedia.input('WARNING: %s is in namespace %i, but %s is in namespace %i. Follow it anyway? [y|N]' % (self.inpl.aslink(), self.inpl.namespace(), page2.aslink(), page2.namespace())) if choice not in ['y', 'Y']: continue if self.conditionalAdd(page2, counter, pl): if globalvar.shownew: wikipedia.output(u"%s: %s gives new interwiki %s"% (self.inpl.aslink(), pl.aslink(), page2.aslink())) # These pages are no longer 'in progress' del self.pending # Check whether we need hints and the user offered to give them if self.untranslated and not self.hintsasked: wikipedia.output(u"NOTE: %s does not have any interwiki links" % self.inpl.aslink()) if (self.untranslated or globalvar.askhints) and not self.hintsasked and not isredirect: # Only once! self.hintsasked = True if globalvar.untranslated: newhint = None t = globalvar.showtextlink if t: wikipedia.output(pl.get()[:t]) while 1: newhint = wikipedia.input(u'Give a hint (? to see pagetext):') if newhint == '?': t += globalvar.showtextlinkadd wikipedia.output(pl.get()[:t]) elif newhint and not ':' in newhint: print "Please enter a hint like language:pagename" print "or type nothing if you do not have a hint" elif not newhint: break else: arr = {} titletranslate.translate(pl, arr, same = False, hints = [newhint], auto = globalvar.auto) for pl2 in arr.iterkeys(): self.todo[pl2] = pl2.site() counter.plus(pl2.site()) self.foundin[pl2] = [None]
|
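The two rows above migrate free-form yes/no prompts from wikipedia.input to wikipedia.inputChoice, which bundles the prompt, the option labels, the hotkeys, and a default returned on a bare Enter. A minimal sketch of such a helper, hypothetical rather than the actual pywikipedia implementation:

```python
# Sketch of an inputChoice-style prompt helper (hypothetical, not the
# real pywikipedia code). Loops until a known hotkey or Enter is given.
def input_choice(prompt, options, hotkeys, default=None):
    full_prompt = '%s (%s) [%s] ' % (prompt, ', '.join(options), '|'.join(hotkeys))
    while True:
        answer = raw_input(full_prompt).strip().lower()
        if answer == '' and default is not None:
            return default.lower()
        if answer in [h.lower() for h in hotkeys]:
            return answer

# Usage mirroring the diff above:
# choice = input_choice('Follow it anyway?', ['Yes', 'No'], ['y', 'N'], 'N')
```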
answer = wikipedia.input(u"What should be done [(a)ccept, (r)eject, (g)ive up, accept a(l)l] :")
|
answer = wikipedia.inputChoice(u'What should be done?', ['accept', 'reject', 'give up', 'accept all'], ['a', 'r', 'G', 'l'], 'G')
|
def assemble(self): # No errors have been seen so far nerr = 0 # Build up a dictionary of all links found, with the site as key. # Each value will be a list. mysite = wikipedia.getSite() new = {} for pl in self.done.keys(): site = pl.site() if site == mysite and pl.exists() and not pl.isRedirectPage(): if pl != self.inpl: self.problem("Found link to %s" % pl.aslink() ) self.whereReport(pl) nerr += 1 elif pl.exists() and not pl.isRedirectPage(): if site in new: new[site].append(pl) else: new[site] = [pl] # See if new{} contains any problematic values result = {} for k, v in new.items(): if len(v) > 1: nerr += 1 self.problem("Found more than one link for %s"%k) if nerr == 0 and len( self.foundin[self.inpl] ) == 0 and len(new) != 0: self.problem(u'None of %i other languages refers back to %s' % (len(new), self.inpl.aslink())) # If there are any errors, we need to go through all # items manually. if nerr > 0:
|
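The assemble() context above builds a dictionary keyed by site, appends every surviving page to that site's list, and counts an error whenever a site collects more than one candidate. The same group-and-flag pattern in isolation, with pages modelled as (site, title) pairs:

```python
# Group-and-flag sketch of the conflict check in assemble(): one list of
# candidate pages per site, then report sites with more than one.
def find_conflicts(pages):
    by_site = {}
    for site, title in pages:
        by_site.setdefault(site, []).append(title)
    return dict((s, t) for (s, t) in by_site.items() if len(t) > 1)

# find_conflicts([('de', 'A'), ('de', 'B'), ('fr', 'C')]) -> {'de': ['A', 'B']}
```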
answer = wikipedia.input(u'Submit? [y|N]')
|
answer = wikipedia.inputChoice(u'Submit?', ['Yes', 'No'], ['y', 'N'], 'N')
|
def finish(self, sa = None): """Round up the subject, making any necessary changes. This method should be called exactly once after the todo list has gone empty.
|
start = wikipedia.input(u'Which page to start from: ', wikipedia.myencoding())
|
start = wikipedia.input(u'Which page to start from: ')
|
def readWarnfile(filename, sa): import warnfile reader = warnfile.WarnfileReader(filename) # we won't use removeHints (hints, removeHints) = reader.getHints() for pagename in hints.iterkeys(): pl = wikipedia.Page(wikipedia.getSite(), pagename) # The WarnfileReader gives us a list of pagelinks, but titletranslate.py expects a list of strings, so we convert it back. # TODO: This is a quite ugly hack, in the future we should maybe make titletranslate expect a list of pagelinks. hintStrings = [] for hint in hints[pagename]: #lang = hintStrings.append('%s:%s' % (hint.site().language(), hint.linkname())) sa.add(pl, hints = hintStrings)
|
inname = wikipedia.input(u'Which page to check: ', wikipedia.myencoding())
|
inname = wikipedia.input(u'Which page to check: ')
|
def readWarnfile(filename, sa): import warnfile reader = warnfile.WarnfileReader(filename) # we won't use removeHints (hints, removeHints) = reader.getHints() for pagename in hints.iterkeys(): pl = wikipedia.Page(wikipedia.getSite(), pagename) # The WarnfileReader gives us a list of pagelinks, but titletranslate.py expects a list of strings, so we convert it back. # TODO: This is a quite ugly hack, in the future we should maybe make titletranslate expect a list of pagelinks. hintStrings = [] for hint in hints[pagename]: #lang = hintStrings.append('%s:%s' % (hint.site().language(), hint.linkname())) sa.add(pl, hints = hintStrings)
|
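The two rows above drop the explicit wikipedia.myencoding() argument from wikipedia.input, suggesting the helper now decodes console input to unicode itself. A sketch of that idea; the fallback encoding is an assumption:

```python
# Sketch of an input helper that decodes console bytes to unicode once,
# so call sites no longer pass an encoding. Fallback is an assumption.
import sys

def unicode_input(prompt):
    enc = sys.stdin.encoding or 'utf-8'
    return raw_input(prompt.encode(enc)).decode(enc)
```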
print repr(s),repr(s2[:100])
|
def treesearch(pl): arr = {pl:None} # First make one step based on the language itself try: n = treestep(arr, pl, abort_on_redirect = 1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n == 0 and not arr[pl]: print "Mother doesn't exist" return if untranslated: if len(arr) > 1: print "Already has translations" else: if bell: sys.stdout.write('\07') newhint = raw_input("Hint:") if not newhint: return hints.append(newhint) # Then add translations if we survived. autotranslate(pl, arr, same = same) modifications = 1 while modifications: modifications = 0 for newpl in arr.keys(): if arr[newpl] is None: modifications += treestep(arr, newpl) return arr
|
|
'ru' : [u'disambig'],
|
'ru' : [u'disambig',u'значения'],
|
def __init__(self):
|
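The row above adds the localized template name to the per-language disambiguation list for Russian. A sketch of how such a table might be consulted; the real lookup lives elsewhere in the framework, so treat this as illustrative:

```python
# Hypothetical consultation of a per-language disambiguation-template
# table like the one edited above.
disambig_templates = {
    'en': [u'disambig'],
    'ru': [u'disambig', u'значения'],
}

def looks_like_disambig(lang, templates_on_page):
    known = disambig_templates.get(lang, [u'disambig'])
    return any(t.lower() in known for t in templates_on_page)

# looks_like_disambig('ru', [u'Значения']) -> True
```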
namespaces.append(namespace_title.lower())
|
if namespace_title != namespace_title.lower(): namespaces.append(namespace_title.lower())
|
def category_namespaces(self, code): namespaces = [] namespace_title = self.namespace(code, 14) namespaces.append(namespace_title) namespaces.append(namespace_title.lower()) default_namespace_title = self.namespace('_default', 14) if namespace_title != default_namespace_title: namespaces.append(default_namespace_title) namespaces.append(default_namespace_title.lower()) return namespaces
|
namespaces.append(default_namespace_title.lower())
|
if default_namespace_title != default_namespace_title.lower(): namespaces.append(default_namespace_title.lower())
|
def category_namespaces(self, code): namespaces = [] namespace_title = self.namespace(code, 14) namespaces.append(namespace_title) namespaces.append(namespace_title.lower()) default_namespace_title = self.namespace('_default', 14) if namespace_title != default_namespace_title: namespaces.append(default_namespace_title) namespaces.append(default_namespace_title.lower()) return namespaces
|
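The two rows above guard each lowercase append so that languages whose namespace name has no distinct lowercase form do not get duplicate entries. A consolidated sketch of the fixed logic, with the two titles passed in directly:

```python
# Consolidated sketch of the fixed category_namespaces(): append each
# variant only when it differs from what is already in the list.
def category_namespaces(local_title, default_title):
    namespaces = [local_title]
    if local_title != local_title.lower():
        namespaces.append(local_title.lower())
    if local_title != default_title:
        namespaces.append(default_title)
        if default_title != default_title.lower():
            namespaces.append(default_title.lower())
    return namespaces

# category_namespaces(u'Категория', u'Category')
# -> [u'Категория', u'категория', u'Category', u'category']
```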
for original_cat in article.categories(): wikipedia.output('* %s' % original_cat.linkname())
|
for cat in article.categories(): wikipedia.output('* %s' % cat.linkname())
|
def move_to_category(article, original_cat, current_cat): print wikipedia.output(u'Treating page %s, currently in category %s' % (article.linkname(), current_cat.linkname())) subcatlist = get_subcats(current_cat) supercatlist = get_supercats(current_cat) print if len(subcatlist) == 0: print 'This category has no subcategories.' print if len(supercatlist) == 0: print 'This category has no supercategories.' print # show subcategories as possible choices (with numbers) for i in range(len(supercatlist)): # layout: we don't expect a cat to have more than 10 supercats print 'u%d - Move up to %s' % (i, supercatlist[i].linkname()) for i in range(len(subcatlist)): # layout: we don't expect a cat to have more than 100 subcats print '%2d - Move down to %s' % (i, subcatlist[i].linkname()) print ' j - Jump to another category' print ' n - Skip this article' print ' r - Remove this category tag' print ' ? - Read the page' wikipedia.output(u'Enter - Save category as %s' % current_cat.linkname())
|
except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg
|
def check(self, useHEAD = True): """ Returns True and the server status message if the page is alive. Otherwise returns false """ try: wasRedirected = self.resolveRedirect(useHEAD = useHEAD) except httplib.error, arg: return False, u'HTTP Error: %s' % arg except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg if wasRedirected: if self.url in self.redirectChain: if useHEAD: # Some servers don't seem to handle HEAD requests properly, # which leads to a cyclic list of redirects. # We simply start from the beginning, but this time, # we don't use HEAD, but GET requests. redirChecker = LinkChecker(self.redirectChain[0]) return redirChecker.check(useHEAD = False) else: return False, u'HTTP Redirect Loop: %s' % ' -> '.join(self.redirectChain + [self.url]) elif len(self.redirectChain) >= 19: if useHEAD: # Some servers don't seem to handle HEAD requests properly, # which leads to a long (or infinite) list of redirects. # We simply start from the beginning, but this time, # we don't use HEAD, but GET requests. redirChecker = LinkChecker(self.redirectChain[0]) return redirChecker.check(useHEAD = False) else: return False, u'Long Chain of Redirects: %s' % ' -> '.join(self.redirectChain + [self.url]) else: redirChecker = LinkChecker(self.url, self.redirectChain) return redirChecker.check(useHEAD = useHEAD) else: try: if self.scheme == 'http': conn = httplib.HTTPConnection(self.host) elif self.scheme == 'https': conn = httplib.HTTPSConnection(self.host) except httplib.error, arg: return False, u'HTTP Error: %s' % arg try: conn.request('GET', '%s%s' % (self.path, self.query), None, self.header) except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg try: response = conn.getresponse() except Exception, arg: return False, u'Error: %s' % arg #wikipedia.output('%s: %s' % (self.url, response.status)) # site down if the server status is between 400 and 499 siteDown = response.status in range(400, 500) return not siteDown, '%s %s' % (response.status, response.reason)
|
|
notInside = '\]\s<>}"'
|
def checkLinksIn(self, page): try: text = page.get() except wikipedia.NoPage: wikipedia.output(u'%s does not exist.' % page.title()) return # RFC 2396 says that URLs may only contain certain characters. # For this regex we also accept non-allowed characters, so that the bot # will later show these links as broken ('Non-ASCII Characters in URL'). # Note: While allowing parenthesis inside URLs, MediaWiki will regard # right parenthesis at the end of the URL as not part of that URL. # The same applies to dot, comma, colon and some other characters. # So characters inside the URL can be anything except whitespace, # closing squared brackets, quotation marks, greater than and less # than, and the last character also can't be parenthesis or another # character disallowed by MediaWiki. # MediaWiki allows closing curly braces inside links, but such braces # often come from templates where URLs are parameters, so as a # workaround we won't allow them inside links here. The same is true # for the vertical bar. # The first half of this regular expression is required because '' is # not allowed inside links. linkR = re.compile(r'http[s]?://[^\]\s<>}"]*?[^\]\s\)\.:;,<>}\|"](?=\'\')|http[s]?://[^\]\s<>}"]*[^\]\s\)\.:;,<>}"\|]') # Remove HTML comments in URLs as well as URLs in HTML comments. # Also remove text inside nowiki links text = re.sub('(?s)<nowiki>.*?</nowiki>|<!--.*?-->', '', text) urls = linkR.findall(text) for url in urls: ignoreUrl = False for ignoreR in ignorelist: if ignoreR.match(url): ignoreUrl = True if not ignoreUrl: # Limit the number of threads started at the same time. Each # thread will check one page, then die. while threading.activeCount() >= config.max_external_links: # wait 100 ms time.sleep(0.1) thread = LinkCheckThread(page, url, self.history) # thread dies when program terminates thread.setDaemon(True) thread.start()
|
|
linkR = re.compile(r'http[s]?://[^\]\s<>}"]*?[^\]\s\)\.:;,<>}\|"](?=\'\')|http[s]?://[^\]\s<>}"]*[^\]\s\)\.:;,<>}"\|]')
|
linkR = re.compile(r'http[s]?://[^' + notInside + ']*?[^' + notAtEnd + '](?=[' + notAtEnd+ ']*\'\')|http[s]?://[^' + notInside + ']*[^' + notAtEnd + ']')
|
def checkLinksIn(self, page): try: text = page.get() except wikipedia.NoPage: wikipedia.output(u'%s does not exist.' % page.title()) return # RFC 2396 says that URLs may only contain certain characters. # For this regex we also accept non-allowed characters, so that the bot # will later show these links as broken ('Non-ASCII Characters in URL'). # Note: While allowing parenthesis inside URLs, MediaWiki will regard # right parenthesis at the end of the URL as not part of that URL. # The same applies to dot, comma, colon and some other characters. # So characters inside the URL can be anything except whitespace, # closing squared brackets, quotation marks, greater than and less # than, and the last character also can't be parenthesis or another # character disallowed by MediaWiki. # MediaWiki allows closing curly braces inside links, but such braces # often come from templates where URLs are parameters, so as a # workaround we won't allow them inside links here. The same is true # for the vertical bar. # The first half of this regular expression is required because '' is # not allowed inside links. linkR = re.compile(r'http[s]?://[^\]\s<>}"]*?[^\]\s\)\.:;,<>}\|"](?=\'\')|http[s]?://[^\]\s<>}"]*[^\]\s\)\.:;,<>}"\|]') # Remove HTML comments in URLs as well as URLs in HTML comments. # Also remove text inside nowiki links text = re.sub('(?s)<nowiki>.*?</nowiki>|<!--.*?-->', '', text) urls = linkR.findall(text) for url in urls: ignoreUrl = False for ignoreR in ignorelist: if ignoreR.match(url): ignoreUrl = True if not ignoreUrl: # Limit the number of threads started at the same time. Each # thread will check one page, then die. while threading.activeCount() >= config.max_external_links: # wait 100 ms time.sleep(0.1) thread = LinkCheckThread(page, url, self.history) # thread dies when program terminates thread.setDaemon(True) thread.start()
|
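The two rows above derive the link-matching regex from shared character-class constants instead of repeating them inline. The value of notAtEnd is not shown in this excerpt; the one below is inferred from the removed hard-coded pattern, so treat it as an assumption:

```python
# Rebuilding linkR from the two constants; notAtEnd is inferred from the
# hard-coded pattern in the removed line, not taken from the source.
import re

notInside = '\]\s<>}"'
notAtEnd = '\]\s\)\.:;,<>}\|"'
linkR = re.compile(r'http[s]?://[^' + notInside + ']*?[^' + notAtEnd + '](?=['
                   + notAtEnd + ']*\'\')|http[s]?://[^' + notInside + ']*[^'
                   + notAtEnd + ']')

# linkR.findall('see http://example.org/page). now') -> ['http://example.org/page']
```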
elif not xpl in shouldlink:
|
if not xpl in shouldlink:
|
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" for site in new.keys(): pl = new[site] if not unequal.bigger(self.inpl, pl) and not pl.hashname(): shouldlink = new.values() + [self.inpl] linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked: for l in linked: if l.site() == xpl.site(): wikipedia.output(u"WARNING: %s does not link to %s but to %s" % (pl.asselflink(), xpl.aslink(None), l.aslink(None))) break else: wikipedia.output(u"WARNING: %s does not link to %s" % (pl.asselflink(), xpl.aslink(None))) # Check for superfluous links for xpl in linked: elif not xpl in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpl.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s links to incorrect %s" % (pl.asselflink(), xpl.aslink(None)))
|
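The reportBacklinks() context above leans on Python's for/else: the else branch runs only when the loop finished without a break, i.e. when no page on the expected site turned up at all. A stripped-down sketch of that pattern:

```python
# for/else as used in reportBacklinks(): break means "found an
# alternative page on that site", else means "no link at all".
def report_link(linked, expected_site):
    for site, title in linked:        # linked: [(site, title), ...]
        if site == expected_site:
            print "WARNING: links to %s instead" % title
            break
    else:
        print "WARNING: no link for %s at all" % expected_site

# report_link([('fr', 'Chat')], 'de') prints the second warning.
```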
'af': u'%d v.Chr.',
|
'af':u'%d v.C.',
|
def __call__(self, m, d): import wikipedia return wikipedia.html2unicode((date_format[m][self.site.lang]) % d, site = self.site)
|
'he': u'%d &
|
'he':u'%d לפנה"ס',
|
def __call__(self, m, d): import wikipedia return wikipedia.html2unicode((date_format[m][self.site.lang]) % d, site = self.site)
|
'ja':'%d& 'zh':'%d& 'ko':'%d& 'minnan':'%d nî', 'ur':'%d&
|
'ja':u'%d年', 'zh':u'%d年', 'ko':u'%d년', 'minnan':u'%d nî', 'ur':u'%dسبم'
|
def __call__(self, m, d): import wikipedia return wikipedia.html2unicode((date_format[m][self.site.lang]) % d, site = self.site)
|
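The three rows above replace HTML-entity-encoded values in the date_format table with literal unicode; the __call__ context shows the two-level lookup that consumes them. A sketch of that lookup; the outer key name 'yearBC' is an assumption:

```python
# Two-level date_format lookup as used in __call__ above: outer key
# picks the date pattern, inner key the language. 'yearBC' is assumed.
date_format = {
    'yearBC': {
        'af': u'%d v.C.',
        'he': u'%d \u05dc\u05e4\u05e0\u05d4"\u05e1',
    },
}

def format_date(pattern, lang, value):
    return date_format[pattern][lang] % value

# format_date('yearBC', 'af', 44) -> u'44 v.C.'
```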
self.todo = []
|
self.todo = {}
|
def workDone(self, counter): """This is called by a worker to tell us that the promised work was completed as far as possible. The only argument is an instance of a counter class, that has methods minus() and plus() to keep counts of the total work todo.""" # Loop over all the pages that should have been taken care of for pl in self.pending: # Mark the page as done self.done[pl] = pl.code() # Register this fact at the todo-counter. counter.minus(pl.code()) # Assume it's not a redirect isredirect = 0 # Now check whether any interwiki links should be added to the # todo list. if unequal.bigger(self.inpl, pl): print "NOTE: %s is bigger than %s, not following references" % (pl, self.inpl) else: try: iw = pl.interwiki() except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(pl.code(),arg.args[0]) print "NOTE: %s is redirect to %s" % (pl.asasciilink(), pl3.asasciilink()) if pl == self.inpl: # This is a redirect page itself. We don't need to # follow the redirection. isredirect = 1 # In this case we can also stop all hints! for pl2 in self.todo: counter.minus(pl2.code()) self.todo = {} pass elif not globalvar.followredirect: print "NOTE: not following redirects." elif unequal.unequal(self.inpl, pl3): print "NOTE: %s is unequal to %s, not adding it" % (pl3, self.inpl) else: if self.conditionalAdd(pl3, counter): if globalvar.shownew: print "%s: %s gives new redirect %s"% (self.inpl.asasciiselflink(), pl.asasciilink(), pl3.asasciilink()) except wikipedia.NoPage: print "NOTE: %s does not exist" % pl.asasciilink() if pl == self.inpl: # This is the home subject page. # In this case we can stop all hints! for pl2 in self.todo: counter.minus(pl2.code()) self.todo = [] pass except wikipedia.SubpageError: print "NOTE: %s subpage does not exist" % pl.asasciilink() else: if self.inpl == pl: self.untranslated = (len(iw) == 0) if globalvar.untranslatedonly: # Ignore the interwiki links. iw = () for pl2 in iw: if unequal.unequal(self.inpl, pl2): print "NOTE: %s is unequal to %s, not adding it" % (pl2, self.inpl) else: if self.conditionalAdd(pl2, counter): if globalvar.shownew: print "%s: %s gives new interwiki %s"% (self.inpl.asasciiselflink(), pl.asasciilink(), pl2.asasciilink()) # These pages are no longer 'in progress' del self.pending # Check whether we need hints and the user offered to give them if self.untranslated and not self.hintsasked: print "NOTE: %s does not have any interwiki links" % self.inpl.asasciilink() # Only once! self.hintsasked = True if globalvar.untranslated: if globalvar.bell: sys.stdout.write('\07') newhint = None while 1: newhint = raw_input("Hint:") if newhint and not ':' in newhint: print "Please enter a hint like language:pagename" #print "or type 'q' to stop generating new pages" print "or type nothing if you do not have a hint" elif not newhint: break else: arr = {} import titletranslate titletranslate.translate(pl, arr, same = False, hints = [newhint]) for pl in arr.iterkeys(): self.todo[pl] = pl.code() counter.plus(pl.code())
|
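The row above fixes a reset of self.todo to a list where the rest of workDone treats it as a dict mapping page to site. A minimal demonstration of why the list reset breaks:

```python
# Why the removed line's [] reset is a bug: later item assignment
# assumes a mapping, and a list raises TypeError.
todo = []
try:
    todo['SomePage'] = 'nl'        # list indices must be integers
except TypeError, err:
    print "list reset breaks the mapping protocol:", err

todo = {}
todo['SomePage'] = 'nl'            # fine once todo is a dict again
```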
if globalvar.bell: sys.stdout.write('\07')
|
def assemble(self, returnonquestion = False, askall = False): if globalvar.bell: sys.stdout.write('\07') new = {} for pl in self.done.keys(): code = pl.code() if code == wikipedia.mylang and pl.exists() and not pl.isRedirectPage() and not pl.isEmpty(): if pl != self.inpl: if returnonquestion: return None self.problem('Someone refers to %s with us' % pl.asasciilink()) if globalvar.autonomous: return None elif pl.exists() and not pl.isRedirectPage(): if new.has_key(code) and new[code] is None: print "NOTE: Ignoring %s"%(pl.asasciilink()) elif new.has_key(code) and new[code] != pl: if returnonquestion: return None self.problem("'%s' as well as '%s'" % (new[code].asasciilink(), pl.asasciilink())) if globalvar.autonomous: return None while 1: answer = raw_input("Use (f)ormer or (l)atter or (n)either or (g)ive up?") if answer.startswith('f'): break elif answer.startswith('l'): new[pl.code()] = pl break elif answer.startswith('n'): new[pl.code()] = None break elif answer.startswith('g'): # Give up return None elif code in ('zh-tw','zh-cn') and new.has_key('zh') and new['zh'] is not None: print "NOTE: Ignoring %s, using %s"%(new['zh'].asasciilink(),pl.asasciilink()) if self.ask(askall, pl): new['zh'] = None # Remove the global zh link new[code] = pl # Add the more precise one elif code == 'zh' and ( (new.has_key('zh-tw') and new['zh-tw'] is not None) or (new.has_key('zh-cn') and new['zh-cn'] is not None)): print "NOTE: Ignoring %s"%(pl.asasciilink()) pass # do not add global zh if there is a specific zh-tw or zh-cn elif code not in new: if self.ask(askall, pl): new[code] = pl
|
|
newfn = wikipedia.input(u'Better name:')
|
while True: newfn = wikipedia.input(u'Better name:') if '/' in newfn: print "Invalid character: '/'. Please try again" else: break
|
def get_image(original_url, source_wiki, original_description, keep=False, debug=False): # work with a copy of argument variables so we can reuse the # original ones if the upload fails fn = original_url description = original_description # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] # convert ISO 8859-1 to Unicode, or parse UTF-8. If source_wiki is None, # the filename is already in Unicode. if source_wiki != None: try: fn = unicode(fn, wikipedia.code2encoding(source_wiki)) except TypeError: print 'Type error in lib_images.py. This should not happen. Please report this problem.' pass if not keep: print "The filename on wikipedia will default to:", fn newfn = wikipedia.input(u'Better name:') if newfn != '': fn = newfn # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki fn = fn.encode(wikipedia.myencoding()) # A proper description for the submission. if description=='': description = wikipedia.input(u'Give a description for the image:') else: print ("The suggested description is:") print print wikipedia.output(description) print print ("Enter return to use this description, enter a text to add something") print ("at the end, or enter = followed by a text to replace the description.") newtext = wikipedia.input(u'Enter return, text or =text : ') if newtext=='': pass elif newtext[0]=='=': description=newtext[1:] else: description=description+' '+newtext # try to encode the description to the encoding used by the home Wikipedia. # if that's not possible (e.g. because there are non-Latin-1 characters and # the home Wikipedia uses Latin-1), convert all non-ASCII characters to # HTML entities. try: description = description.encode(wikipedia.myencoding()) except UnicodeEncodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) except UnicodeDecodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) # don't upload if we're in debug mode if not debug: # WARNING: broken for Wikipedia 1.4 (test.wikipedia.org) returned_html = post_multipart(wikipedia.family.hostname(wikipedia.mylang), wikipedia.family.upload_address(wikipedia.mylang), (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpIgnoreWarning', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) # do we know how the "success!" HTML page should look like? success_msg = mediawiki_messages.get('successfulupload') success_msgR = re.compile(re.escape(success_msg)) if success_msgR.search(returned_html): print "Upload successful." else: # dump the HTML page print returned_html + "\n\n" answer = raw_input(u"Upload of " + fn + " failed. Above you see the HTML page which was returned by MediaWiki. Try again? [y|N]") if answer in ["y", "Y"]: return get_image(original_url, source_wiki, original_description, debug) else: return return fn
|
print wikipedia.output(description)
|
wikipedia.output(description)
|
def get_image(original_url, source_wiki, original_description, keep=False, debug=False): # work with a copy of argument variables so we can reuse the # original ones if the upload fails fn = original_url description = original_description # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] # convert ISO 8859-1 to Unicode, or parse UTF-8. If source_wiki is None, # the filename is already in Unicode. if source_wiki != None: try: fn = unicode(fn, wikipedia.code2encoding(source_wiki)) except TypeError: print 'Type error in lib_images.py. This should not happen. Please report this problem.' pass if not keep: print "The filename on wikipedia will default to:", fn newfn = wikipedia.input(u'Better name:') if newfn != '': fn = newfn # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki fn = fn.encode(wikipedia.myencoding()) # A proper description for the submission. if description=='': description = wikipedia.input(u'Give a description for the image:') else: print ("The suggested description is:") print print wikipedia.output(description) print print ("Enter return to use this description, enter a text to add something") print ("at the end, or enter = followed by a text to replace the description.") newtext = wikipedia.input(u'Enter return, text or =text : ') if newtext=='': pass elif newtext[0]=='=': description=newtext[1:] else: description=description+' '+newtext # try to encode the description to the encoding used by the home Wikipedia. # if that's not possible (e.g. because there are non-Latin-1 characters and # the home Wikipedia uses Latin-1), convert all non-ASCII characters to # HTML entities. try: description = description.encode(wikipedia.myencoding()) except UnicodeEncodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) except UnicodeDecodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) # don't upload if we're in debug mode if not debug: # WARNING: broken for Wikipedia 1.4 (test.wikipedia.org) returned_html = post_multipart(wikipedia.family.hostname(wikipedia.mylang), wikipedia.family.upload_address(wikipedia.mylang), (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpIgnoreWarning', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) # do we know how the "success!" HTML page should look like? success_msg = mediawiki_messages.get('successfulupload') success_msgR = re.compile(re.escape(success_msg)) if success_msgR.search(returned_html): print "Upload successful." else: # dump the HTML page print returned_html + "\n\n" answer = raw_input(u"Upload of " + fn + " failed. Above you see the HTML page which was returned by MediaWiki. Try again? [y|N]") if answer in ["y", "Y"]: return get_image(original_url, source_wiki, original_description, debug) else: return return fn
|
formdata = {} formdata["wpUploadDescription"] = description if wikipedia.version() >= '1.4': formdata["wpUploadCopyStatus"] = wikipedia.input(u"Copyright status: ") formdata["wpUploadSource"] = wikipedia.input(u"Source of image: ") else: formdata["wpUploadAffirm"] = "1" formdata["wpUpload"] = "upload bestand" formdata["wpIgnoreWarning"] = "1"
|
def get_image(original_url, source_wiki, original_description, keep=False, debug=False): # work with a copy of argument variables so we can reuse the # original ones if the upload fails fn = original_url description = original_description # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] # convert ISO 8859-1 to Unicode, or parse UTF-8. If source_wiki is None, # the filename is already in Unicode. if source_wiki != None: try: fn = unicode(fn, wikipedia.code2encoding(source_wiki)) except TypeError: print 'Type error in lib_images.py. This should not happen. Please report this problem.' pass if not keep: print "The filename on wikipedia will default to:", fn newfn = wikipedia.input(u'Better name:') if newfn != '': fn = newfn # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki fn = fn.encode(wikipedia.myencoding()) # A proper description for the submission. if description=='': description = wikipedia.input(u'Give a description for the image:') else: print ("The suggested description is:") print print wikipedia.output(description) print print ("Enter return to use this description, enter a text to add something") print ("at the end, or enter = followed by a text to replace the description.") newtext = wikipedia.input(u'Enter return, text or =text : ') if newtext=='': pass elif newtext[0]=='=': description=newtext[1:] else: description=description+' '+newtext # try to encode the description to the encoding used by the home Wikipedia. # if that's not possible (e.g. because there are non-Latin-1 characters and # the home Wikipedia uses Latin-1), convert all non-ASCII characters to # HTML entities. try: description = description.encode(wikipedia.myencoding()) except UnicodeEncodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) except UnicodeDecodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) # don't upload if we're in debug mode if not debug: # WARNING: broken for Wikipedia 1.4 (test.wikipedia.org) returned_html = post_multipart(wikipedia.family.hostname(wikipedia.mylang), wikipedia.family.upload_address(wikipedia.mylang), (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpIgnoreWarning', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) # do we know how the "success!" HTML page should look like? success_msg = mediawiki_messages.get('successfulupload') success_msgR = re.compile(re.escape(success_msg)) if success_msgR.search(returned_html): print "Upload successful." else: # dump the HTML page print returned_html + "\n\n" answer = raw_input(u"Upload of " + fn + " failed. Above you see the HTML page which was returned by MediaWiki. Try again? [y|N]") if answer in ["y", "Y"]: return get_image(original_url, source_wiki, original_description, debug) else: return return fn
|
|
try: description = description.encode(wikipedia.myencoding()) except UnicodeEncodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) except UnicodeDecodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding())
|
for key in formdata: assert isinstance(key, basestring), "ERROR: %s is not a string but %s" % (key, type(key)) try: formdata[key] = formdata[key].encode(wikipedia.myencoding()) except (UnicodeEncodeError, UnicodeDecodeError): formdata[key] = wikipedia.UnicodeToAsciiHtml(formdata[key]).encode(wikipedia.myencoding())
|
def get_image(original_url, source_wiki, original_description, keep=False, debug=False): # work with a copy of argument variables so we can reuse the # original ones if the upload fails fn = original_url description = original_description # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] # convert ISO 8859-1 to Unicode, or parse UTF-8. If source_wiki is None, # the filename is already in Unicode. if source_wiki != None: try: fn = unicode(fn, wikipedia.code2encoding(source_wiki)) except TypeError: print 'Type error in lib_images.py. This should not happen. Please report this problem.' pass if not keep: print "The filename on wikipedia will default to:", fn newfn = wikipedia.input(u'Better name:') if newfn != '': fn = newfn # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki fn = fn.encode(wikipedia.myencoding()) # A proper description for the submission. if description=='': description = wikipedia.input(u'Give a description for the image:') else: print ("The suggested description is:") print print wikipedia.output(description) print print ("Enter return to use this description, enter a text to add something") print ("at the end, or enter = followed by a text to replace the description.") newtext = wikipedia.input(u'Enter return, text or =text : ') if newtext=='': pass elif newtext[0]=='=': description=newtext[1:] else: description=description+' '+newtext # try to encode the description to the encoding used by the home Wikipedia. # if that's not possible (e.g. because there are non-Latin-1 characters and # the home Wikipedia uses Latin-1), convert all non-ASCII characters to # HTML entities. try: description = description.encode(wikipedia.myencoding()) except UnicodeEncodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) except UnicodeDecodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) # don't upload if we're in debug mode if not debug: # WARNING: broken for Wikipedia 1.4 (test.wikipedia.org) returned_html = post_multipart(wikipedia.family.hostname(wikipedia.mylang), wikipedia.family.upload_address(wikipedia.mylang), (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpIgnoreWarning', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) # do we know how the "success!" HTML page should look like? success_msg = mediawiki_messages.get('successfulupload') success_msgR = re.compile(re.escape(success_msg)) if success_msgR.search(returned_html): print "Upload successful." else: # dump the HTML page print returned_html + "\n\n" answer = raw_input(u"Upload of " + fn + " failed. Above you see the HTML page which was returned by MediaWiki. Try again? [y|N]") if answer in ["y", "Y"]: return get_image(original_url, source_wiki, original_description, debug) else: return return fn
|
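The formdata loop above generalizes the old single-field fallback: encode with the wiki's encoding first, and degrade to HTML character references only when that fails. A self-contained sketch of the same idea, with xmlcharrefreplace standing in for wikipedia.UnicodeToAsciiHtml:

```python
# Encode-with-fallback sketch: try the target encoding, fall back to
# numeric character references on failure.
def encode_or_entities(text, encoding):
    try:
        return text.encode(encoding)
    except (UnicodeEncodeError, UnicodeDecodeError):
        return text.encode('ascii', 'xmlcharrefreplace')

# encode_or_entities(u'\u4e2d\u6587', 'latin-1') -> '&#20013;&#25991;'
```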
(('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpIgnoreWarning', '1'), ('wpUpload','upload bestand')),
|
formdata.items(),
|
def get_image(original_url, source_wiki, original_description, keep=False, debug=False): # work with a copy of argument variables so we can reuse the # original ones if the upload fails fn = original_url description = original_description # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] # convert ISO 8859-1 to Unicode, or parse UTF-8. If source_wiki is None, # the filename is already in Unicode. if source_wiki != None: try: fn = unicode(fn, wikipedia.code2encoding(source_wiki)) except TypeError: print 'Type error in lib_images.py. This should not happen. Please report this problem.' pass if not keep: print "The filename on wikipedia will default to:", fn newfn = wikipedia.input(u'Better name:') if newfn != '': fn = newfn # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki fn = fn.encode(wikipedia.myencoding()) # A proper description for the submission. if description=='': description = wikipedia.input(u'Give a description for the image:') else: print ("The suggested description is:") print print wikipedia.output(description) print print ("Enter return to use this description, enter a text to add something") print ("at the end, or enter = followed by a text to replace the description.") newtext = wikipedia.input(u'Enter return, text or =text : ') if newtext=='': pass elif newtext[0]=='=': description=newtext[1:] else: description=description+' '+newtext # try to encode the description to the encoding used by the home Wikipedia. # if that's not possible (e.g. because there are non-Latin-1 characters and # the home Wikipedia uses Latin-1), convert all non-ASCII characters to # HTML entities. try: description = description.encode(wikipedia.myencoding()) except UnicodeEncodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) except UnicodeDecodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.myencoding()) # don't upload if we're in debug mode if not debug: # WARNING: broken for Wikipedia 1.4 (test.wikipedia.org) returned_html = post_multipart(wikipedia.family.hostname(wikipedia.mylang), wikipedia.family.upload_address(wikipedia.mylang), (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpIgnoreWarning', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) # do we know how the "success!" HTML page should look like? success_msg = mediawiki_messages.get('successfulupload') success_msgR = re.compile(re.escape(success_msg)) if success_msgR.search(returned_html): print "Upload successful." else: # dump the HTML page print returned_html + "\n\n" answer = raw_input(u"Upload of " + fn + " failed. Above you see the HTML page which was returned by MediaWiki. Try again? [y|N]") if answer in ["y", "Y"]: return get_image(original_url, source_wiki, original_description, debug) else: return return fn
|
print repr(data)
|
def getData(self): import httplib try: addr = self.addr%special[self.code] except KeyError: print "BUG: Can not find name of Special in %s:" % self.code raise pagenames = u'\r\n'.join([x.hashfreeLinkname() for x in self.pages]) pagenames = forCode(pagenames, self.code) data = urlencode(( ('action', 'submit'), ('pages', pagenames), ('curonly', 'True'), )) print repr(data) headers = {"Content-type": "application/x-www-form-urlencoded", "User-agent": "RobHooftWikiRobot/1.0"} # Slow ourselves down get_throttle(requestsize = len(self.pages)) # Now make the actual request to the server conn = httplib.HTTPConnection(langs[self.code]) conn.request("POST", addr, data, headers) response = conn.getresponse() data = response.read() conn.close() return data
|
|
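The getData() context above shows the era's raw form POST over httplib; the diff merely drops the debug print of the encoded payload. A trimmed, self-contained sketch of the same request pattern, with host and path as placeholders:

```python
# Minimal httplib form POST in the style of getData() above.
import httplib
from urllib import urlencode

def post_form(host, path, fields):
    data = urlencode(fields)
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "User-agent": "RobHooftWikiRobot/1.0"}
    conn = httplib.HTTPConnection(host)
    conn.request("POST", path, data, headers)
    response = conn.getresponse()
    body = response.read()
    conn.close()
    return response.status, body

# status, html = post_form('nl.wikipedia.org', '/w/index.php', [('action', 'submit')])
```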
def check(self):
|
def check(self, useHEAD = True):
|
def check(self): """ Returns True and the server status message if the page is alive. Otherwise returns false """ try: wasRedirected = self.resolveRedirect() except httplib.error, arg: return False, u'HTTP Error: %s' % arg except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg if wasRedirected: #print "NEW TARGET:", self.url, '\n' if self.url in self.redirectChain: return False, u'HTTP Redirect Loop: %s' % ' -> '.join(self.redirectChain + [self.url]) elif len(self.redirectChain) >= 19: return False, u'Long Chain of Redirects: %s' % ' -> '.join(self.redirectChain + [self.url]) else: redirChecker = LinkChecker(self.url, self.redirectChain) return redirChecker.check() else: try: if self.scheme == 'http': conn = httplib.HTTPConnection(self.host) elif self.scheme == 'https': conn = httplib.HTTPSConnection(self.host) except httplib.error, arg: return False, u'HTTP Error: %s' % arg try: conn.request('GET', '%s%s' % (self.path, self.query), None, self.header) except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg try: response = conn.getresponse() except Exception, arg: return False, u'Error: %s' % arg #wikipedia.output('%s: %s' % (self.url, response.status)) # site down if the server status is between 400 and 499 siteDown = response.status in range(400, 500) return not siteDown, '%s %s' % (response.status, response.reason)
|
wasRedirected = self.resolveRedirect()
|
wasRedirected = self.resolveRedirect(useHEAD = useHEAD)
|
def check(self): """ Returns True and the server status message if the page is alive. Otherwise returns false """ try: wasRedirected = self.resolveRedirect() except httplib.error, arg: return False, u'HTTP Error: %s' % arg except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg if wasRedirected: #print "NEW TARGET:", self.url, '\n' if self.url in self.redirectChain: return False, u'HTTP Redirect Loop: %s' % ' -> '.join(self.redirectChain + [self.url]) elif len(self.redirectChain) >= 19: return False, u'Long Chain of Redirects: %s' % ' -> '.join(self.redirectChain + [self.url]) else: redirChecker = LinkChecker(self.url, self.redirectChain) return redirChecker.check() else: try: if self.scheme == 'http': conn = httplib.HTTPConnection(self.host) elif self.scheme == 'https': conn = httplib.HTTPSConnection(self.host) except httplib.error, arg: return False, u'HTTP Error: %s' % arg try: conn.request('GET', '%s%s' % (self.path, self.query), None, self.header) except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg try: response = conn.getresponse() except Exception, arg: return False, u'Error: %s' % arg #wikipedia.output('%s: %s' % (self.url, response.status)) # site down if the server status is between 400 and 499 siteDown = response.status in range(400, 500) return not siteDown, '%s %s' % (response.status, response.reason)
|
return False, u'HTTP Redirect Loop: %s' % ' -> '.join(self.redirectChain + [self.url])
|
if useHEAD: redirChecker = LinkChecker(self.redirectChain[0]) return redirChecker.check(useHEAD = False) else: return False, u'HTTP Redirect Loop: %s' % ' -> '.join(self.redirectChain + [self.url])
|
def check(self): """ Returns True and the server status message if the page is alive. Otherwise returns false """ try: wasRedirected = self.resolveRedirect() except httplib.error, arg: return False, u'HTTP Error: %s' % arg except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg if wasRedirected: #print "NEW TARGET:", self.url, '\n' if self.url in self.redirectChain: return False, u'HTTP Redirect Loop: %s' % ' -> '.join(self.redirectChain + [self.url]) elif len(self.redirectChain) >= 19: return False, u'Long Chain of Redirects: %s' % ' -> '.join(self.redirectChain + [self.url]) else: redirChecker = LinkChecker(self.url, self.redirectChain) return redirChecker.check() else: try: if self.scheme == 'http': conn = httplib.HTTPConnection(self.host) elif self.scheme == 'https': conn = httplib.HTTPSConnection(self.host) except httplib.error, arg: return False, u'HTTP Error: %s' % arg try: conn.request('GET', '%s%s' % (self.path, self.query), None, self.header) except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg try: response = conn.getresponse() except Exception, arg: return False, u'Error: %s' % arg #wikipedia.output('%s: %s' % (self.url, response.status)) # site down if the server status is between 400 and 499 siteDown = response.status in range(400, 500) return not siteDown, '%s %s' % (response.status, response.reason)
|
return False, u'Long Chain of Redirects: %s' % ' -> '.join(self.redirectChain + [self.url])
|
if useHEAD: redirChecker = LinkChecker(self.redirectChain[0]) return redirChecker.check(useHEAD = False) else: return False, u'Long Chain of Redirects: %s' % ' -> '.join(self.redirectChain + [self.url])
|
def check(self): """ Returns True and the server status message if the page is alive. Otherwise returns false """ try: wasRedirected = self.resolveRedirect() except httplib.error, arg: return False, u'HTTP Error: %s' % arg except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg if wasRedirected: #print "NEW TARGET:", self.url, '\n' if self.url in self.redirectChain: return False, u'HTTP Redirect Loop: %s' % ' -> '.join(self.redirectChain + [self.url]) elif len(self.redirectChain) >= 19: return False, u'Long Chain of Redirects: %s' % ' -> '.join(self.redirectChain + [self.url]) else: redirChecker = LinkChecker(self.url, self.redirectChain) return redirChecker.check() else: try: if self.scheme == 'http': conn = httplib.HTTPConnection(self.host) elif self.scheme == 'https': conn = httplib.HTTPSConnection(self.host) except httplib.error, arg: return False, u'HTTP Error: %s' % arg try: conn.request('GET', '%s%s' % (self.path, self.query), None, self.header) except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg try: response = conn.getresponse() except Exception, arg: return False, u'Error: %s' % arg #wikipedia.output('%s: %s' % (self.url, response.status)) # site down if the server status is between 400 and 499 siteDown = response.status in range(400, 500) return not siteDown, '%s %s' % (response.status, response.reason)
|
return redirChecker.check()
|
return redirChecker.check(useHEAD = useHEAD)
|
def check(self): """ Returns True and the server status message if the page is alive. Otherwise returns false """ try: wasRedirected = self.resolveRedirect() except httplib.error, arg: return False, u'HTTP Error: %s' % arg except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg if wasRedirected: #print "NEW TARGET:", self.url, '\n' if self.url in self.redirectChain: return False, u'HTTP Redirect Loop: %s' % ' -> '.join(self.redirectChain + [self.url]) elif len(self.redirectChain) >= 19: return False, u'Long Chain of Redirects: %s' % ' -> '.join(self.redirectChain + [self.url]) else: redirChecker = LinkChecker(self.url, self.redirectChain) return redirChecker.check() else: try: if self.scheme == 'http': conn = httplib.HTTPConnection(self.host) elif self.scheme == 'https': conn = httplib.HTTPSConnection(self.host) except httplib.error, arg: return False, u'HTTP Error: %s' % arg try: conn.request('GET', '%s%s' % (self.path, self.query), None, self.header) except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg try: response = conn.getresponse() except Exception, arg: return False, u'Error: %s' % arg #wikipedia.output('%s: %s' % (self.url, response.status)) # site down if the server status is between 400 and 499 siteDown = response.status in range(400, 500) return not siteDown, '%s %s' % (response.status, response.reason)
|
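The five rows above all apply the same recovery strategy: when a HEAD-based check ends in a redirect loop or an overlong chain, restart the whole chain from its first URL using GET. A distilled sketch; fetch() is a placeholder for the real request logic:

```python
# Distilled HEAD-to-GET fallback; fetch() is a stand-in, not a real API.
def check_with_fallback(first_url, fetch):
    status = fetch(first_url, use_head=True)
    if status in ('redirect_loop', 'chain_too_long'):
        # Some servers mishandle HEAD; retry the chain once with GET.
        status = fetch(first_url, use_head=False)
    return status
```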
cat_namespace = wikipedia.getSite().category_namespace()
|
cat_namespace = wikipedia.getSite().category_namespaces()[0]
|
def add_category(sort_by_last_name = False): ''' A robot to mass-add a category to a list of pages. ''' print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input(u'Wikipedia page with list of pages to change:') if listpage: try: pl = wikipedia.PageLink(wikipedia.getSite(), listpage) except NoPage: wikipedia.output(u'The page ' + listpage + ' could not be loaded from the server.') sys.exit() pagenames = pl.links() else: refpage = wikipedia.input(u'Wikipedia page that is now linked to:') pl = wikipedia.PageLink(wikipedia.getSit(), refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input(u'Category to add (do not give namespace):') newcat = newcat[:1].capitalize() + newcat[1:] # get edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_add) % newcat) cat_namespace = wikipedia.getSite().category_namespace() answer = '' for nm in pagenames: pl2 = wikipedia.PageLink(wikipedia.getSite(), nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input(u'%s [y/n/a(ll)]:' % (pl2.aslink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input(u'This should be used if and only if you are sure that your links are correct! Are you sure? [y/n]:') if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: wikipedia.output(u"%s doesn't exist yet. Ignoring."%(pl2.aslocallink())) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(wikipedia.getSite(),arg.args[0]) wikipedia.output(u"WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink())) else: wikipedia.output(u"Current categories: %s" % cats) catpl = wikipedia.PageLink(wikipedia.getSite(), cat_namespace + ':' + newcat) if sort_by_last_name: catpl = sorted_by_last_name(catpl, pl2) if catpl in cats: wikipedia.output(u"%s already has %s"%(pl2.aslocallink(), catpl.aslocallink())) else: wikipedia.output(u'Adding %s' % catpl.aslocallink()) cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text)
|
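The core edit step in add_category is: fetch the page's current category links, append the new one, and write the page back with the rewritten link list. A condensed sketch of just that step, using the same names as the code above (the inverted membership test is a simplification):

    cats = pl2.categories()                 # existing category PageLinks
    catpl = wikipedia.PageLink(wikipedia.getSite(), cat_namespace + ':' + newcat)
    if catpl not in cats:
        cats.append(catpl)
        text = pl2.get()                    # current wikitext
        text = wikipedia.replaceCategoryLinks(text, cats)
        pl2.put(text)                       # save with the edit summary set earlier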
wikipedia.output(u"Current categories: %s" % cats)
|
wikipedia.output(u"Current categories:") for curpl in cats: wikipedia.output(u"* %s" % cat.aslink())
|
|
nn=wikipedia.url2link(wikipedia.link2url(old[code]))
|
nn=wikipedia.url2link(wikipedia.link2url(new[code]))
|
def compareLanguages(old, new):
    removing = []
    adding = []
    modifying = []
    for code, name in old.iteritems():
        if not new.has_key(code):
            removing.append(code)
        elif old[code] != new[code]:
            oo = wikipedia.url2link(wikipedia.link2url(old[code]))
            nn = wikipedia.url2link(wikipedia.link2url(old[code]))
            if oo != nn:
                modifying.append(code)
    for code, name in new.iteritems():
        if not old.has_key(code):
            adding.append(code)
    s = ""
    if adding:
        s = s + " Adding:" + ",".join(adding)
    if removing:
        s = s + " Removing:" + ",".join(removing)
    if modifying:
        s = s + " Modifying:" + ",".join(modifying)
    return s
|
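A small worked example of compareLanguages, assuming the corrected line from this row (nn computed from new[code], not old[code]) and that url2link/link2url leave these titles essentially unchanged. The dictionaries are invented for illustration:

    old = {'de': u'Berlin', 'fr': u'Berlin', 'nl': u'Berlijn'}
    new = {'de': u'Berlin', 'nl': u'Berlijn (stad)', 'pl': u'Berlin'}
    print compareLanguages(old, new)
    # -> " Adding:pl Removing:fr Modifying:nl"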
s = compareLanguages(old, new)
if not s and only_if_status:
|
mods = compareLanguages(old, new)
if not mods and only_if_status:
|
def treesearch(code, name):
    arr = {(code, name): None}
    # First make one step based on the language itself
    try:
        n = treestep(arr, code, name, abort_on_redirect = 1)
    except wikipedia.IsRedirectPage:
        print "Is redirect page"
        return
    if n == 0 and not arr[code, name]:
        print "Mother doesn't exist"
        return
    # Then add translations if we survived.
    autotranslate(name, arr)
    modifications = 1
    while modifications:
        modifications = 0
        for newcode, newname in arr.keys():
            if arr[newcode, newname] is None:
                modifications += treestep(arr, newcode, newname)
    return arr
|
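treesearch uses a simple fixed-point worklist: keep sweeping the table and expanding unprocessed entries until a full pass adds nothing new. The same pattern in isolation (expand() is a placeholder for treestep, which returns the number of entries it added):

    modifications = 1
    while modifications:
        modifications = 0
        for key in arr.keys():
            if arr[key] is None:        # not yet processed
                modifications += expand(arr, key)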
print s
|
print mods
|
|
status,reason,data=wikipedia.putPage(mylang,inname,newtext)
|
status,reason,data=wikipedia.putPage(mylang,inname,newtext,comment='Rob Hooft: robot '+mods)
|
|
Page(self, "Non-existing page").get(force = True, sysop = sysop)
|
Page(self, "Wikipedia:Sandbox").get(force = True, sysop = sysop)
|
def getToken(self, getalways = True, getagain = False, sysop = False):
    if getagain or (getalways and ((sysop and not self._sysoptoken) or (not sysop and not self._token))):
        output(u"Getting page to get a token.")
        try:
            Page(self, "Non-existing page").get(force = True, sysop = sysop)
        except Error:
            pass
    if sysop:
        if not self._sysoptoken:
            return False
        else:
            return self._sysoptoken
    else:
        if not self._token:
            return False
        else:
            return self._token
|
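The token logic above is a cache with an optional forced refresh: a page is fetched (which stores an edit token on the Site object as a side effect) only when no token is cached or getagain is set. The same pattern reduced to its shape; _refreshToken is a stand-in for the page fetch:

    def getToken(self, getagain = False):
        if getagain or not self._token:
            self._refreshToken()    # side effect: fills self._token
        return self._token or False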
sysop = (self.editRestriction is not None)
|
sysop = (not self.editRestriction)
|
def put(self, newtext, comment=None, watchArticle = None, minorEdit = True):
    """Replace the new page with the contents of the first argument.
       The second argument is a string that is to be used as the
       summary for the modification
|
catlib.change_category(article, original_cat, current_cat.titleWithoutNamespace())
|
catlib.change_category(article, original_cat, current_cat)
|
def move_to_category(self, article, original_cat, current_cat):
    '''
    Given an article which is in category original_cat, ask the user if
    it should be moved to one of original_cat's subcategories.
    Recursively run through subcategories' subcategories.
    NOTE: current_cat is only used for internal recursion. You should
    always use current_cat = original_cat.
    '''
    print
    wikipedia.output(u'Treating page %s, currently in category %s' % (article.title(), current_cat.title()))
    subcatlist = self.catDB.getSubcats(current_cat)
    supercatlist = self.catDB.getSupercats(current_cat)
    print
    if len(subcatlist) == 0:
        print 'This category has no subcategories.'
        print
    if len(supercatlist) == 0:
        print 'This category has no supercategories.'
        print
    # show subcategories as possible choices (with numbers)
    for i in range(len(supercatlist)):
        # layout: we don't expect a cat to have more than 10 supercats
        wikipedia.output(u'u%d - Move up to %s' % (i, supercatlist[i].title()))
    for i in range(len(subcatlist)):
        # layout: we don't expect a cat to have more than 100 subcats
        wikipedia.output(u'%2d - Move down to %s' % (i, subcatlist[i].title()))
    print ' j - Jump to another category'
    print ' n - Skip this article'
    print ' r - Remove this category tag'
    print ' ? - Read the page'
    wikipedia.output(u'Enter - Save category as %s' % current_cat.title())
|
catlink = catlib.Category(wikipedia.getSite(), 'Category:' + self.catTitle)
|
cat = catlib.Category(wikipedia.getSite(), 'Category:' + self.catTitle)
|
def run(self):
    catlink = catlib.Category(wikipedia.getSite(), 'Category:' + self.catTitle)
    # get edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_change) % self.catTitle)
    articles = catlink.articles(recurse = 0)
    if len(articles) == 0:
        wikipedia.output(u'There are no articles in category ' + self.catTitle)
    else:
        for article in articles:
            print
            print '==================================================================='
            self.move_to_category(article, catlink, catlink)
|
wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_change) % self.catTitle)
articles = catlink.articles(recurse = 0)
|
wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_change) % cat.title())
articles = cat.articles(recurse = 0)
|
|
self.move_to_category(article, catlink, catlink)
|
self.move_to_category(article, cat, cat)
|
|
},
|
}
|
def __init__(self):
    family.Family.__init__(self)
    self.name = 'wikiquote'
    self.langs = {
        'minnan': 'zh-min-nan.wikiquote.org',
        'nb': 'no.wikiquote.org',
        'zh-cn': 'zh.wikiquote.org',
        'zh-tw': 'zh.wikiquote.org'
        }
    for lang in self.knownlanguages:
        self.langs[lang] = lang + '.wikiquote.org'
|
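The family constructor first records alias hostnames, then fills in the regular code.wikiquote.org pattern. Note that an alias whose code also appears in knownlanguages would be overwritten by the loop, so the aliases here ('minnan', 'nb', 'zh-cn', 'zh-tw') must be codes the loop does not generate. A compact illustration with invented data:

    langs = {'nb': 'no.wikiquote.org'}      # alias: Norwegian Bokmål
    for lang in ['en', 'de', 'no']:         # stand-in for knownlanguages
        langs[lang] = lang + '.wikiquote.org'
    print langs['nb'], langs['no']          # no.wikiquote.org no.wikiquote.org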
data = response.read()
print response.status, response.reason
|
returned_html = response.read()
|
def post_multipart(host, selector, fields, files, cookies):
    """
    Post fields and files to an http host as multipart/form-data.
    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be uploaded as files
    Return the server's response page.
    """
    content_type, body = encode_multipart_formdata(fields, files)
    conn = httplib.HTTPConnection(host)
    conn.putrequest('POST', selector)
    conn.putheader('content-type', content_type)
    conn.putheader('content-length', str(len(body)))
    conn.putheader("User-agent", "RobHooftWikiRobot/1.0")
    conn.putheader('Host', host)
    if cookies:
        conn.putheader('Cookie', cookies)
    conn.endheaders()
    conn.send(body)
    response = conn.getresponse()
    data = response.read()
    print response.status, response.reason
    conn.close()
    return data
|
return data
|
return response, returned_html
|
|
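A hedged usage sketch for post_multipart as changed in this row: with the new return value, callers receive both the response object (for the status code) and the body text. The field names and URL below are illustrative placeholders, and contents/cookies are assumed to have been prepared by the caller:

    fields = [('wpUploadDescription', 'A test description'),
              ('wpUploadAffirm', '1')]
    files = [('wpUploadFile', 'example.jpg', contents)]
    response, returned_html = post_multipart('commons.wikimedia.org',
                                             '/w/index.php?title=Special:Upload',
                                             fields, files, cookies)
    print response.status, response.reason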
if not self.urlEncoding:
    self.url = self.url.encode('utf-8')
    self.url = urllib.quote(self.url)
    self.urlEncoding = 'utf-8'
|
def upload_image(self, debug=False):
    """Gets the image at URL self.url, and uploads it to the target wiki.
       Returns the filename which was used to upload the image.
       If the upload fails, the user is asked whether to try again or not.
       If the user chooses not to retry, returns null.
    """
    if not self.urlEncoding:
        # URL is a unicode string, might e.g. be a filename with non-ASCII
        # characters.
        self.url = self.url.encode('utf-8')
        self.url = urllib.quote(self.url)
        self.urlEncoding = 'utf-8'
    # Get file contents
    uo = wikipedia.MyURLopener()
    file = uo.open(self.url)
    contents = file.read()
    if contents.find("The requested URL was not found on this server.") != -1:
        print "Couldn't download the image."
        return
    file.close()
    # Isolate the pure name
    filename = self.url
    if '/' in filename:
        filename = filename.split('/')[-1]
    if '\\' in filename:
        filename = filename.split('\\')[-1]
    if self.urlEncoding:
        filename = urllib.unquote(filename)
        filename = filename.decode(self.urlEncoding)
    if not self.keepFilename:
        wikipedia.output(u"The filename on the target wiki will default to: %s" % filename)
        # ask newfn until it's valid
        ok = False
        # FIXME: these 2 belong somewhere else, presumably in family
        forbidden = '/' # to be extended
        allowed_formats = (u'jpg', u'jpeg', u'png', u'gif', u'svg', u'ogg')
        while not ok:
            ok = True
            newfn = wikipedia.input(u'Enter a better name, or press enter to accept:')
            if newfn == "":
                newfn = filename
            ext = os.path.splitext(newfn)[1].lower().strip('.')
            for c in forbidden:
                if c in newfn:
                    print "Invalid character: %s. Please try again" % c
                    ok = False
            if ext not in allowed_formats and ok:
                ans = wikipedia.input(u"File format is not %s but %s. Continue [y/N]? " % (allowed_formats, ext))
                if not ans.lower().startswith('y'):
                    ok = False
        if newfn != '':
            filename = newfn
    # MediaWiki doesn't allow spaces in the file name.
    # Replace them here to avoid an extra confirmation form
    filename = filename.replace(' ', '_')
    # Convert the filename (currently Unicode) to the encoding used on the
    # target wiki
    filename = filename.encode(self.targetSite.encoding())
    # A proper description for the submission.
    wikipedia.output(u"The suggested description is:")
    wikipedia.output(self.description)
    choice = wikipedia.inputChoice(u'Do you want to change this description?', ['Yes', 'No'], ['Y', 'n'], 'y')
    if choice not in ['n', 'N']:
        newDescription = wikipedia.ui.editText(self.description)
        # if user didn't press Cancel:
        if newDescription:
            self.description = newDescription
    formdata = {}
    formdata["wpUploadDescription"] = self.description
|
|
uo = wikipedia.MyURLopener()
file = uo.open(self.url)
|
if '://' in self.url:
    uo = wikipedia.MyURLopener()
    file = uo.open(self.url)
else:
    file = open(self.url)
    wikipedia.output('Reading file %s' % self.url)
|
|
filename = filename.encode(self.targetSite.encoding())
|
encodedFilename = filename.encode(self.targetSite.encoding())
|
|
returned_html = post_multipart(self.targetSite.hostname(),
|
response, returned_html = post_multipart(self.targetSite.hostname(),
|
|
(('wpUploadFile', filename, contents),),
|
(('wpUploadFile', encodedFilename, contents),),
|
|
success_msg = mediawiki_messages.get('successfulupload', site = self.targetSite)
success_msgR = re.compile(re.escape(success_msg))
if success_msgR.search(returned_html):
|
if response.status == 302:
|
|
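This row replaces scraping the localized 'successfulupload' message out of the returned HTML with a check of the HTTP status: on success MediaWiki redirects to the image description page, so a 302 is a language-independent success signal. In sketch form (the output texts are illustrative, not from the source):

    if response.status == 302:
        # Redirected to the image page: the upload went through.
        wikipedia.output(u"Upload successful.")
    else:
        # Fall back to inspecting returned_html for an error message.
        wikipedia.output(u"Upload failed (status %d %s)." % (response.status, response.reason))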
if not (trailingChars or label):
|
if not trailingChars:
|
def cleanUpLinks(self, text):
    trailR = re.compile(self.site.linktrail())
    # The regular expression which finds links. Results consist of four groups:
    # group title is the target page title, that is, everything before | or ].
    # group section is the page section. It'll include the # to make life easier for us.
    # group label is the alternative link title, that's everything between | and ].
    # group linktrail is the link trail, that's letters after ]] which are part of the word.
    # note that the definition of 'letter' varies from language to language.
    self.linkR = re.compile(r'\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' + self.site.linktrail() + ')')
    curpos = 0
    # This loop will run until we have finished the current page
    while True:
        m = self.linkR.search(text, pos = curpos)
        if not m:
            break
        # Make sure that next time around we will not find this same hit.
        curpos = m.start() + 1
        titleWithSection = m.group('titleWithSection')
        label = m.group('label')
        trailingChars = m.group('linktrail')
|
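The link pattern in cleanUpLinks can be exercised on a sample. With a simple linktrail such as [a-z]* (the real one comes from the site's family file), the named groups separate the link target, the label and the trailing word characters:

    import re
    linkR = re.compile(r'\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>[a-z]*)')
    m = linkR.search(u"See [[Train station|station]]s nearby.")
    print m.group('titleWithSection')   # Train station
    print m.group('label')              # station
    print m.group('linktrail')          # s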
else: output( u"DBG>>> Strange title: %s:%s" % (site.lang, title) )
|
def __init__(self, site, title, insite = None, tosite = None, defaultNamespace = 0):
    """
    Constructor. Normally called with two arguments:
    Parameters:
     1) The wikimedia site on which the page resides
     2) The title of the page as a unicode string
|
|
Rcomment = re.compile("<!--.*?-->", re.M)
while True:
    comment = Rcomment.search(thistxt)
    if not comment:
        break
    thistxt = thistxt[:comment.start()] + thistxt[comment.end():]
|
thistxt = re.sub("(?ms)<!--.*?-->", "", thistxt)
|
def linkedPages(self):
    """Gives the normal (not-interwiki, non-category) pages the page
       links to, as a list of Page objects
    """
    result = []
    try:
        thistxt = removeLanguageLinks(self.get())
    except NoPage:
        return []
    except IsRedirectPage:
        raise
    thistxt = removeCategoryLinks(thistxt, self.site())
|
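The replacement in this row collapses the search-and-splice loop into a single substitution. (?ms) makes . match newlines, so multi-line comments are removed too, which the re.M flag in the old code did not actually achieve (re.M only changes how ^ and $ anchor). Both behaviors on a sample:

    import re
    text = u"before <!-- a\nmulti-line comment --> after"
    print re.sub("(?ms)<!--.*?-->", "", text)   # u'before  after'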
Rnowiki = re.compile("<nowiki>.*?</nowiki>", re.M)
while True:
    nowiki = Rnowiki.search(thistxt)
    if not nowiki:
        break
    thistxt = thistxt[:nowiki.start()] + thistxt[nowiki.end():]
|
thistxt = re.sub("(?ms)<nowiki>.*?</nowiki>", "", thistxt)
|
|
result.append(page)
|
if page.sectionFreeTitle():
    result.append(page)
|
|
for i in range(len(title)):
    title[i] = title[i].strip()
self._title = ':'.join(title)
|
self._title = ':'.join(title).strip()
|
def __init__(self, site, title = None, insite = None, tosite = None):
    """
    Constructor. Normally called with two arguments:
    Parameters:
     1) The wikimedia site on which the page resides
     2) The title of the page as a unicode string
    The argument insite can be specified to help decode the name;
    it is the wikimedia site where this link was found.
    """
    self._site = site
    if tosite:
        self._tosite = tosite
    else:
        self._tosite = getSite() # Default to home wiki
    # Clean up the name, it can come from anywhere.
    # Replace underlines by spaces
    title = underline2space(title)
    # Convert HTML entities to unicode
    title = html2unicode(title, site = site, altsite = insite)
    # Convert URL-encoded characters to unicode
    title = url2unicode(title, site = site)
    # replace cx by ĉ etc.
    if site.lang == 'eo':
        title = resolveEsperantoXConvention(title)
    # Remove double spaces
    while '  ' in title:
        title = title.replace('  ', ' ')
    # Remove leading colon
    if title.startswith(':'):
        title = title[1:]
    # Capitalize first letter
    try:
        if not site.nocapitalize:
            title = title[0].upper() + title[1:]
    except IndexError:
        # title is empty
        pass
    # split up into namespace and rest
    title = title.split(':', 1)
    # if the page is not in namespace 0:
    if len(title) > 1:
        # translate a default namespace name into the local namespace name
        for ns in site.family.namespaces.keys():
            if title[0] == site.family.namespace('_default', ns):
                title[0] = site.namespace(ns)
        # Capitalize the first non-namespace part
        for ns in site.family.namespaces.keys():
            if title[0] == site.namespace(ns):
                if not site.nocapitalize:
                    try:
                        title[1] = title[1][0].upper() + title[1][1:]
                    except IndexError:
                        # title[1] is empty
                        print "WARNING: Strange title %s" % '%3A'.join(title)
    # Remove leading and trailing whitespace from namespace and from rest
    for i in range(len(title)):
        title[i] = title[i].strip()
|
iend = txt.index('<div id="catlinks">')
|
iend = txt.index('<div class="printfooter">')
|
def _make_catlist(self, recurse = False, purge = False, site = None):
    """Make a list of all articles and categories that are in this
       category. If recurse is set to True, articles and subcategories
       of any subcategories are also retrieved.
|
except ValueError:
    iend = txt.index('<div class="printfooter">')
|
|
|
'af' : lambda v: slh( v, [u'Januarie', u'Februarie', u'Maart', u'April', u'Mei', u'Junie', u'Julie', u'Augustus', u'September', u'Oktober', u'November', u'Desember'] ), 'als': lambda v: slh( v, [u'Januar', u'Februar', u'März', u'April', u'Mai', u'Juni', u'Juli', u'August', u'September', u'Oktober', u'November', u'Dezember'] ), 'an' : lambda v: slh( v, [u'Chinero', u'Frebero', u'Marzo', u'Abril', u'Mayo', u'Chunio', u'Chulio', u'Agosto', u'Setiembre', u'Otubre', u'Nobiembre', u'Abiento'] ), 'ang': lambda v: slh( v, [u'Se æfterra Gēola', u'Solmónaþ', u'Hrēþmōnaþ', u'Ēastermōnaþ', u'Þrimilcemónaþ', u'Séremónaþ', u'Mǽdmónaþ', u'Wéodmónaþ', u'Háligmónaþ', u'Winterfylleþ', u'Blótmónaþ', u'Géolmónaþ'] ), 'ar' : lambda v: slh( v, [u'يناير', u'فبراير', u'مارس', u'إبريل', u'مايو', u'يونيو', u'يوليو', u'أغسطس', u'سبتمبر', u'أكتوبر', u'نوفمبر', u'ديسمبر'] ), 'ast': lambda v: slh( v, [u'Xineru', u'Febreru', u'Marzu', u'Abril', u'Mayu', u'Xunu', u'Xunetu', u'Agostu', u'Setiembre', u'Ochobre', u'Payares', u'Avientu'] ), 'be' : lambda v: slh( v, [u'Студзень', u'Люты', u'Сакавік', u'Красавік', u'Травень', u'Чэрвень', u'Ліпень', u'Жнівень', u'Верасень', u'Кастрычнік', u'Лістапад', u'Сьнежань'] ), 'bg' : lambda v: slh( v, [u'Януари', u'Февруари', u'Март', u'Април', u'Май', u'Юни', u'Юли', u'Август', u'Септември', u'Октомври', u'Ноември', u'Декември'] ), 'bs' : lambda v: slh( v, [u'Januar', u'Februar', u'Mart', u'April', u'Maj', u'Juni', u'Juli', u'Avgust', u'Septembar', u'Oktobar', u'Novembar', u'Decembar'] ), 'ca' : lambda v: slh( v, [u'Gener', u'Febrer', u'Març', u'Abril', u'Maig', u'Juny', u'Juliol', u'Agost', u'Setembre', u'Octubre', u'Novembre', u'Desembre'] ), 'cs' : lambda v: slh( v, [u'Leden', u'Únor', u'Březen', u'Duben', u'Květen', u'Červen', u'Červenec', u'Srpen', u'Září', u'Říjen', u'Listopad', u'Prosinec'] ), 'csb': lambda v: slh( v, [u'Stëcznik', u'Gromicznik', u'Strumiannik', u'Łżëkwiôt', u'Môj', u'Czerwińc', u'Lëpinc', u'Zélnik', u'Séwnik', u'Rujan', u'Lëstopadnik', u'Gòdnik'] ), 'cv' : lambda v: slh( v, [u'Кăрлач', u'Нарăс', u'Пуш', u'Ака', u'Çу', u'Çěртме', u'Утă', u'Çурла', u'Авăн', u'Юпа', u'Чӳк', u'Раштав'] ), 'cy' : lambda v: slh( v, [u'Ionawr', u'Chwefror', u'Mawrth', u'Ebrill', u'Mai', u'Mehefin', u'Gorffennaf', u'Awst', u'Medi', u'Hydref', u'Tachwedd', u'Rhagfyr'] ), 'da' : lambda v: slh( v, [u'Januar', u'Februar', u'Marts', u'April', u'Maj', u'Juni', u'Juli', u'August', u'September', u'Oktober', u'November', u'December'] ), 'de' : lambda v: slh( v, [u'Januar', u'Februar', u'März', u'April', u'Mai', u'Juni', u'Juli', u'August', u'September', u'Oktober', u'November', u'Dezember'] ), 'el' : lambda v: slh( v, [u'Ιανουάριος', u'Φεβρουάριος', u'Μάρτιος', u'Απρίλιος', u'Μάιος', u'Ιούνιος', u'Ιούλιος', u'Αύγουστος', u'Σεπτέμβριος', u'Οκτώβριος', u'Νοέμβριος', u'Δεκέμβριος'] ),
|
'af' : lambda v: slh( v, [u"Januarie", u"Februarie", u"Maart", u"April", u"Mei", u"Junie", u"Julie", u"Augustus", u"September", u"Oktober", u"November", u"Desember"] ), 'als': lambda v: slh( v, [u"Januar", u"Februar", u"März", u"April", u"Mai", u"Juni", u"Juli", u"August", u"September", u"Oktober", u"November", u"Dezember"] ), 'an' : lambda v: slh( v, [u"chinero", u"frebero", u"marzo", u"abril", u"mayo", u"chunio", u"chulio", u"agosto", u"setiembre", u"otubre", u"nobiembre", u"abiento"] ), 'ang': lambda v: slh( v, [u"Æfterra Gēola", u"Solmōnaþ", u"Hrēþmōnaþ", u"Ēastermōnaþ", u"Þrimilcemōnaþ", u"Sēremōnaþ", u"Mǣdmōnaþ", u"Wēodmōnaþ", u"Hāligmōnaþ", u"Winterfylleþ", u"Blōtmōnaþ", u"Gēolmōnaþ"] ), 'ar' : lambda v: slh( v, [u"يناير", u"فبراير", u"مارس", u"إبريل", u"مايو", u"يونيو", u"يوليو", u"أغسطس", u"سبتمبر", u"أكتوبر", u"نوفمبر", u"ديسمبر"] ), 'ast': lambda v: slh( v, [u"xineru", u"febreru", u"marzu", u"abril", u"mayu", u"xunu", u"xunetu", u"agostu", u"setiembre", u"ochobre", u"payares", u"avientu"] ), 'be' : lambda v: slh( v, [u"студзень", u"люты", u"сакавік", u"красавік", u"травень", u"чэрвень", u"ліпень", u"жнівень", u"верасень", u"кастрычнік", u"лістапад", u"сьнежань"] ), 'bg' : lambda v: slh( v, [u"януари", u"февруари", u"март", u"април", u"май", u"юни", u"юли", u"август", u"септември", u"октомври", u"ноември", u"декември"] ), 'br' : lambda v: slh( v, [u"Genver", u"C'hwevrer", u"Meurzh", u"Ebrel", u"Mae", u"Mezheven", u"Gouere", u"Eost", u"Gwengolo", u"Here", u"Du", u"Kerzu"] ), 'bs' : lambda v: slh( v, [u"januar", u"februar", u"mart", u"april", u"maj", u"juni", u"juli", u"avgust", u"septembar", u"oktobar", u"novembar", u"decembar"] ), 'ca' : lambda v: slh( v, [u"gener", u"febrer", u"març", u"abril", u"maig", u"juny", u"juliol", u"agost", u"setembre", u"octubre", u"novembre", u"desembre"] ), 'co' : lambda v: slh( v, [u"ghjennaghju", u"frivaghju", u"marzu", u"aprile", u"maghju", u"ghjugnu", u"lugliu", u"aostu", u"settembre", u"uttrovi", u"nuvembri", u"decembre"] ), 'cs' : lambda v: slh( v, [u"leden", u"únor", u"březen", u"duben", u"květen", u"červen", u"červenec", u"srpen", u"září", u"říjen", u"listopad", u"prosinec"] ), 'csb': lambda v: slh( v, [u"stëcznik", u"gromicznik", u"strumiannik", u"łżëkwiôt", u"môj", u"czerwińc", u"lëpinc", u"zélnik", u"séwnik", u"rujan", u"lëstopadnik", u"gòdnik"] ), 'cv' : lambda v: slh( v, [u"кăрлач", u"нарăс", u"Пуш", u"Ака", u"çу", u"çĕртме", u"утă", u"çурла", u"авăн", u"юпа", u"чӳк", u"раштав"] ), 'cy' : lambda v: slh( v, [u"Ionawr", u"Chwefror", u"Mawrth", u"Ebrill", u"Mai", u"Mehefin", u"Gorffennaf", u"Awst", u"Medi", u"Hydref", u"Tachwedd", u"Rhagfyr"] ), 'da' : lambda v: slh( v, [u"januar", u"februar", u"marts", u"april", u"maj", u"juni", u"juli", u"august", u"september", u"oktober", u"november", u"december"] ), 'de' : lambda v: slh( v, [u"Januar", u"Februar", u"März", u"April", u"Mai", u"Juni", u"Juli", u"August", u"September", u"Oktober", u"November", u"Dezember"] ), 'el' : lambda v: slh( v, [u"Ιανουάριος", u"Φεβρουάριος", u"Μάρτιος", u"Απρίλιος", u"Μάιος", u"Ιούνιος", u"Ιούλιος", u"Αύγουστος", u"Σεπτέμβριος", u"Οκτώβριος", u"Νοέμβριος", u"Δεκέμβριος"] ),
|
def dh_knYearConverter( value ):
    if type(value) is int:
        # Encode an integer value into a textual form.
        return unicode(value).translate(_knDigitsToLocal)
    else:
        # First make sure there are no real digits in the string
        tmp = value.translate(_knDigitsToLocal)         # Test
        if tmp == value:
            tmp = value.translate(_knLocalToDigits)     # Convert
            return dh_noConv( tmp, u'%d' )
        else:
            raise ValueError("string contains regular digits")
|
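The converter above relies on unicode.translate with ordinal-to-character maps in both directions. A minimal sketch of such a pair of tables for the Kannada digits (U+0CE6 through U+0CEF); the table names match the code above, but their construction here is illustrative rather than taken from the source:

    _knDigitsToLocal = dict((ord(u'0') + i, unichr(0x0CE6 + i)) for i in range(10))
    _knLocalToDigits = dict((0x0CE6 + i, unichr(ord(u'0') + i)) for i in range(10))
    print u'1920'.translate(_knDigitsToLocal)     # u'೧೯೨೦'
    print u'೧೯೨೦'.translate(_knLocalToDigits)     # u'1920'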
'eo' : lambda v: slh( v, [u'Januaro', u'Februaro', u'Marto', u'Aprilo', u'Majo', u'Junio', u'Julio', u'Aŭgusto', u'Septembro', u'Oktobro', u'Novembro', u'Decembro'] ), 'es' : lambda v: slh( v, [u'Enero', u'Febrero', u'Marzo', u'Abril', u'Mayo', u'Junio', u'Julio', u'Agosto', u'Septiembre', u'Octubre', u'Noviembre', u'Diciembre'] ), 'et' : lambda v: slh( v, [u'Jaanuar', u'Veebruar', u'Märts', u'Aprill', u'Mai', u'Juuni', u'Juuli', u'August', u'September', u'Oktoober', u'November', u'Detsember'] ), 'eu' : lambda v: slh( v, [u'Urtarril', u'Otsail', u'Martxo', u'Apiril', u'Maiatz', u'Ekain', u'Uztail', u'Abuztu', u'Irail', u'Urri', u'Azaro', u'Abendu'] ), 'fa' : lambda v: slh( v, [u'ژانویه', u'فوریه', u'مارس', u'آوریل', u'مه', u'ژوئن', u'ژوئیه', u'اوت', u'سپتامبر', u'اکتبر', u'نوامبر', u'دسامبر'] ), 'fi' : lambda v: slh( v, [u'Tammikuu', u'Helmikuu', u'Maaliskuu', u'Huhtikuu', u'Toukokuu', u'Kesäkuu', u'Heinäkuu', u'Elokuu', u'Syyskuu', u'Lokakuu', u'Marraskuu', u'Joulukuu'] ), 'fo' : lambda v: slh( v, [u'Januar', u'Februar', u'Mars', u'Apríl', u'Mai', u'Juni', u'Juli', u'August', u'September', u'Oktober', u'November', u'Desember'] ), 'fr' : lambda v: slh( v, [u'Janvier', u'Février', u'Mars (mois)', u'Avril', u'Mai', u'Juin', u'Juillet', u'Août', u'Septembre', u'Octobre', u'Novembre', u'Décembre'] ), 'fur': lambda v: slh( v, [u'Zenâr', u'Fevrâr', u'Març', u'Avrîl', u'Mai', u'Zugn', u'Lui', u'Avost', u'Setembar', u'Otubar', u'Novembar', u'Dicembar'] ), 'fy' : lambda v: slh( v, [u'Jannewaris', u'Febrewaris', u'Maart', u'April', u'Maaie', u'Juny', u'July', u'Augustus', u'Septimber', u'Oktober', u'Novimber', u'Desimber'] ), 'ga' : lambda v: slh( v, [u'Eanáir', u'Feabhra', u'Márta', u'Aibreán', u'Bealtaine', u'Meitheamh', u'Iúil', u'Lúnasa', u'Meán Fómhair', u'Deireadh Fómhair', u'Samhain', u'Nollaig'] ), 'gl' : lambda v: slh( v, [u'Xaneiro', u'Febreiro', u'Marzo', u'Abril', u'Maio', u'Xuño', u'Xullo', u'Agosto', u'Setembro', u'Outubro', u'Novembro', u'Decembro'] ), 'he' : lambda v: slh( v, [u'ינואר', u'פברואר', u'מרץ', u'אפריל', u'מאי', u'יוני', u'יולי', u'אוגוסט', u'ספטמבר', u'אוקטובר', u'נובמבר', u'דצמבר'] ), 'hi' : lambda v: slh( v, [u'जनवरी', u'फ़रवरी', u'मार्च', u'अप्रैल', u'मई', u'जून', u'जुलाई', u'अगस्त', u'सितम्बर', u'अक्टूबर', u'नवम्बर', u'दिसम्बर'] ), 'hr' : lambda v: slh( v, [u'Siječanj', u'Veljača', u'Ožujak', u'Travanj', u'Svibanj', u'Lipanj', u'Srpanj', u'Kolovoz', u'Rujan', u'Listopad', u'Studeni', u'Prosinac'] ), 'hu' : lambda v: slh( v, [u'Január', u'Február', u'Március', u'Április', u'Május', u'Június', u'Július', u'Augusztus', u'Szeptember', u'Október', u'November', u'December'] ), 'ia' : lambda v: slh( v, [u'Januario', u'Februario', u'Martio', u'April', u'Maio', u'Junio', u'Julio', u'Augusto', u'Septembre', u'Octobre', u'Novembre', u'Decembre'] ), 'id' : lambda v: slh( v, [u'Januari', u'Februari', u'Maret', u'April', u'Mei', u'Juni', u'Juli', u'Agustus', u'September', u'Oktober', u'November', u'Desember'] ), 'ie' : lambda v: slh( v, [u'Januar', u'Februar', u'Marte', u'April', u'May', u'Junio', u'Juli', u'August', u'Septembre', u'Octobre', u'Novembre', u'Decembre'] ), 'io' : lambda v: slh( v, [u'Januaro', u'Februaro', u'Marto', u'Aprilo', u'Mayo', u'Junio', u'Julio', u'Agosto', u'Septembro', u'Oktobro', u'Novembro', u'Decembro'] ), 'is' : lambda v: slh( v, [u'Janúar', u'Febrúar', u'Mars (mánuður)', u'Apríl', u'Maí', u'Júní', u'Júlí', u'Ágúst', u'September', u'Október', u'Nóvember', u'Desember'] ), 'it' : lambda v: slh( v, [u'Gennaio', u'Febbraio', u'Marzo', u'Aprile', u'Maggio', 
u'Giugno', u'Luglio', u'Agosto', u'Settembre', u'Ottobre', u'Novembre', u'Dicembre'] ), 'ja' : lambda v: slh( v, makeMonthList( u'%d月' )), 'jv' : lambda v: slh( v, [u'Januari', u'Februari', u'Maret', u'April', u'Mei', u'Juni', u'Juli', u'Agustus', u'September', u'Oktober', u'November', u'Desember'] ), 'ka' : lambda v: slh( v, [u'იანვარი', u'თებერვალი', u'მარტი', u'აპრილი', u'მაისი', u'ივნისი', u'ივლისი', u'აგვისტო', u'სექტემბერი', u'ოქტომბერი', u'ნოემბერი', u'დეკემბერი'] ), 'kn' : lambda v: slh( v, [u'ಜನವರಿ', u'ಫೆಬ್ರವರಿ', u'ಮಾರ್ಚಿ', u'ಎಪ್ರಿಲ್', u'ಮೇ', u'ಜೂನ', u'ಜುಲೈ', u'ಆಗಸ್ಟ್ ', u'ಸೆಪ್ಟೆಂಬರ್', u'ಅಕ್ಟೋಬರ್', u'ನವೆಂಬರ್', u'ಡಿಸೆಂಬರ್'] ), 'ko' : lambda v: slh( v, makeMonthList( u'%d월' )), 'ku' : lambda v: slh( v, [u'Rêbendan', u'Reşemî', u'Adar', u'Avrêl', u'Gulan', u'Pûşper', u'Tîrmeh', u'Gelawêj (meh)', u'Rezber', u'Kewçêr', u'Sermawez', u'Berfanbar'] ), 'kw' : lambda v: slh( v, [u'Mys Genver', u'Mys Whevrer', u'Mys Merth', u'Mys Ebrel', u'Mys Me', u'Mys Metheven', u'Mys Gortheren', u'Mys Est', u'Mys Gwyngala', u'Mys Hedra', u'Mys Du', u'Mys Kevardhu'] ), 'la' : lambda v: slh( v, [u'Ianuarius', u'Februarius', u'Martius', u'Aprilis', u'Maius', u'Iunius', u'Iulius', u'Augustus (mensis)', u'September', u'October', u'November', u'December'] ), 'lb' : lambda v: slh( v, [u'Januar', u'Februar', u'Mäerz', u'Abrëll', u'Mee', u'Juni', u'Juli', u'August', u'September', u'Oktober', u'November', u'Dezember'] ), 'li' : lambda v: slh( v, [u'Jannewarie', u'Fibberwarie', u'Miert', u'April', u'Mei', u'Juni', u'Juli', u'Augustus (maond)', u'September', u'Oktober', u'November', u'December'] ), 'lt' : lambda v: slh( v, [u'Sausis', u'Vasaris', u'Kovas', u'Balandis', u'Gegužė', u'Birželis', u'Liepa', u'Rugpjūtis', u'Rugsėjis', u'Spalis', u'Lapkritis', u'Gruodis'] ), 'mi' : lambda v: slh( v, [u'Kohi-tātea', u'Hui-tanguru', u'Poutū-te-rangi', u'Paenga-whāwhā', u'Haratua', u'Pipiri', u'Hōngongoi', u'Here-turi-kōkā', u'Mahuru', u'Whiringa-ā-nuku', u'Whiringa-ā-rangi', u'Hakihea'] ), 'ml' : lambda v: slh( v, [u'ജനുവരി', u'ഫെബ്രുവരി', u'മാര്ച്', u'ഏപ്രില്', u'മേയ്', u'ജൂണ്', u'ജൂലൈ', u'ആഗസ്റ്റ്', u'സപ്തന്പര്', u'ഒക്ടോബര്', u'നവന്പര്', u'ഡിസന്പര്'] ), 'mr' : lambda v: slh( v, [u'जानेवारी', u'फेब्रुवारी', u'मार्च', u'एप्रिल', u'मे', u'जून', u'जुलै', u'ऑगस्ट', u'सप्टेंबर', u'ऑक्टोबर', u'नोव्हेंबर', u'डिसेंबर'] ), 'ms' : lambda v: slh( v, [u'Januari', u'Februari', u'Mac', u'April', u'Mei', u'Jun', u'Julai', u'Ogos', u'September', u'Oktober', u'November', u'Disember'] ), 'nds': lambda v: slh( v, [u'Januar', u'Februar', u'März', u'April', u'Mai', u'Juni', u'Juli', u'August', u'September', u'Oktober', u'November', u'Dezember'] ), 'nl' : lambda v: slh( v, [u'Januari', u'Februari', u'Maart', u'April', u'Mei', u'Juni', u'Juli', u'Augustus (maand)', u'September', u'Oktober', u'November', u'December'] ), 'nn' : lambda v: slh( v, [u'Januar', u'Februar', u'Mars', u'April', u'Mai', u'Juni', u'Juli', u'August', u'September', u'Oktober', u'November', u'Desember'] ), 'no' : lambda v: slh( v, [u'Januar', u'Februar', u'Mars', u'April', u'Mai', u'Juni', u'Juli', u'August', u'September', u'Oktober', u'November', u'Desember'] ), 'oc' : lambda v: slh( v, [u'Genièr', u'Febrièr', u'Març', u'Abril', u'Mai', u'Junh', u'Julhet', u'Agost', u'Setembre', u'Octobre', u'Novembre', u'Decembre'] ), 'pl' : lambda v: slh( v, [u'Styczeń', u'Luty', u'Marzec', u'Kwiecień', u'Maj', u'Czerwiec', u'Lipiec', u'Sierpień', u'Wrzesień', u'Październik', u'Listopad', u'Grudzień'] ), 'pt' : lambda v: slh( v, [u'Janeiro', u'Fevereiro', u'Março', u'Abril', u'Maio', 
u'Junho', u'Julho', u'Agosto', u'Setembro', u'Outubro', u'Novembro', u'Dezembro'] ), 'ro' : lambda v: slh( v, [u'Ianuarie', u'Februarie', u'Martie', u'Aprilie', u'Mai', u'Iunie', u'Iulie', u'August', u'Septembrie', u'Octombrie', u'Noiembrie', u'Decembrie'] ), 'ru' : lambda v: slh( v, [u'Январь', u'Февраль', u'Март', u'Апрель', u'Май', u'Июнь', u'Июль', u'Август', u'Сентябрь', u'Октябрь', u'Ноябрь', u'Декабрь'] ), 'sc' : lambda v: slh( v, [u'Ghennarzu', u'Frearzu', u'Martzu', u'Abrile', u'Maju', u'Làmpadas', u'Triulas', u'Aùstu', u'Cabudanni', u'Santugaìne', u'Santadria', u'Nadale'] ), 'scn': lambda v: slh( v, [u'Jinnaru', u'Frivaru', u'Marzu', u'Aprili', u'Maiu', u'Giugnu', u'Giugnettu', u'Austu', u'Sittèmmiru', u'Uttùviru', u'Nuvèmmiru', u'Dicèmmiru'] ), 'sco': lambda v: slh( v, [u'Januar', u'Februar', u'Mairch', u'Aprile', u'Mey', u'Juin', u'Julie', u'Augist', u'September', u'October', u'November', u'December'] ), 'se' : lambda v: slh( v, [u'Ođđajagimánnu', u'Guovvamánnu', u'Njukčamánnu', u'Cuoŋománnu', u'Miessemánnu', u'Geassemánnu', u'Suoidnemánnu', u'Borgemánnu', u'Čakčamánnu', u'Golggotmánnu', u'Skábmamánnu', u'Juovlamánnu'] ), 'simple': lambda v: slh( v, enMonthNames ), 'sk' : lambda v: slh( v, [u'Január', u'Február', u'Marec', u'Apríl', u'Máj', u'Jún', u'Júl', u'August', u'September', u'Október', u'November', u'December'] ), 'sl' : lambda v: slh( v, [u'Januar', u'Februar', u'Marec', u'April', u'Maj', u'Junij', u'Julij', u'Avgust', u'September', u'Oktober', u'November', u'December'] ), 'sq' : lambda v: slh( v, [u'Janari', u'Shkurti', u'Marsi (muaj)', u'Prilli', u'Maji', u'Qershori', u'Korriku', u'Gushti', u'Shtatori', u'Tetori', u'Nëntori', u'Dhjetori'] ), 'sr' : lambda v: slh( v, [u'Јануар', u'Фебруар', u'Март', u'Април', u'Мај', u'Јун', u'Јул', u'Август', u'Септембар', u'Октобар', u'Новембар', u'Децембар'] ), 'su' : lambda v: slh( v, [u'Januari', u'Pébruari', u'Maret', u'April', u'Méi', u'Juni', u'Juli', u'Agustus', u'Séptémber', u'Oktober', u'Nopémber', u'Désémber'] ), 'sv' : lambda v: slh( v, [u'Januari', u'Februari', u'Mars', u'April', u'Maj', u'Juni', u'Juli', u'Augusti', u'September', u'Oktober', u'November', u'December'] ), 'te' : lambda v: slh( v, [u'జనవరి', u'ఫిబ్రవరి', u'మార్చి', u'ఏప్రిల్', u'మే', u'జూన్', u'జూలై', u'ఆగష్టు', u'సెప్టెంబర్', u'అక్టోబర్', u'నవంబర్', u'డిసెంబర్'] ), 'th' : lambda v: slh( v, [u'มกราคม', u'กุมภาพันธ์', u'มีนาคม', u'เมษายน', u'พฤษภาคม', u'มิถุนายน', u'กรกฎาคม', u'สิงหาคม', u'กันยายน', u'ตุลาคม', u'พฤศจิกายน', u'ธันวาคม'] ), 'tl' : lambda v: slh( v, [u'Enero', u'Pebrero', u'Marso', u'Abril', u'Mayo', u'Hunyo', u'Hulyo', u'Agosto', u'Setyembre', u'Oktubre', u'Nobyembre', u'Disyembre'] ), 'tpi': lambda v: slh( v, [u'Janueri', u'Februeri', u'Mas', u'Epril', u'Me', u'Jun', u'Julai', u'Ogas', u'Septemba', u'Oktoba', u'Novemba', u'Disemba'] ), 'tr' : lambda v: slh( v, [u'Ocak', u'Şubat', u'Mart', u'Nisan', u'Mayıs', u'Haziran', u'Temmuz', u'Ağustos', u'Eylül', u'Ekim', u'Kasım', u'Aralık'] ), 'tt' : lambda v: slh( v, [u'Ğínwar', u'Febräl', u'Mart', u'Äpril', u'May', u'Yün', u'Yül', u'August', u'Sentäber', u'Öktäber', u'Nöyäber', u'Dekäber'] ), 'uk' : lambda v: slh( v, [u'Січень', u'Лютий', u'Березень', u'Квітень', u'Травень', u'Червень', u'Липень', u'Серпень', u'Вересень', u'Жовтень', u'Листопад', u'Грудень'] ), 'ur' : lambda v: slh( v, [u'جنوری', u'فروری', u'مارچ', u'اپريل', u'مئ', u'جون', u'جولائ', u'اگست', u'ستمبر', u'اکتوبر', u'نومبر', u'دسمبر'] ), 'vi' : lambda v: slh( v, [u'Tháng một', u'Tháng hai', u'Tháng ba', u'Tháng tư', u'Tháng năm', 
u'Tháng sáu', u'Tháng bảy', u'Tháng tám', u'Tháng chín', u'Tháng mười', u'Tháng mười một', u'Tháng mười hai'] ), 'vo' : lambda v: slh( v, [u'Yanul', u'Febul', u'Mäzul', u'Prilul', u'Mayul', u'Yunul', u'Yulul', u'Gustul', u'Setul', u'Tobul', u'Novul', u'Dekul'] ), 'wa' : lambda v: slh( v, [u'Djanvî', u'Fevrî', u'Måss', u'Avri', u'May', u'Djun', u'Djulete', u'Awousse', u'Setimbe', u'Octôbe', u'Nôvimbe', u'Decimbe'] ), 'zh' : lambda v: slh( v, makeMonthList( u'%d月' )),
|
'eo' : lambda v: slh( v, [u"Januaro", u"Februaro", u"Marto", u"Aprilo", u"Majo", u"Junio", u"Julio", u"Aŭgusto", u"Septembro", u"Oktobro", u"Novembro", u"Decembro"] ), 'es' : lambda v: slh( v, [u"enero", u"febrero", u"marzo", u"abril", u"mayo", u"junio", u"julio", u"agosto", u"septiembre", u"octubre", u"noviembre", u"diciembre"] ), 'et' : lambda v: slh( v, [u"jaanuar", u"veebruar", u"märts", u"aprill", u"mai", u"juuni", u"juuli", u"august", u"september", u"oktoober", u"november", u"detsember"] ), 'eu' : lambda v: slh( v, [u"Urtarril", u"Otsail", u"Martxo", u"Apiril", u"Maiatz", u"Ekain", u"Uztail", u"Abuztu", u"Irail", u"Urri", u"Azaro", u"Abendu"] ), 'fa' : lambda v: slh( v, [u"ژانویه", u"فوریه", u"مارس", u"آوریل", u"مه", u"ژوئن", u"ژوئیه", u"اوت", u"سپتامبر", u"اکتبر", u"نوامبر", u"دسامبر"] ), 'fi' : lambda v: slh( v, [u"tammikuu", u"helmikuu", u"maaliskuu", u"huhtikuu", u"toukokuu", u"kesäkuu", u"heinäkuu", u"elokuu", u"syyskuu", u"lokakuu", u"marraskuu", u"joulukuu"] ), 'fo' : lambda v: slh( v, [u"januar", u"februar", u"mars", u"apríl", u"mai", u"juni", u"juli", u"august", u"september", u"oktober", u"november", u"desember"] ), 'fr' : lambda v: slh( v, [u"janvier", u"février", u"mars (mois)", u"avril", u"mai", u"juin", u"juillet", u"août", u"septembre", u"octobre", u"novembre", u"décembre"] ), 'fur': lambda v: slh( v, [u"Zenâr", u"Fevrâr", u"Març", u"Avrîl", u"Mai", u"Zugn", u"Lui", u"Avost", u"Setembar", u"Otubar", u"Novembar", u"Dicembar"] ), 'fy' : lambda v: slh( v, [u"jannewaris", u"febrewaris", u"maart", u"april", u"maaie", u"juny", u"july", u"augustus", u"septimber", u"oktober", u"novimber", u"desimber"] ), 'ga' : lambda v: slh( v, [u"Eanáir", u"Feabhra", u"Márta", u"Aibreán", u"Bealtaine", u"Meitheamh", u"Iúil", u"Lúnasa", u"Meán Fómhair", u"Deireadh Fómhair", u"Samhain", u"Nollaig"] ), 'gl' : lambda v: slh( v, [u"xaneiro", u"febreiro", u"marzo", u"abril", u"maio", u"xuño", u"xullo", u"agosto", u"setembro", u"outubro", u"novembro", u"decembro"] ), 'he' : lambda v: slh( v, [u"ינואר", u"פברואר", u"מרץ", u"אפריל", u"מאי", u"יוני", u"יולי", u"אוגוסט", u"ספטמבר", u"אוקטובר", u"נובמבר", u"דצמבר"] ), 'hi' : lambda v: slh( v, [u"जनवरी", u"फ़रवरी", u"मार्च", u"अप्रैल", u"मई", u"जून", u"जुलाई", u"अगस्त", u"सितम्बर", u"अक्टूबर", u"नवम्बर", u"दिसम्बर"] ), 'hr' : lambda v: slh( v, [u"siječanj", u"veljača", u"ožujak", u"travanj", u"svibanj", u"lipanj", u"srpanj", u"kolovoz", u"rujan", u"listopad", u"studeni", u"prosinac"] ), 'hu' : lambda v: slh( v, [u"január", u"február", u"március", u"április", u"május", u"június", u"július", u"augusztus", u"szeptember", u"október", u"november", u"december"] ), 'ia' : lambda v: slh( v, [u"januario", u"februario", u"martio", u"april", u"maio", u"junio", u"julio", u"augusto", u"septembre", u"octobre", u"novembre", u"decembre"] ), 'id' : lambda v: slh( v, [u"Januari", u"Februari", u"Maret", u"April", u"Mei", u"Juni", u"Juli", u"Agustus", u"September", u"Oktober", u"November", u"Desember"] ), 'ie' : lambda v: slh( v, [u"januar", u"februar", u"marte", u"april", u"may", u"junio", u"juli", u"august", u"septembre", u"octobre", u"novembre", u"decembre"] ), 'io' : lambda v: slh( v, [u"januaro", u"februaro", u"Marto", u"aprilo", u"mayo", u"junio", u"julio", u"agosto", u"septembro", u"oktobro", u"novembro", u"decembro"] ), 'is' : lambda v: slh( v, [u"janúar", u"febrúar", u"mars (mánuður)", u"apríl", u"maí", u"júní", u"júlí", u"ágúst", u"september", u"október", u"nóvember", u"desember"] ), 'it' : lambda v: slh( v, [u"gennaio", u"febbraio", u"marzo", u"aprile", u"maggio", 
u"giugno", u"luglio", u"agosto", u"settembre", u"ottobre", u"novembre", u"dicembre"] ), 'ja' : lambda v: slh( v, makeMonthList( u"%d月" )), 'jv' : lambda v: slh( v, [u"Januari", u"Februari", u"Maret", u"April", u"Mei", u"Juni", u"Juli", u"Agustus", u"September", u"Oktober", u"November", u"Desember"] ), 'ka' : lambda v: slh( v, [u"იანვარი", u"თებერვალი", u"მარტი", u"აპრილი", u"მაისი", u"ივნისი", u"ივლისი", u"აგვისტო", u"სექტემბერი", u"ოქტომბერი", u"ნოემბერი", u"დეკემბერი"] ), 'kn' : lambda v: slh( v, [u"ಜನವರಿ", u"ಫೆಬ್ರವರಿ", u"ಮಾರ್ಚಿ", u"ಎಪ್ರಿಲ್", u"ಮೇ", u"ಜೂನ", u"ಜುಲೈ", u"ಆಗಸ್ಟ್", u"ಸೆಪ್ಟೆಂಬರ್", u"ಅಕ್ಟೋಬರ್", u"ನವೆಂಬರ್", u"ಡಿಸೆಂಬರ್"] ), 'ko' : lambda v: slh( v, makeMonthList( u"%d월" )), 'ku' : lambda v: slh( v, [u"rêbendan", u"reşemî", u"adar", u"avrêl", u"gulan", u"pûşper", u"tîrmeh", u"gelawêj (meh)", u"rezber", u"kewçêr", u"sermawez", u"berfanbar"] ), 'kw' : lambda v: slh( v, [u"Mys Genver", u"Mys Whevrer", u"Mys Merth", u"Mys Ebrel", u"Mys Me", u"Mys Metheven", u"Mys Gortheren", u"Mys Est", u"Mys Gwyngala", u"Mys Hedra", u"Mys Du", u"Mys Kevardhu"] ), 'la' : lambda v: slh( v, [u"Ianuarius", u"Februarius", u"Martius", u"Aprilis", u"Maius", u"Iunius", u"Iulius", u"Augustus (mensis)", u"September", u"October", u"November", u"December"] ), 'lb' : lambda v: slh( v, [u"Januar", u"Februar", u"Mäerz", u"Abrëll", u"Mee", u"Juni", u"Juli", u"August", u"September", u"Oktober", u"November", u"Dezember"] ), 'li' : lambda v: slh( v, [u"jannewarie", u"fibberwarie", u"miert", u"april", u"mei", u"juni", u"juli", u"augustus (maond)", u"september", u"oktober", u"november", u"december"] ), 'lt' : lambda v: slh( v, [u"Sausis", u"Vasaris", u"Kovas", u"Balandis", u"Gegužė", u"Birželis", u"Liepa", u"Rugpjūtis", u"Rugsėjis", u"Spalis", u"Lapkritis", u"Gruodis"] ), 'mi' : lambda v: slh( v, [u"Kohi-tātea", u"Hui-tanguru", u"Poutū-te-rangi", u"Paenga-whāwhā", u"Haratua", u"Pipiri", u"Hōngongoi", u"Here-turi-kōkā", u"Mahuru", u"Whiringa-ā-nuku", u"Whiringa-ā-rangi", u"Hakihea"] ), 'ml' : lambda v: slh( v, [u"ജനുവരി", u"ഫെബ്രുവരി", u"മാര്ച്", u"ഏപ്രില്", u"മേയ്", u"ജൂണ്", u"ജൂലൈ", u"ആഗസ്റ്റ്", u"സപ്തന്പര്", u"ഒക്ടോബര്", u"നവന്പര്", u"ഡിസന്പര്"] ), 'mr' : lambda v: slh( v, [u"जानेवारी", u"फेब्रुवारी", u"मार्च", u"एप्रिल", u"मे", u"जून", u"जुलै", u"ऑगस्ट", u"सप्टेंबर", u"ऑक्टोबर", u"नोव्हेंबर", u"डिसेंबर"] ), 'ms' : lambda v: slh( v, [u"Januari", u"Februari", u"Mac", u"April", u"Mei", u"Jun", u"Julai", u"Ogos", u"September", u"Oktober", u"November", u"Disember"] ), 'nap': lambda v: slh( v, [u"Januari", u"Februari", u"Mac", u"April", u"Mei", u"Jun", u"Julai", u"Ogos", u"September", u"Oktober", u"November", u"Disember"] ), 'nap': lambda v: slh( v, [u"Jennaro", u"Frevaro", u"Màrzo", u"Abbrile", u"Majo", u"Giùgno", u"Luglio", u"Aùsto", u"Settembre", u"Ottovre", u"Nuvembre", u"Dicembre"] ), 'nds': lambda v: slh( v, [u"Januar", u"Februar", u"März", u"April", u"Mai", u"Juni", u"Juli", u"August", u"September", u"Oktober", u"November", u"Dezember"] ), 'nl' : lambda v: slh( v, [u"januari", u"februari", u"maart", u"april", u"mei", u"juni", u"juli", u"augustus (maand)", u"september", u"oktober", u"november", u"december"] ), 'nn' : lambda v: slh( v, [u"januar", u"februar", u"månaden mars", u"april", u"mai", u"juni", u"juli", u"august", u"september", u"oktober", u"november", u"desember"] ), 'no' : lambda v: slh( v, [u"januar", u"februar", u"mars", u"april", u"mai", u"juni", u"juli", u"august", u"september", u"oktober", u"november", u"desember"] ), 'oc' : lambda v: slh( v, [u"genièr", u"febrièr", u"març", u"abril", u"mai", u"junh", 
u"julhet", u"agost", u"setembre", u"octobre", u"novembre", u"decembre"] ), 'os' : lambda v: slh( v, [u"январь", u"февраль", u"мартъи", u"апрель", u"май", u"июнь", u"июль", u"август", u"сентябрь", u"октябрь", u"ноябрь", u"декабрь"] ), 'pl' : lambda v: slh( v, [u"styczeń", u"luty", u"marzec", u"kwiecień", u"maj", u"czerwiec", u"lipiec", u"sierpień", u"wrzesień", u"październik", u"listopad", u"grudzień"] ), 'pt' : lambda v: slh( v, [u"Janeiro", u"Fevereiro", u"Março", u"Abril", u"Maio", u"Junho", u"Julho", u"Agosto", u"Setembro", u"Outubro", u"Novembro", u"Dezembro"] ), 'ro' : lambda v: slh( v, [u"ianuarie", u"februarie", u"martie", u"aprilie", u"mai", u"iunie", u"iulie", u"august", u"septembrie", u"octombrie", u"noiembrie", u"decembrie"] ), 'ru' : lambda v: slh( v, [u"январь", u"февраль", u"март", u"апрель", u"май", u"июнь", u"июль", u"август", u"сентябрь", u"октябрь", u"ноябрь", u"декабрь"] ), 'sc' : lambda v: slh( v, [u"Ghennarzu", u"Frearzu", u"Martzu", u"Abrile", u"Maju", u"Làmpadas", u"Triulas", u"Aùstu", u"Cabudanni", u"Santugaìne", u"Santadria", u"Nadale"] ), 'scn': lambda v: slh( v, [u"jinnaru", u"frivaru", u"marzu", u"aprili", u"maiu", u"giugnu", u"giugnettu", u"austu", u"sittèmmiru", u"uttùviru", u"nuvèmmiru", u"dicèmmiru"] ), 'sco': lambda v: slh( v, [u"Januar", u"Februar", u"Mairch", u"Aprile", u"Mey", u"Juin", u"Julie", u"August", u"September", u"October", u"November", u"December"] ), 'se' : lambda v: slh( v, [u"ođđajagimánnu", u"guovvamánnu", u"njukčamánnu", u"cuoŋománnu", u"miessemánnu", u"geassemánnu", u"suoidnemánnu", u"borgemánnu", u"čakčamánnu", u"golggotmánnu", u"skábmamánnu", u"juovlamánnu"] ), 'simple': lambda v: slh( v, [u"January", u"February", u"March", u"April", u"May", u"June", u"July", u"August", u"September", u"October", u"November", u"December"] ), 'sk' : lambda v: slh( v, [u"január", u"február", u"marec", u"apríl", u"máj", u"jún", u"júl", u"august", u"september", u"október", u"november", u"december"] ), 'sl' : lambda v: slh( v, [u"januar", u"februar", u"marec", u"april", u"maj", u"junij", u"julij", u"avgust", u"september", u"oktober", u"november", u"december"] ), 'sq' : lambda v: slh( v, [u"Janari", u"Shkurti", u"Marsi (muaj)", u"Prilli", u"Maji", u"Qershori", u"Korriku", u"Gushti", u"Shtatori", u"Tetori", u"Nëntori", u"Dhjetori"] ), 'sr' : lambda v: slh( v, [u"јануар", u"фебруар", u"март", u"април", u"мај", u"јун", u"јул", u"август", u"септембар", u"октобар", u"новембар", u"децембар"] ), 'su' : lambda v: slh( v, [u"Januari", u"Pébruari", u"Maret", u"April", u"Méi", u"Juni", u"Juli", u"Agustus", u"Séptémber", u"Oktober", u"Nopémber", u"Désémber"] ), 'sv' : lambda v: slh( v, [u"januari", u"februari", u"mars", u"april", u"maj", u"juni", u"juli", u"augusti", u"september", u"oktober", u"november", u"december"] ), 'te' : lambda v: slh( v, [u"జనవరి", u"ఫిబ్రవరి", u"మార్చి", u"ఏప్రిల్", u"మే", u"జూన్", u"జూలై", u"ఆగష్టు", u"సెప్టెంబర్", u"అక్టోబర్", u"నవంబర్", u"డిసెంబర్"] ), 'th' : lambda v: slh( v, [u"มกราคม", u"กุมภาพันธ์", u"มีนาคม", u"เมษายน", u"พฤษภาคม", u"มิถุนายน", u"กรกฎาคม", u"สิงหาคม", u"กันยายน", u"ตุลาคม", u"พฤศจิกายน", u"ธันวาคม"] ), 'tl' : lambda v: slh( v, [u"Enero", u"Pebrero", u"Marso", u"Abril", u"Mayo", u"Hunyo", u"Hulyo", u"Agosto", u"Setyembre", u"Oktubre", u"Nobyembre", u"Disyembre"] ), 'tpi': lambda v: slh( v, [u"Janueri", u"Februeri", u"Mas", u"Epril", u"Me", u"Jun", u"Julai", u"Ogas", u"Septemba", u"Oktoba", u"Novemba", u"Disemba"] ), 'tr' : lambda v: slh( v, [u"Ocak", u"Şubat", u"Mart", u"Nisan", u"Mayıs", u"Haziran", u"Temmuz", u"Ağustos", 
u"Eylül", u"Ekim", u"Kasım", u"Aralık"] ), 'tt' : lambda v: slh( v, [u"Ğínwar", u"Febräl", u"Mart", u"Äpril", u"May", u"Yün", u"Yül", u"August", u"Sentäber", u"Öktäber", u"Nöyäber", u"Dekäber"] ), 'uk' : lambda v: slh( v, [u"січень", u"лютий", u"березень", u"квітень", u"травень", u"червень", u"липень", u"серпень", u"вересень", u"жовтень", u"листопад", u"грудень"] ), 'ur' : lambda v: slh( v, [u"جنوری", u"فروری", u"مارچ", u"اپريل", u"مئ", u"جون", u"جولائ", u"اگست", u"ستمبر", u"اکتوبر", u"نومبر", u"دسمبر"] ), 'vi' : lambda v: slh( v, [u"tháng một", u"tháng hai", u"tháng ba", u"tháng tư", u"tháng năm", u"tháng sáu", u"tháng bảy", u"tháng tám", u"tháng chín", u"tháng mười", u"tháng mười một", u"tháng 12"] ), 'vo' : lambda v: slh( v, [u"Yanul", u"Febul", u"Mäzul", u"Prilul", u"Mayul", u"Yunul", u"Yulul", u"Gustul", u"Setul", u"Tobul", u"Novul", u"Dekul"] ), 'wa' : lambda v: slh( v, [u"djanvî", u"fevrî", u"måss", u"avri", u"may", u"djun", u"djulete", u"awousse", u"setimbe", u"octôbe", u"nôvimbe", u"decimbe"] ), 'zh' : lambda v: slh( v, makeMonthList( u"%d月" )), 'zh-min-nan': lambda v: slh( v, [u"It-goe̍h", u"Jī-goe̍h", u"Saⁿ-goe̍h", u"Sì-goe̍h", u"Gō·-goe̍h", u"La̍k-goe̍h", u"Chhit-goe̍h", u"Peh-goe̍h", u"Káu-goe̍h", u"Cha̍p-goe̍h", u"Cha̍p-it-goe̍h", u"Cha̍p-jī-goe̍h"] ),
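The slh() handlers above map between a month number and its localized page title in both directions. The purely numeric variants ('ja', 'ko', 'zh') rely on a small list builder; a minimal sketch of it, assuming the module defines it essentially this way:

def makeMonthList( pattern ):
    # Twelve formatted titles, e.g. makeMonthList( u"%d月" )[0] == u"1月"
    return [ pattern % m for m in range(1,13) ]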
|
def dh_knYearConverter( value ):
    if type(value) is int:
        # Encode an integer value into a textual form.
        return unicode(value).translate(_knDigitsToLocal)
    else:
        # First make sure there are no real digits in the string
        tmp = value.translate(_knDigitsToLocal)         # Test
        if tmp == value:
            tmp = value.translate(_knLocalToDigits)     # Convert
            return dh_noConv( tmp, u'%d' )
        else:
            raise ValueError("string contains regular digits")
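The two translation tables are not shown in this cell; a plausible construction, assuming Kannada digits start at U+0CE6 (in Python 2, unicode.translate() takes a dict keyed by ordinals):

# Hypothetical table construction; the names match the code above, the values
# are an assumption based on the Unicode Kannada digit block.
_knDigitsToLocal = dict((ord(u'0') + i, ord(u'\u0CE6') + i) for i in range(10))
_knLocalToDigits = dict((ord(u'\u0CE6') + i, ord(u'0') + i) for i in range(10))
# unicode(1920).translate(_knDigitsToLocal) would yield the Kannada digits for 1920.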
|
'simple' : lambda v: dh_noConv( v, u'%dth century' ),
|
'simple' : lambda v: multi( v, [
    (lambda x: dh_noConv( x, u'%dst century' ), lambda x: x == 1 or (x > 20 and x%10 == 1)),
    (lambda x: dh_noConv( x, u'%dnd century' ), lambda x: x == 2 or (x > 20 and x%10 == 2)),
    (lambda x: dh_noConv( x, u'%drd century' ), lambda x: x == 3 or (x > 20 and x%10 == 3)),
    (lambda x: dh_noConv( x, u'%dth century' ), lambda x: True)]),
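The entry above assumes a multi() dispatcher that picks the handler for the correct ordinal suffix (1st, 2nd, 3rd, th). A minimal sketch of one plausible implementation; the real function may also handle the decoding direction:

def multi( value, tuplst ):
    # Apply the first handler whose predicate accepts the value.
    for handler, pred in tuplst:
        if pred(value):
            return handler(value)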
|
def dh_knYearConverter( value ):
    if type(value) is int:
        # Encode an integer value into a textual form.
        return unicode(value).translate(_knDigitsToLocal)
    else:
        # First make sure there are no real digits in the string
        tmp = value.translate(_knDigitsToLocal)         # Test
        if tmp == value:
            tmp = value.translate(_knLocalToDigits)     # Convert
            return dh_noConv( tmp, u'%d' )
        else:
            raise ValueError("string contains regular digits")
|
addFmt( yrMnthFmts, 'af', True, makeMonthNamedList( 'af', u"%s %%d", True ))
addFmt( yrMnthFmts, 'ang',True, [ None, None, None, None, None, None, None, None, None, None, None, u"Gēolmōnaþ %d" ])
addFmt( yrMnthFmts, 'de', True, makeMonthNamedList( 'de', u"%s %%d", True ))
addFmt( yrMnthFmts, 'el', True, makeMonthNamedList( 'el', u"%s %%d", True ))
addFmt( yrMnthFmts, 'en', True, makeMonthNamedList( 'en', u"%s %%d", True ))
addFmt( yrMnthFmts, 'es', True, makeMonthNamedList( 'es', u"%s de %%d", True ))
addFmt( yrMnthFmts, 'et', True, makeMonthNamedList( 'et', u"%s %%d", True ))
addFmt( yrMnthFmts, 'fi', True, [ None, None, None, None, None, u"Huhtikuu %d", None, None, None, None, None, None ])
addFmt( yrMnthFmts, 'fr', True, [ u"Janvier %d", u"Février %d", u"Mars %d", u"Avril %d", u"Mai %d", u"Juin %d", u"Juillet %d", u"Août %d", u"Septembre %d", u"Octobre %d", u"Novembre %d", u"Décembre %d" ])
addFmt( yrMnthFmts, 'it', True, makeMonthNamedList( 'it', u"Attualità/Anno %%d - %s", True ))
addFmt( yrMnthFmts, 'ja', True, [ u"「最近の出来事」%%d年%d月" % mm for mm in range(1,13)])
addFmt( yrMnthFmts, 'ka', True, makeMonthNamedList( 'ka', u"%s, %%d" ))
addFmt( yrMnthFmts, 'ko', True, [ u"%d년 1월", u"%d년 2월", u"%d년 3월", u"%d년 4월", u"%d년 5월", u"%d년 6월", u"%d년 7월", u"%d년 8월", u"%d년 9월", u"%d년 10월", u"%d년 11월", u"%d년 12월" ])
addFmt( yrMnthFmts, 'nl', True, [ u"Januari %d", u"Februari %d", u"Maart %d", u"April %d", u"Mei %d", u"Juni %d", u"Juli %d", u"Augustus %d", u"September %d", u"Oktober %d", u"November %d", u"December %d" ])
addFmt( yrMnthFmts, 'pl', True, makeMonthNamedList( 'pl', u"%s %%d", True ))
addFmt( yrMnthFmts, 'scn',True, [ None, None, u"Marzu %d", None, None, None, None, None, None, None, None, None ])
addFmt( yrMnthFmts, 'simple', True, makeMonthNamedList( 'simple', u"%s %%d", True ))
addFmt( yrMnthFmts, 'sv', True, makeMonthNamedList( 'sv', u"%s %%d", True ))
addFmt( yrMnthFmts, 'tt', True, makeMonthNamedList( 'tt', u"%s, %%d", True ))
addFmt( yrMnthFmts, 'ur', True, [ u"%d01مبم", u"%d02مبم", u"%d03مبم", u"%d04مبم", u"%d05مبم", u"%d06مبم", u"%d07مبم", u"%d08مبم", u"%d09مبم", u"%d10مبم", u"%d11مبم", u"%d12مبم" ])
addFmt( yrMnthFmts, 'vi', True, makeMonthList( u"Tháng %d năm %%d" ))
addFmt( yrMnthFmts, 'zh', True, makeMonthList( u"%%d年%d月" ))
addFmt( yrMnthFmts, 'zh-min-nan',True, makeMonthList( u"%%d nî %d goe̍h" ))

def getDictionaryYear( lang, title ):
|
addFmt( yrMnthFmts, 'af', True, makeMonthNamedList( 'af', u"%s %%d", True ))
addFmt( yrMnthFmts, 'ang',True, [ None, None, None, None, None, None, None, None, None, None, None, u"Gēolmōnaþ %d" ])
addFmt( yrMnthFmts, 'de', True, makeMonthNamedList( 'de', u"%s %%d", True ))
addFmt( yrMnthFmts, 'el', True, makeMonthNamedList( 'el', u"%s %%d", True ))
addFmt( yrMnthFmts, 'en', True, makeMonthNamedList( 'en', u"%s %%d", True ))
addFmt( yrMnthFmts, 'es', True, makeMonthNamedList( 'es', u"%s de %%d", True ))
addFmt( yrMnthFmts, 'et', True, makeMonthNamedList( 'et', u"%s %%d", True ))
addFmt( yrMnthFmts, 'fi', True, [ None, None, None, None, None, u"Huhtikuu %d", None, None, None, None, None, None ])
addFmt( yrMnthFmts, 'fr', True, [ u"Janvier %d", u"Février %d", u"Mars %d", u"Avril %d", u"Mai %d", u"Juin %d", u"Juillet %d", u"Août %d", u"Septembre %d", u"Octobre %d", u"Novembre %d", u"Décembre %d" ])
addFmt( yrMnthFmts, 'it', True, makeMonthNamedList( 'it', u"Attualità/Anno %%d - %s", True ))
addFmt( yrMnthFmts, 'ja', True, [ u"「最近の出来事」%%d年%d月" % mm for mm in range(1,13)])
addFmt( yrMnthFmts, 'ka', True, makeMonthNamedList( 'ka', u"%s, %%d" ))
addFmt( yrMnthFmts, 'ko', True, [ u"%d년 1월", u"%d년 2월", u"%d년 3월", u"%d년 4월", u"%d년 5월", u"%d년 6월", u"%d년 7월", u"%d년 8월", u"%d년 9월", u"%d년 10월", u"%d년 11월", u"%d년 12월" ])
addFmt( yrMnthFmts, 'nl', True, [ u"Januari %d", u"Februari %d", u"Maart %d", u"April %d", u"Mei %d", u"Juni %d", u"Juli %d", u"Augustus %d", u"September %d", u"Oktober %d", u"November %d", u"December %d" ])
addFmt( yrMnthFmts, 'pl', True, makeMonthNamedList( 'pl', u"%s %%d", True ))
addFmt( yrMnthFmts, 'scn',True, [ None, None, u"Marzu %d", None, None, None, None, None, None, None, None, None ])
addFmt( yrMnthFmts, 'simple', True, makeMonthNamedList( 'simple', u"%s %%d", True ))
addFmt( yrMnthFmts, 'sv', True, makeMonthNamedList( 'sv', u"%s %%d", True ))
addFmt( yrMnthFmts, 'tt', True, makeMonthNamedList( 'tt', u"%s, %%d", True ))
addFmt( yrMnthFmts, 'ur', True, [ u"%d01مبم", u"%d02مبم", u"%d03مبم", u"%d04مبم", u"%d05مبم", u"%d06مبم", u"%d07مبم", u"%d08مبم", u"%d09مبم", u"%d10مبم", u"%d11مبم", u"%d12مبم" ])
addFmt( yrMnthFmts, 'vi', True, makeMonthList( u"Tháng %d năm %%d" ))
addFmt( yrMnthFmts, 'zh', True, makeMonthList( u"%%d年%d月" ))
addFmt( yrMnthFmts, 'zh-min-nan',True, makeMonthList( u"%%d nî %d goe̍h" ))

def getDictionaryYear( lang, title, ignoreFirstLetterCase = True ):
|
def makeMonthNamedList( lang, pattern, makeUpperCase = None ):
    """Creates a list of 12 elements based on the name of the month.
    The language-dependent month name is used as a formatting argument to
    the pattern. The pattern must have one %s that will be replaced by the
    localized month name. Use %%d for any other parameters that should be
    preserved.
    """
    if makeUpperCase == None:
        f = lambda s: s
    elif makeUpperCase == True:
        f = lambda s: s[0].upper() + s[1:]
    elif makeUpperCase == False:
        f = lambda s: s[0].lower() + s[1:]
    return [ pattern % f(monthName(lang, m)) for m in range(1,13) ]
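For example, assuming month-name tables are registered for these codes:

months = makeMonthNamedList( 'en', u"%s %%d", True )
# months[0] == u"January %d"; the doubled %%d survives formatting so the
# year can be substituted later.
months_es = makeMonthNamedList( 'es', u"%s de %%d" )   # [u"enero de %d", ...]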
|
import wikipedia
|
def printMonthArray( lang, pattern, capitalize ):
    """Print the twelve localized month names formatted with pattern."""
    import wikipedia
    for s in makeMonthNamedList( lang, pattern, capitalize ):
        wikipedia.output( s )
|
|
import wikipedia
|
start,end,step = formatLimits[m]
if showAll:
    wikipedia.output(u"Limits: from %d to %d, with step %d" % (start,end,step))
|
def testMapEntry( showAll, m, year, testYear ):
    """This is a test function, to be used interactively to test the validity
    of the above maps. To test, run this function with the map name, year to
    be tested, and the final year expected.
    Usage example:
        run python interpreter
        >>> import date
        >>> date.testMapEntry( 'DecadeAD', 1992, 1990 )
        >>> date.testMapEntry( 'CenturyAD', 20, 20 )
    """
    import wikipedia
    for code, value in formats[m].iteritems():
        if showAll:
            wikipedia.output(u"%s[%s](%d)" % (m, code, year))
            wikipedia.output(u" -> '%s' -> %d" % (value(year), value(value(year))))
        if value(value(year)) != testYear:
            raise ValueError("%s[%s](%d) != %d: assert failed, years didn't match" % (m,code,year,testYear))
|
+ "<a href=.+? class=\"new\" title=.+?>.+?<\/a><\/p>\n"
|
+ "<a href=.+? title=.+?>.+?<\/a><\/p>\n"
|
def refresh_messages():
    host = wikipedia.family.hostname(wikipedia.mylang)
    # URL of the 'all messages' special page
    url = wikipedia.family.allmessages_address(wikipedia.mylang)
    print 'Retrieving MediaWiki messages...'
    allmessages, charset = wikipedia.getUrl(host,url)
    #f=open('/home/daniel/allmessages.html', 'r')
    #allmessages = f.read()
    # First group is MediaWiki key string. Second group is the current value string.
    itemR = re.compile("<tr bgcolor=\"#F0F0FF\">\n"
                     + "<td>\n"
                     + "<p><a href=\"\/wiki/MediaWiki:.+?\" title=\"MediaWiki:.+?\">(.+?)<\/a><br \/>\n"
                     + "<a href=.+? class=\"new\" title=.+?>.+?<\/a><\/p>\n"
                     + "</td>\n"
                     + "<td>\n"
                     + "<p>.+?</p>\n"
                     + "</td>\n"
                     + "<td>\n"
                     + "<p>(.+?)</p>\n"
                     + "<\/td>\n"
                     + "<\/tr>", re.DOTALL)
    items = itemR.findall(allmessages)
    # we will save the found key:value pairs here
    dictionary = {}
    for item in items:
        # Key strings only contain ASCII characters, so we can use them as dictionary keys
        dictionary[item[0]] = unicode(item[1], wikipedia.code2encoding(wikipedia.mylang))
    # Save the dictionary to disk
    # TODO: Put them into another directory
    f = open('mediawiki-messages-%s.dat' % wikipedia.mylang, 'w')
    pickle.dump(dictionary, f)
    f.close()
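A matching reader for the pickle written above might look like this sketch (the function name is hypothetical; the real module may differ):

def get_message(key):
    # Load the key:value dictionary saved by refresh_messages().
    f = open('mediawiki-messages-%s.dat' % wikipedia.mylang, 'r')
    dictionary = pickle.load(f)
    f.close()
    return dictionary[key]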
|
x = x.encode(encoding)
return x
|
return x.decode(encoding)
|
def url2unicode(percentname, site):
    # Does the input string contain non-ascii characters? In that case,
    # it is not really an url, and we do not have to unquote it....
    for c in percentname:
        if ord(c)>128:
            x=percentname
            break
    else:
        # Before removing the % encoding, make sure it is an ASCII string.
        # unquote doesn't work on unicode strings.
        x=urllib.unquote(str(percentname))
    #print "DBG> ",language,repr(percentname),repr(x)
    for encoding in site.encodings():
        try:
            x = x.encode(encoding)
            return x
        except:
            pass
    raise UnicodeError("Could not decode %s" % repr(percentname))
|
entryR = re.compile('<li>(?P<date>.+?) <a href=".+?" title="(?P<title>.+?)">.+?</a> \((?P<length>\d+)(.+?)\) \. \. (?P<loggedin><a href=".+?" title=".+?">)?(?P<username>.+?)(</a>)?( <em>\((?P<comment>.+?)\)</em>)?</li>')
|
entryR = re.compile('<li[^>]*>(?P<date>.+?) \S*?<a href=".+?" title="(?P<title>.+?)">.+?</a>.+?\((?P<length>\d+)(.+?)\) \. \. (.*?)(?P<loggedin><a href=".+?" title="(?P<username>.+?)">)')
|
def newpages(self, number = 10, repeat = False):
    """Generator which yields new articles subsequently. It starts with the
    article created 'number' articles ago (first argument). When these are
    all yielded it fetches NewPages again. If there is no new page, it
    blocks until there is one, sleeping between subsequent fetches of
    NewPages.
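Hypothetical consumption of the generator, matching the six-tuple yielded further down in this row (mysite is assumed to be the working Site object):

for page, date, length, loggedIn, username, comment in mysite.newpages(number = 20):
    # Report each new article with its size and author.
    wikipedia.output(u'%s (%s bytes) by %s' % (page.linkname(), length, username))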
|
comment = m.group('comment')
|
def newpages(self, number = 10, repeat = False):
    """Generator which yields new articles subsequently. It starts with the
    article created 'number' articles ago (first argument). When these are
    all yielded it fetches NewPages again. If there is no new page, it
    blocks until there is one, sleeping between subsequent fetches of
    NewPages.
|
|
yield page, date, length, loggedIn, username, comment
|
yield page, date, length, loggedIn, username, None
|
def newpages(self, number = 10, repeat = False):
    """Generator which yields new articles subsequently. It starts with the
    article created 'number' articles ago (first argument). When these are
    all yielded it fetches NewPages again. If there is no new page, it
    blocks until there is one, sleeping between subsequent fetches of
    NewPages.
|
defdis = self.site().family.disambig( "_default" )
|
def isDisambig(self):
    if not hasattr(self, '_isDisambig'):
        defdis = self.site().family.disambig( "_default" )
        locdis = self.site().family.disambig( self._site.lang )
|
|
if tn in defdis or tn in locdis:
|
if tn in locdis:
|
def isDisambig(self):
    if not hasattr(self, '_isDisambig'):
        defdis = self.site().family.disambig( "_default" )
        locdis = self.site().family.disambig( self._site.lang )
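The hasattr() guard above is a simple per-object cache. The idiom in isolation (names here are illustrative, not from the source):

def cached_value(self):
    if not hasattr(self, '_cached'):
        self._cached = expensive_computation()   # runs once per object
    return self._cached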
|
wikipedia.setAction(wikipedia.translate(wikipedia.getSite(),msg_change) % oldCat.title())
|
wikipedia.setAction(wikipedia.translate(wikipedia.getSite(),msg_change) % self.oldCat.title())
|
def __init__(self, oldCatTitle, newCatTitle):
    self.oldCat = catlib.Category(wikipedia.getSite(), 'Category:' + oldCatTitle)
    self.newCatTitle = newCatTitle
    # get edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(),msg_change) % oldCat.title())
|
'nds': [u'Begreepkloren'],
|
'nds': [u'Mehrdüdig Begreep'],
|
def __init__(self):
    family.Family.__init__(self)
    self.name = 'wikipedia'
|
output(u"ERROR> link from %s to %s:%s has leading colon?!" % (self, newsite, newname))
|
output(u"ERROR> link from %s to %s:%s has leading colon?!" % (self.linkname(), newsite, newname))
|
def interwiki(self):
    """A list of interwiki links in the page. This will retrieve the page
    text to do its work, so it can raise the same exceptions that are
    raised by the get() method.
|
output(u"ERROR> link from %s to %s:%s has leading space?!" % (self, newsite, newname))
|
output(u"ERROR> link from %s to %s:%s has leading space?!" % (self.linkname(), newsite, newname))
|
def interwiki(self):
    """A list of interwiki links in the page. This will retrieve the page
    text to do its work, so it can raise the same exceptions that are
    raised by the get() method.
|
output(u"ERROR> link from %s to %s:%s is invalid encoding?!" % (self, newsite, newname))
|
output(u"ERROR> link from %s to %s:%s is invalid encoding?!" % (self.linkname(), newsite, newname))
|
def interwiki(self):
    """A list of interwiki links in the page. This will retrieve the page
    text to do its work, so it can raise the same exceptions that are
    raised by the get() method.
|
output(u"ERROR> link from %s to %s:%s contains invalid character?!" % (self, newsite, newname))
|
output(u"ERROR> link from %s to %s:%s contains invalid character?!" % (self.linkname(), newsite, newname))
|
def interwiki(self):
    """A list of interwiki links in the page. This will retrieve the page
    text to do its work, so it can raise the same exceptions that are
    raised by the get() method.
|
output(u"ERROR> link from %s to %s:%s contains invalid unicode reference?!" % (self, newsite, newname))
|
output(u"ERROR> link from %s to %s:%s contains invalid unicode reference?!" % (self.linkname(), newsite, newname))
|
def interwiki(self):
    """A list of interwiki links in the page. This will retrieve the page
    text to do its work, so it can raise the same exceptions that are
    raised by the get() method.
|
templateR=re.compile(r'\{\{([mM][sS][gG]:)?[' + self.old[0].upper() + self.old[0].lower() + ']' + self.old[1:] + '(?P<sortkey>\|[^}]+)?}}')
|
templateR=re.compile(r'\{\{([mM][sS][gG]:)?[' + self.old[0].upper() + self.old[0].lower() + ']' + self.old[1:] + '(?P<sortkey>\|[^}]+|)}}')
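The changed alternation matters for the substitutions built from this pattern: with an empty alternative, the sortkey group always participates in the match, so \g<sortkey> expands to '' instead of failing. An illustrative demonstration with a fixed template name:

import re
pat_optional = re.compile(r'\{\{Vfd(?P<sortkey>\|[^}]+)?}}')
pat_empty    = re.compile(r'\{\{Vfd(?P<sortkey>\|[^}]+|)}}')
print pat_empty.sub(r'{{msg:Vfd\g<sortkey>}}', '{{Vfd}}')   # {{msg:Vfd}}
# With pat_optional the group is unmatched for '{{Vfd}}', and the Python 2
# re module of the time raised an "unmatched group" error during substitution.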
|
def run(self):
    # regular expression to find the original template.
    # {{msg:vfd}} does the same thing as {{msg:Vfd}}, so both will be found.
    # The new syntax, {{vfd}}, will also be found.
    templateR=re.compile(r'\{\{([mM][sS][gG]:)?[' + self.old[0].upper() + self.old[0].lower() + ']' + self.old[1:] + '(?P<sortkey>\|[^}]+)?}}')
    replacements = {}
    if self.remove:
        replacements[templateR] = ''
    elif self.resolve:
        replacements[templateR] = '{{subst:' + self.old + '}}'
    elif self.oldFormat:
        replacements[templateR] = '{{msg:' + self.new + '\g<sortkey>}}'
    else:
        replacements[templateR] = '{{' + self.new + '\g<sortkey>}}'
    replaceBot = replace.ReplaceRobot(self.generator, replacements, regex = True)
    replaceBot.run()
|
print "DBG> addEntity:", repr(name), repr(result)
|
def addEntity(name):
    """Convert a unicode name into ascii name with entities"""
    import htmlentitydefs
    result = ''
    for c in name:
        if ord(c) < 128:
            result += str(c)
        else:
            for k, v in htmlentitydefs.entitydefs.iteritems():
                if (len(v) == 1 and ord(c) == ord(v)) or v == '&#%d;'%ord(c):
                    result += '&%s;' % k
                    break
            else:
                raise NoSuchEntity("Cannot locate entity for character %s"%repr(c))
    print "DBG> addEntity:", repr(name), repr(result)
    return result
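Illustrative round-trip, relying only on the standard htmlentitydefs table:

print addEntity(u'M\xfcnchen')   # prints the DBG line, then: M&uuml;nchen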
|
|
wikipedia.action += wrd
|
wikipedia.setAction('Robot-assisted disambiguation '+wrd)
|
def getreferences(pl):
    host = wikipedia.langs[pl.code()]
    url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname())
    txt,charset=wikipedia.getUrl(host,url)
    Rref=re.compile('<li><a href.* title="([^"]*)"')
    return Rref.findall(txt)
|
print "== %s =="%(refpl),m.start(),m.end()
|
print "== %s =="%(refpl)
|
def getreferences(pl):
    host = wikipedia.langs[pl.code()]
    url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname())
    txt,charset=wikipedia.getUrl(host,url)
    Rref=re.compile('<li><a href.* title="([^"]*)"')
    return Rref.findall(txt)
|
'ia' : lambda v: dh_dayOfMnth( v, u'%d de decembre' ),
|
'ia' : lambda v: dh_dayOfMnth( v, u'%d de Decembre' ),
|
def dh_knYearConverter( value ):
    if type(value) is int:
        # Encode an integer value into a textual form.
        return unicode(value).translate(_knDigitsToLocal)
    else:
        # First make sure there are no real digits in the string
        tmp = value.translate(_knDigitsToLocal)         # Test
        if tmp == value:
            tmp = value.translate(_knLocalToDigits)     # Convert
            return dh_noConv( tmp, u'%d' )
        else:
            raise ValueError("string contains regular digits")
|
self.loggedin = 'Userlogin' not in txt
|
self._loggedin = 'Userlogin' not in txt
|
def loggedin(self, check = False):
    if not hasattr(self,'_loggedin'):
        self._fill()
    if check:
        txt = getPage(self, 'Non-existing page', get_edit_page = False)
        self.loggedin = 'Userlogin' not in txt
    return self._loggedin
|
if not globalvar.autonomous:
|
if globalvar.autonomous:
    if self.inpl.isDisambig() and not pl.isDisambig():
        wikipedia.output(u"NOTE: Ignoring link from disambiguation page %s to non-disambiguation %s" % (self.inpl.aslink(forceInterwiki = True), pl.aslink(forceInterwiki = True)))
        del self.done[pl]
    elif not self.inpl.isDisambig() and pl.isDisambig():
        wikipedia.output(u"NOTE: Ignoring link from non-disambiguation page %s to disambiguation %s" % (self.inpl.aslink(forceInterwiki = True), pl.aslink(forceInterwiki = True)))
        del self.done[pl]
else:
|
def workDone(self, counter):
    """This is called by a worker to tell us that the promised work was
    completed as far as possible. The only argument is an instance of a
    counter class, that has methods minus() and plus() to keep counts of
    the total work todo."""
    # Loop over all the pages that should have been taken care of
    for pl in self.pending:
        # Mark the page as done
        self.done[pl] = pl.site()
|
firstLabel = '"%s:%s"' % (inpl.site().language(), wikipedia.unicode2html(inpl.title(), 'ascii'))
|
firstLabel = '"%s:%s"' % (self.inpl.site().language(), wikipedia.unicode2html(inpl.title(), 'ascii'))
|
def createGraph(self):
    import pydot
    # create empty graph
    graph = pydot.Dot()
    graph.add_node(pydot.Node('start', shape = 'point'))
    for page in self.foundin.iterkeys():
        # a node for each found page
        node = pydot.Node('"%s:%s"' % (page.site().language(), wikipedia.unicode2html(page.title(), 'ascii')), shape = 'rectangle')
        if not page.exists():
            node.set_style('filled')
            node.set_fillcolor('red')
        elif page.isRedirectPage():
            node.set_style('filled')
            node.set_fillcolor('blue')
        elif page.isDisambig():
            node.set_style('filled')
            node.set_fillcolor('orange')
        graph.add_node(node)
    # mark start node
    firstLabel = '"%s:%s"' % (inpl.site().language(), wikipedia.unicode2html(inpl.title(), 'ascii'))
    graph.add_edge(pydot.Edge('start', firstLabel))
    for page, referrers in self.foundin.iteritems():
        for refPage in referrers:
            sourceLabel = '"%s:%s"' % (refPage.site().language(), wikipedia.unicode2html(refPage.title(), 'ascii'))
            targetLabel = '"%s:%s"' % (page.site().language(), wikipedia.unicode2html(page.title(), 'ascii'))
            edge = pydot.Edge(sourceLabel, targetLabel)
            oppositeEdge = graph.get_edge(targetLabel, sourceLabel)
            if oppositeEdge:
                oppositeEdge.set_arrowtail('normal')
            else:
                # add edge
                if refPage.site() == page.site():
                    edge.set_color('blue')
                elif not page.exists():
                    # mark dead links
                    edge.set_color('red')
                elif refPage.isDisambig() != page.isDisambig():
                    # mark links between disambiguation and non-disambiguation
                    # pages
                    edge.set_color('orange')
                graph.add_edge(edge)
    filename = 'interwiki-graphs/%s-%s-%s.png' % (inpl.site().family.name, inpl.site().language(), inpl.urlname())
    if graph.write(filename, prog = 'dot', format = 'png'):
        wikipedia.output(u'Graph saved as %s' % filename)
    else:
        wikipedia.output(u'Graph could not be saved as %s' % filename)
|
result[k] = v[answer-1]
break
|
try:
    result[k] = v[answer-1]
except IndexError:
    pass
else:
    break
|
def assemble(self):
    # No errors have been seen so far
    nerr = 0
    # Build up a dictionary of all links found, with the code as key.
    # Each value will be a list.
    new = {}
    for pl in self.done.keys():
        code = pl.code()
        if code == wikipedia.mylang and pl.exists() and not pl.isRedirectPage() and not pl.isEmpty():
            if pl != self.inpl:
                self.problem("Found link to %s"%pl.aslink())
                self.whereReport(pl)
                nerr += 1
        elif pl.exists() and not pl.isRedirectPage():
            if code in new:
                new[code].append(pl)
            else:
                new[code] = [pl]
    # Clean up the Chinese links
    if 'zh-cn' in new or 'zh-tw' in new:
        if 'zh' in new:
            del new['zh']
            print "Ignoring links to zh in presence of zh-cn or zh-tw"
    # Remove Chinese internal links
    if wikipedia.mylang=='zh' or wikipedia.mylang=='zh-cn':
        if 'zh-tw' in new:
            if len(new['zh-tw']) > 1:
                nerr +=1
                self.problem("Found more than one link for traditional Chinese")
            del new['zh-tw']
    if wikipedia.mylang=='zh' or wikipedia.mylang=='zh-tw':
        if 'zh-cn' in new:
            if len(new['zh-cn']) > 1:
                nerr +=1
                self.problem("Found more than one link for simplified Chinese")
            del new['zh-cn']
    # See if new{} contains any problematic values
    result = {}
    for k, v in new.items():
        if len(v) > 1:
            nerr += 1
            self.problem("Found more than one link for %s"%k)
    # If there are any errors, we need to go through all
    # items manually.
    if nerr > 0:
        # First loop over the ones that have more solutions
        for k,v in new.items():
            if len(v) > 1:
                print "="*30
                print "Links to %s"%k
                i = 0
                for pl2 in v:
                    i += 1
                    wikipedia.output(u" (%d) Found link to %s in:"%(i,pl2.aslink()))
                    self.whereReport(pl2, indent=8)
                if not globalvar.autonomous:
                    while 1:
                        answer = raw_input("Which variant should be used [number, (n)one, (g)ive up] :")
                        if answer:
                            if answer in 'gG':
                                return None
                            elif answer in 'nN':
                                # None acceptable
                                break
                            elif answer[0] in '0123456789':
                                answer = int(answer)
                                result[k] = v[answer-1]
                                break
        # We don't need to continue with the rest if we're in autonomous
        # mode.
        if globalvar.autonomous:
            return None
        # Loop over the ones that have one solution, so are in principle
        # not a problem.
        acceptall = False
        for k,v in new.items():
            if len(v) == 1:
                print "="*30
                pl2 = v[0]
                wikipedia.output(u"Found link to %s in:" % pl2.aslink())
                self.whereReport(pl2, indent=4)
                while 1:
                    if acceptall:
                        answer = 'a'
                    else:
                        answer = raw_input("What should be done [(a)ccept, (r)eject, (g)ive up, accept a(l)l] :")
                    if not answer:
                        answer = 'a'
                    if answer in 'lL':
                        # accept all
                        acceptall = True
                        answer = 'a'
                    if answer in 'aA':
                        # accept this one
                        result[k] = v[0]
                        break
                    elif answer in 'gG':
                        # give up
                        return None
                    elif answer in 'rR':
                        # reject
                        # None acceptable
                        break
    else:
        # nerr <= 0, hence there are no lists longer than one.
        for k,v in new.items():
            result[k] = v[0]
    return result
|
pl = wikipedia.PageLink(wikipedia.getSit(), refpage)
|
pl = wikipedia.PageLink(wikipedia.getSite(), refpage)
|
def add_category(sort_by_last_name = False):
    '''
    A robot to mass-add a category to a list of pages.
    '''
    print "This bot has two modes: you can add a category link to all"
    print "pages mentioned in a List that is now in another wikipedia page"
    print "or you can add a category link to all pages that link to a"
    print "specific page. If you want the second, please give an empty"
    print "answer to the first question."
    listpage = wikipedia.input(u'Wikipedia page with list of pages to change:')
    if listpage:
        try:
            pl = wikipedia.PageLink(wikipedia.getSite(), listpage)
        except NoPage:
            wikipedia.output(u'The page ' + listpage + ' could not be loaded from the server.')
            sys.exit()
        pagenames = pl.links()
    else:
        refpage = wikipedia.input(u'Wikipedia page that is now linked to:')
        pl = wikipedia.PageLink(wikipedia.getSit(), refpage)
        pagenames = wikipedia.getReferences(pl)
    print " ==> %d pages to process"%len(pagenames)
    print
    newcat = wikipedia.input(u'Category to add (do not give namespace):')
    newcat = newcat[:1].capitalize() + newcat[1:]
    # get edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_add) % newcat)
    cat_namespace = wikipedia.getSite().category_namespaces()[0]
    answer = ''
    for nm in pagenames:
        pl2 = wikipedia.PageLink(wikipedia.getSite(), nm)
        if answer != 'a':
            answer = ''
            while answer not in ('y','n','a'):
                answer = wikipedia.input(u'%s [y/n/a(ll)]:' % (pl2.aslink()))
                if answer == 'a':
                    confirm = ''
                    while confirm not in ('y','n'):
                        confirm = wikipedia.input(u'This should be used if and only if you are sure that your links are correct! Are you sure? [y/n]:')
        if answer == 'y' or answer == 'a':
            try:
                cats = pl2.categories()
            except wikipedia.NoPage:
                wikipedia.output(u"%s doesn't exist yet. Ignoring."%(pl2.aslocallink()))
                pass
            except wikipedia.IsRedirectPage,arg:
                pl3 = wikipedia.PageLink(wikipedia.getSite(),arg.args[0])
                wikipedia.output(u"WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink()))
            else:
                wikipedia.output(u"Current categories:")
                for curpl in cats:
                    wikipedia.output(u"* %s" % cat.aslink())
                catpl = wikipedia.PageLink(wikipedia.getSite(), cat_namespace + ':' + newcat)
                if sort_by_last_name:
                    catpl = sorted_by_last_name(catpl, pl2)
                if catpl in cats:
                    wikipedia.output(u"%s already has %s"%(pl2.aslocallink(), catpl.aslocallink()))
                else:
                    wikipedia.output(u'Adding %s' % catpl.aslocallink())
                    cats.append(catpl)
                    text = pl2.get()
                    text = wikipedia.replaceCategoryLinks(text, cats)
                    pl2.put(text)
|
s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[0]) + ",".join([x.lang for x in adding])
|
s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[0]) + " " + ", ".join([x.lang for x in adding])
|
def compareLanguages(old, new):
    removing = []
    adding = []
    modifying = []
    mysite = wikipedia.getSite()
    for site in old.keys():
        if site not in new:
            removing.append(site)
        elif old[site] != new[site]:
            modifying.append(site)
    for site2 in new.keys():
        if site2 not in old:
            adding.append(site2)
    s = ""
    if adding:
        s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[0]) + ",".join([x.lang for x in adding])
    if removing:
        s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[1]) + ",".join([x.lang for x in removing])
    if modifying:
        s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[2]) + ",".join([x.lang for x in modifying])
    return s,removing
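The separator change recorded in this row is purely cosmetic in the edit summary:

print ",".join(['de', 'fr'])    # de,fr   (old, cramped)
print ", ".join(['de', 'fr'])   # de, fr  (new, readable)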
|
s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[1]) + ",".join([x.lang for x in removing])
|
s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[1]) + " " + ", ".join([x.lang for x in removing])
|
def compareLanguages(old, new):
    removing = []
    adding = []
    modifying = []
    mysite = wikipedia.getSite()
    for site in old.keys():
        if site not in new:
            removing.append(site)
        elif old[site] != new[site]:
            modifying.append(site)
    for site2 in new.keys():
        if site2 not in old:
            adding.append(site2)
    s = ""
    if adding:
        s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[0]) + ",".join([x.lang for x in adding])
    if removing:
        s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[1]) + ",".join([x.lang for x in removing])
    if modifying:
        s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[2]) + ",".join([x.lang for x in modifying])
    return s,removing
|
s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[2]) + ",".join([x.lang for x in modifying])
|
s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[2]) + " " + ", ".join([x.lang for x in modifying])
|
def compareLanguages(old, new):
    removing = []
    adding = []
    modifying = []
    mysite = wikipedia.getSite()
    for site in old.keys():
        if site not in new:
            removing.append(site)
        elif old[site] != new[site]:
            modifying.append(site)
    for site2 in new.keys():
        if site2 not in old:
            adding.append(site2)
    s = ""
    if adding:
        s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[0]) + ",".join([x.lang for x in adding])
    if removing:
        s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[1]) + ",".join([x.lang for x in removing])
    if modifying:
        s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[2]) + ",".join([x.lang for x in modifying])
    return s,removing
|
i = 0
for page in self.pageGenerator:
    if page not in globalvar.skip:
        i += 1
|
for i in range(number):
    try:
        page = self.pageGenerator.next()
        while page in globalvar.skip:
            page = self.pageGenerator.next()
|
def generateMore(self, number):
    """Generate more subjects. This is called internally when the list of
    subjects becomes too small, but only if there is a PageGenerator"""
    fs = self.firstSubject()
    if fs:
        wikipedia.output(u"NOTE: The first unfinished subject is " + fs.pl().aslink())
    print "NOTE: Number of pages queued is %d, trying to add %d more."%(len(self.subjects), number)
    i = 0
    for page in self.pageGenerator:
        if page not in globalvar.skip:
            i += 1
            self.add(page, hints = hints)
            if i >= number:
                return
    # nothing more to generate
    self.pageGenerator = None
|
if i >= number:
    return
self.pageGenerator = None
|
except StopIteration:
    self.pageGenerator = None
    break
|
def generateMore(self, number):
    """Generate more subjects. This is called internally when the list of
    subjects becomes too small, but only if there is a PageGenerator"""
    fs = self.firstSubject()
    if fs:
        wikipedia.output(u"NOTE: The first unfinished subject is " + fs.pl().aslink())
    print "NOTE: Number of pages queued is %d, trying to add %d more."%(len(self.subjects), number)
    i = 0
    for page in self.pageGenerator:
        if page not in globalvar.skip:
            i += 1
            self.add(page, hints = hints)
            if i >= number:
                return
    # nothing more to generate
    self.pageGenerator = None
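The rewritten loop in this row leans on the Python 2 iterator protocol; the pattern in isolation:

gen = iter([1, 2, 3])
try:
    while True:
        item = gen.next()        # raises StopIteration when exhausted
except StopIteration:
    pass                         # nothing left; drop the generator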
|
bot.setPageGenerator(hintlessPageGen)
|
bot.setPageGenerator(iter(hintlessPageGen))
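Wrapping the page source in iter() matters because .next() exists on iterators, not on plain lists (Python 2):

print iter(['a', 'b']).next()   # a
# ['a', 'b'].next()             # would raise AttributeError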
|
def readWarnfile(filename, sa):
    import warnfile
    reader = warnfile.WarnfileReader(filename)
    # we won't use removeHints
    (hints, removeHints) = reader.getHints()
    pages = hints.keys()
    for page in pages:
        # The WarnfileReader gives us a list of pagelinks, but titletranslate.py expects a list of strings, so we convert it back.
        # TODO: This is a quite ugly hack, in the future we should maybe make titletranslate expect a list of pagelinks.
        hintStrings = ['%s:%s' % (hintedPage.site().language(), hintedPage.title()) for hintedPage in hints[page]]
        sa.add(page, hints = hintStrings)
|
ns = []
|
namespaces = []
|
def category_namespaces(code):
    ns = []
    if not category.has_key(code):
        #print "DBG> No category namespace known for %s" % code
        import mediawiki_messages
        ns.append(mediawiki_messages.get('nstab-category', lang = code))
    else:
        ns.append(category[code])
        ns.append(category[code].lower())
        if category[code] != category['en']:
            ns.append(category['en'])
            ns.append(category['en'].lower())
    return ns
|
ns.append(mediawiki_messages.get('nstab-category', lang = code))
|
namespace_title = mediawiki_messages.get('nstab-category', lang = code)
|
def category_namespaces(code):
    ns = []
    if not category.has_key(code):
        #print "DBG> No category namespace known for %s" % code
        import mediawiki_messages
        ns.append(mediawiki_messages.get('nstab-category', lang = code))
    else:
        ns.append(category[code])
        ns.append(category[code].lower())
        if category[code] != category['en']:
            ns.append(category['en'])
            ns.append(category['en'].lower())
    return ns
|
ns.append(category[code])
ns.append(category[code].lower())
if category[code] != category['en']:
    ns.append(category['en'])
    ns.append(category['en'].lower())
return ns
|
namespace_title = category[code]
namespaces.append(namespace_title)
namespaces.append(namespace_title.lower())
if namespace_title != category['en']:
    namespaces.append(category['en'])
    namespaces.append(category['en'].lower())
return namespaces
|
def category_namespaces(code):
    ns = []
    if not category.has_key(code):
        #print "DBG> No category namespace known for %s" % code
        import mediawiki_messages
        ns.append(mediawiki_messages.get('nstab-category', lang = code))
    else:
        ns.append(category[code])
        ns.append(category[code].lower())
        if category[code] != category['en']:
            ns.append(category['en'])
            ns.append(category['en'].lower())
    return ns
|
ns = wikipedia.family.category_namespaces(wikipedia.mylang)
cat_namespace = ns[0].encode(wikipedia.myencoding())
|
cat_namespace = wikipedia.family.category_namespace(wikipedia.mylang)
|
def add_category(sort_by_last_name = False):
    '''
    A robot to mass-add a category to a list of pages.
    '''
    print "This bot has two modes: you can add a category link to all"
    print "pages mentioned in a List that is now in another wikipedia page"
    print "or you can add a category link to all pages that link to a"
    print "specific page. If you want the second, please give an empty"
    print "answer to the first question."
    listpage = wikipedia.input(u'Wikipedia page with list of pages to change:')
    if listpage:
        try:
            pl = wikipedia.PageLink(wikipedia.mylang, listpage)
        except NoPage:
            print 'The page ' + listpage + ' could not be loaded from the server.'
            sys.exit()
        pagenames = pl.links()
    else:
        refpage = wikipedia.input(u'Wikipedia page that is now linked to:')
        pl = wikipedia.PageLink(wikipedia.mylang, refpage)
        pagenames = wikipedia.getReferences(pl)
    print " ==> %d pages to process"%len(pagenames)
    print
    newcat = wikipedia.input(u'Category to add (do not give namespace):')
    newcat = newcat[:1].capitalize() + newcat[1:]
    # get edit summary message
    wikipedia.setAction(wikipedia.translate(wikipedia.mylang, msg_change) % newcat)
    ns = wikipedia.family.category_namespaces(wikipedia.mylang)
    cat_namespace = ns[0].encode(wikipedia.myencoding())
    answer = ''
    for nm in pagenames:
        pl2 = wikipedia.PageLink(wikipedia.mylang, nm)
        if answer != 'a':
            answer = ''
            while answer not in ('y','n','a'):
                answer = wikipedia.input(u'%s [y/n/a(ll)]:' % (pl2.aslink()))
                if answer == 'a':
                    confirm = ''
                    while confirm not in ('y','n'):
                        confirm = wikipedia.input(u'This should be used if and only if you are sure that your links are correct! Are you sure? [y/n]:')
        if answer == 'y' or answer == 'a':
            try:
                cats = pl2.categories()
            except wikipedia.NoPage:
                print "%s doesn't exist yet. Ignoring."%(pl2.aslocallink())
                pass
            except wikipedia.IsRedirectPage,arg:
                pl3 = wikipedia.PageLink(wikipedia.mylang,arg.args[0])
                print "WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink())
            else:
                print "Current categories: ",cats
                catpl = wikipedia.PageLink(wikipedia.mylang, cat_namespace + ':' + newcat)
                if sort_by_last_name:
                    catpl = sorted_by_last_name(catpl, pl2)
                if catpl in cats:
                    print "%s already has %s"%(pl2.aslocallink(),catpl.aslocallink())
                else:
                    wikipedia.output(u'Adding %s' % catpl.aslocallink())
                    cats.append(catpl)
                    text = pl2.get()
                    text = wikipedia.replaceCategoryLinks(text, cats)
                    pl2.put(text)
|