rem (string, lengths 0–322k) | add (string, lengths 0–2.05M) | context (string, lengths 8–228k) |
---|---|---|
print "Logging in to ",wikipedia.family.hostname(wikipedia.mylang)
|
wikipedia.output(u"Logging in to %s" % wikipedia.family.hostname(wikipedia.mylang))
|
def makepath(path): """ creates missing directories for the given path and returns a normalized absolute version of the path. - if the given path already exists in the filesystem the filesystem is not modified. - otherwise makepath creates directories along the given path using the dirname() of the path. You may append a '/' to the path if you want it to be a directory path. from [email protected] 2002/03/18 """ from os import makedirs from os.path import normpath,dirname,exists,abspath dpath = normpath(dirname(path)) if not exists(dpath): makedirs(dpath) return normpath(abspath(path))
|
username=raw_input('username: ') password=getpass.getpass('password: ')
|
username = wikipedia.input(u'username:', encode = True) password = getpass.getpass(u'password: ') password = unicode(password, config.console_encoding) password = password.encode(wikipedia.myencoding())
|
def makepath(path): """ creates missing directories for the given path and returns a normalized absolute version of the path. - if the given path already exists in the filesystem the filesystem is not modified. - otherwise makepath creates directories along the given path using the dirname() of the path. You may append a '/' to the path if you want it to be a directory path. from [email protected] 2002/03/18 """ from os import makedirs from os.path import normpath,dirname,exists,abspath dpath = normpath(dirname(path)) if not exists(dpath): makedirs(dpath) return normpath(abspath(path))
|
pass from_pl = ''
|
print "Can't work on redirect page." return from_pl = ""
|
def treat(to_pl): try: to_text = to_pl.get() interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_pl = '' for interwiki in interwikis: if interwiki.code() == from_lang: from_pl = interwiki if from_pl == '': print 'Interwiki link to ' + from_lang + ' not found.' raise NoInterwiki('Interwiki link not found.') try: interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_text = from_pl.get() wikipedia.setAction(msg[msglang] + from_lang + ':' + from_pl.linkname()) # search start of table table = get_table(from_text) table = translate(table) # add table to top of the article, seperated by a blank lines to_text = table + '\n\n' + to_text if not debug: print 'Changing page %s' % (to_pl) to_pl.put(to_text)
|
if from_pl == '': print 'Interwiki link to ' + from_lang + ' not found.' raise NoInterwiki('Interwiki link not found.') try: interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass
|
if from_pl == "": print "Interwiki link to " + from_lang + " not found." return
|
def treat(to_pl): try: to_text = to_pl.get() interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_pl = '' for interwiki in interwikis: if interwiki.code() == from_lang: from_pl = interwiki if from_pl == '': print 'Interwiki link to ' + from_lang + ' not found.' raise NoInterwiki('Interwiki link not found.') try: interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_text = from_pl.get() wikipedia.setAction(msg[msglang] + from_lang + ':' + from_pl.linkname()) # search start of table table = get_table(from_text) table = translate(table) # add table to top of the article, seperated by a blank lines to_text = table + '\n\n' + to_text if not debug: print 'Changing page %s' % (to_pl) to_pl.put(to_text)
|
wikipedia.setAction(msg[msglang] + from_lang + ':' + from_pl.linkname())
|
wikipedia.setAction(msg[msglang] + from_lang + ":" + from_pl.linkname())
|
def treat(to_pl): try: to_text = to_pl.get() interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_pl = '' for interwiki in interwikis: if interwiki.code() == from_lang: from_pl = interwiki if from_pl == '': print 'Interwiki link to ' + from_lang + ' not found.' raise NoInterwiki('Interwiki link not found.') try: interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_text = from_pl.get() wikipedia.setAction(msg[msglang] + from_lang + ':' + from_pl.linkname()) # search start of table table = get_table(from_text) table = translate(table) # add table to top of the article, seperated by a blank lines to_text = table + '\n\n' + to_text if not debug: print 'Changing page %s' % (to_pl) to_pl.put(to_text)
|
table = translate(table)
|
if not table: print "No table found in %s." % from_lang + ":" + from_pl.linkname() return table = translate(table, type) if not table: print "Could not translate table." return
|
def treat(to_pl): try: to_text = to_pl.get() interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_pl = '' for interwiki in interwikis: if interwiki.code() == from_lang: from_pl = interwiki if from_pl == '': print 'Interwiki link to ' + from_lang + ' not found.' raise NoInterwiki('Interwiki link not found.') try: interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_text = from_pl.get() wikipedia.setAction(msg[msglang] + from_lang + ':' + from_pl.linkname()) # search start of table table = get_table(from_text) table = translate(table) # add table to top of the article, seperated by a blank lines to_text = table + '\n\n' + to_text if not debug: print 'Changing page %s' % (to_pl) to_pl.put(to_text)
|
to_text = table + '\n\n' + to_text
|
to_text = table + "\n\n" + to_text
|
def treat(to_pl): try: to_text = to_pl.get() interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_pl = '' for interwiki in interwikis: if interwiki.code() == from_lang: from_pl = interwiki if from_pl == '': print 'Interwiki link to ' + from_lang + ' not found.' raise NoInterwiki('Interwiki link not found.') try: interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_text = from_pl.get() wikipedia.setAction(msg[msglang] + from_lang + ':' + from_pl.linkname()) # search start of table table = get_table(from_text) table = translate(table) # add table to top of the article, seperated by a blank lines to_text = table + '\n\n' + to_text if not debug: print 'Changing page %s' % (to_pl) to_pl.put(to_text)
|
print 'Changing page %s' % (to_pl)
|
print "Changing page %s" % (to_pl)
|
def treat(to_pl): try: to_text = to_pl.get() interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_pl = '' for interwiki in interwikis: if interwiki.code() == from_lang: from_pl = interwiki if from_pl == '': print 'Interwiki link to ' + from_lang + ' not found.' raise NoInterwiki('Interwiki link not found.') try: interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_text = from_pl.get() wikipedia.setAction(msg[msglang] + from_lang + ':' + from_pl.linkname()) # search start of table table = get_table(from_text) table = translate(table) # add table to top of the article, seperated by a blank lines to_text = table + '\n\n' + to_text if not debug: print 'Changing page %s' % (to_pl) to_pl.put(to_text)
|
startR = re.compile(r'<table|\{\|')
|
startR = re.compile(r"<table|\{\|")
|
def treat(to_pl): try: to_text = to_pl.get() interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_pl = '' for interwiki in interwikis: if interwiki.code() == from_lang: from_pl = interwiki if from_pl == '': print 'Interwiki link to ' + from_lang + ' not found.' raise NoInterwiki('Interwiki link not found.') try: interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_text = from_pl.get() wikipedia.setAction(msg[msglang] + from_lang + ':' + from_pl.linkname()) # search start of table table = get_table(from_text) table = translate(table) # add table to top of the article, seperated by a blank lines to_text = table + '\n\n' + to_text if not debug: print 'Changing page %s' % (to_pl) to_pl.put(to_text)
|
endR = re.compile(r'</table>|\|\}')
|
endR = re.compile(r"</table>|\|\}")
|
def treat(to_pl): try: to_text = to_pl.get() interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_pl = '' for interwiki in interwikis: if interwiki.code() == from_lang: from_pl = interwiki if from_pl == '': print 'Interwiki link to ' + from_lang + ' not found.' raise NoInterwiki('Interwiki link not found.') try: interwikis = to_pl.interwiki() except wikipedia.IsRedirectPage: pass from_text = from_pl.get() wikipedia.setAction(msg[msglang] + from_lang + ':' + from_pl.linkname()) # search start of table table = get_table(from_text) table = translate(table) # add table to top of the article, seperated by a blank lines to_text = table + '\n\n' + to_text if not debug: print 'Changing page %s' % (to_pl) to_pl.put(to_text)
|
if debug: print 'No table found' pass
|
return
|
def get_table(text): pos = 0 # find first start tag first_start_tag = re.search(startR, text) if not first_start_tag: if debug: print 'No table found' pass else: if debug: print 'First start tag found at ' + str(first_start_tag.start()) pos = first_start_tag.end() table_level = 1 remaining_text = text while table_level != 0: remaining_text = text[pos:] next_start_tag = re.search(startR, remaining_text, pos) next_end_tag = re.search(endR, remaining_text, pos) if not next_end_tag: if debug: print 'Error: missing end tag' pass if next_start_tag and next_start_tag.start() < next_end_tag.start(): if debug: print 'Next start tag found at ' + str(pos + next_start_tag.start()) pos += next_start_tag.end() table_level += 1 if debug: print 'Table level is ' + str(table_level) else: if debug: print 'Next end tag found at ' + str(pos + next_end_tag.start()) pos += next_end_tag.end() table_level -= 1 if debug: print 'Table level is ' + str(table_level) if debug: print 'Table starts at ' + str(first_start_tag.start()) + ' and ends at ' + str(pos) if debug: print text[first_start_tag.start():pos] return text[first_start_tag.start():pos]
|
if debug: print 'First start tag found at ' + str(first_start_tag.start())
|
if debug: print "First start tag found at " + str(first_start_tag.start())
|
def get_table(text): pos = 0 # find first start tag first_start_tag = re.search(startR, text) if not first_start_tag: if debug: print 'No table found' pass else: if debug: print 'First start tag found at ' + str(first_start_tag.start()) pos = first_start_tag.end() table_level = 1 remaining_text = text while table_level != 0: remaining_text = text[pos:] next_start_tag = re.search(startR, remaining_text, pos) next_end_tag = re.search(endR, remaining_text, pos) if not next_end_tag: if debug: print 'Error: missing end tag' pass if next_start_tag and next_start_tag.start() < next_end_tag.start(): if debug: print 'Next start tag found at ' + str(pos + next_start_tag.start()) pos += next_start_tag.end() table_level += 1 if debug: print 'Table level is ' + str(table_level) else: if debug: print 'Next end tag found at ' + str(pos + next_end_tag.start()) pos += next_end_tag.end() table_level -= 1 if debug: print 'Table level is ' + str(table_level) if debug: print 'Table starts at ' + str(first_start_tag.start()) + ' and ends at ' + str(pos) if debug: print text[first_start_tag.start():pos] return text[first_start_tag.start():pos]
|
if debug: print 'Error: missing end tag'
|
if debug: print "Error: missing end tag"
|
def get_table(text): pos = 0 # find first start tag first_start_tag = re.search(startR, text) if not first_start_tag: if debug: print 'No table found' pass else: if debug: print 'First start tag found at ' + str(first_start_tag.start()) pos = first_start_tag.end() table_level = 1 remaining_text = text while table_level != 0: remaining_text = text[pos:] next_start_tag = re.search(startR, remaining_text, pos) next_end_tag = re.search(endR, remaining_text, pos) if not next_end_tag: if debug: print 'Error: missing end tag' pass if next_start_tag and next_start_tag.start() < next_end_tag.start(): if debug: print 'Next start tag found at ' + str(pos + next_start_tag.start()) pos += next_start_tag.end() table_level += 1 if debug: print 'Table level is ' + str(table_level) else: if debug: print 'Next end tag found at ' + str(pos + next_end_tag.start()) pos += next_end_tag.end() table_level -= 1 if debug: print 'Table level is ' + str(table_level) if debug: print 'Table starts at ' + str(first_start_tag.start()) + ' and ends at ' + str(pos) if debug: print text[first_start_tag.start():pos] return text[first_start_tag.start():pos]
|
if debug: print 'Next start tag found at ' + str(pos + next_start_tag.start())
|
if debug: print "Next start tag found at " + str(pos + next_start_tag.start())
|
def get_table(text): pos = 0 # find first start tag first_start_tag = re.search(startR, text) if not first_start_tag: if debug: print 'No table found' pass else: if debug: print 'First start tag found at ' + str(first_start_tag.start()) pos = first_start_tag.end() table_level = 1 remaining_text = text while table_level != 0: remaining_text = text[pos:] next_start_tag = re.search(startR, remaining_text, pos) next_end_tag = re.search(endR, remaining_text, pos) if not next_end_tag: if debug: print 'Error: missing end tag' pass if next_start_tag and next_start_tag.start() < next_end_tag.start(): if debug: print 'Next start tag found at ' + str(pos + next_start_tag.start()) pos += next_start_tag.end() table_level += 1 if debug: print 'Table level is ' + str(table_level) else: if debug: print 'Next end tag found at ' + str(pos + next_end_tag.start()) pos += next_end_tag.end() table_level -= 1 if debug: print 'Table level is ' + str(table_level) if debug: print 'Table starts at ' + str(first_start_tag.start()) + ' and ends at ' + str(pos) if debug: print text[first_start_tag.start():pos] return text[first_start_tag.start():pos]
|
if debug: print 'Table level is ' + str(table_level)
|
if debug: print "Table level is " + str(table_level)
|
def get_table(text): pos = 0 # find first start tag first_start_tag = re.search(startR, text) if not first_start_tag: if debug: print 'No table found' pass else: if debug: print 'First start tag found at ' + str(first_start_tag.start()) pos = first_start_tag.end() table_level = 1 remaining_text = text while table_level != 0: remaining_text = text[pos:] next_start_tag = re.search(startR, remaining_text, pos) next_end_tag = re.search(endR, remaining_text, pos) if not next_end_tag: if debug: print 'Error: missing end tag' pass if next_start_tag and next_start_tag.start() < next_end_tag.start(): if debug: print 'Next start tag found at ' + str(pos + next_start_tag.start()) pos += next_start_tag.end() table_level += 1 if debug: print 'Table level is ' + str(table_level) else: if debug: print 'Next end tag found at ' + str(pos + next_end_tag.start()) pos += next_end_tag.end() table_level -= 1 if debug: print 'Table level is ' + str(table_level) if debug: print 'Table starts at ' + str(first_start_tag.start()) + ' and ends at ' + str(pos) if debug: print text[first_start_tag.start():pos] return text[first_start_tag.start():pos]
|
if debug: print 'Next end tag found at ' + str(pos + next_end_tag.start())
|
if debug: print "Next end tag found at " + str(pos + next_end_tag.start())
|
def get_table(text): pos = 0 # find first start tag first_start_tag = re.search(startR, text) if not first_start_tag: if debug: print 'No table found' pass else: if debug: print 'First start tag found at ' + str(first_start_tag.start()) pos = first_start_tag.end() table_level = 1 remaining_text = text while table_level != 0: remaining_text = text[pos:] next_start_tag = re.search(startR, remaining_text, pos) next_end_tag = re.search(endR, remaining_text, pos) if not next_end_tag: if debug: print 'Error: missing end tag' pass if next_start_tag and next_start_tag.start() < next_end_tag.start(): if debug: print 'Next start tag found at ' + str(pos + next_start_tag.start()) pos += next_start_tag.end() table_level += 1 if debug: print 'Table level is ' + str(table_level) else: if debug: print 'Next end tag found at ' + str(pos + next_end_tag.start()) pos += next_end_tag.end() table_level -= 1 if debug: print 'Table level is ' + str(table_level) if debug: print 'Table starts at ' + str(first_start_tag.start()) + ' and ends at ' + str(pos) if debug: print text[first_start_tag.start():pos] return text[first_start_tag.start():pos]
|
if debug: print 'Table level is ' + str(table_level) if debug: print 'Table starts at ' + str(first_start_tag.start()) + ' and ends at ' + str(pos)
|
if debug: print "Table level is " + str(table_level) if debug: print "Table starts at " + str(first_start_tag.start()) + " and ends at " + str(pos)
|
def get_table(text): pos = 0 # find first start tag first_start_tag = re.search(startR, text) if not first_start_tag: if debug: print 'No table found' pass else: if debug: print 'First start tag found at ' + str(first_start_tag.start()) pos = first_start_tag.end() table_level = 1 remaining_text = text while table_level != 0: remaining_text = text[pos:] next_start_tag = re.search(startR, remaining_text, pos) next_end_tag = re.search(endR, remaining_text, pos) if not next_end_tag: if debug: print 'Error: missing end tag' pass if next_start_tag and next_start_tag.start() < next_end_tag.start(): if debug: print 'Next start tag found at ' + str(pos + next_start_tag.start()) pos += next_start_tag.end() table_level += 1 if debug: print 'Table level is ' + str(table_level) else: if debug: print 'Next end tag found at ' + str(pos + next_end_tag.start()) pos += next_end_tag.end() table_level -= 1 if debug: print 'Table level is ' + str(table_level) if debug: print 'Table starts at ' + str(first_start_tag.start()) + ' and ends at ' + str(pos) if debug: print text[first_start_tag.start():pos] return text[first_start_tag.start():pos]
|
if self.oldCat.copyTo(newCatTitle): if batchMode == False: if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle self.oldCat.delete(reason)
|
if self.oldCat.copyAndKeep(newCatTitle, wikipedia.translate(wikipedia.getSite(), cfd_templates)): if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle if batchMode == True: self.oldCat.delete(reason, False)
|
def run(self): newCat = catlib.Category(wikipedia.getSite(), 'Category:' + self.newCatTitle) gen = pagegenerators.CategorizedPageGenerator(self.oldCat, recurse = False) preloadingGen = pagegenerators.PreloadingGenerator(gen) for article in preloadingGen: catlib.change_category(article, self.oldCat, newCat) # TODO: create subcategory generator subcategories = self.oldCat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category ' + self.oldCat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.oldCat, newCat) if self.oldCat.exists(): # try to copy page contents to new cat page if self.oldCat.copyTo(newCatTitle):
|
wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
|
self.oldCat.delete(reason, True) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
|
def run(self): newCat = catlib.Category(wikipedia.getSite(), 'Category:' + self.newCatTitle) gen = pagegenerators.CategorizedPageGenerator(self.oldCat, recurse = False) preloadingGen = pagegenerators.PreloadingGenerator(gen) for article in preloadingGen: catlib.change_category(article, self.oldCat, newCat) # TODO: create subcategory generator subcategories = self.oldCat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category ' + self.oldCat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.oldCat, newCat) if self.oldCat.exists(): # try to copy page contents to new cat page if self.oldCat.copyTo(newCatTitle):
|
if self.cat.exists() and self.cat.isEmpty() and batchMode == False:
|
if self.cat.exists() and self.cat.isEmpty():
|
def run(self): articles = self.cat.articles(recurse = 0) if len(articles) == 0: wikipedia.output(u'There are no articles in category %s' % self.cat.title()) else: for article in articles: catlib.change_category(article, self.cat, None) # Also removes the category tag from subcategories' pages subcategories = self.cat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category %s' % self.cat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.cat.title(), None) if self.cat.exists() and self.cat.isEmpty() and batchMode == False: reason = wikipedia.translate(wikipedia.getSite(), self.deletion_reason_remove) self.cat.delete(reason)
|
self.cat.delete(reason)
|
if batchMode == True: self.cat.delete(reason, False) else: self.cat.delete(reason, True)
|
def run(self): articles = self.cat.articles(recurse = 0) if len(articles) == 0: wikipedia.output(u'There are no articles in category %s' % self.cat.title()) else: for article in articles: catlib.change_category(article, self.cat, None) # Also removes the category tag from subcategories' pages subcategories = self.cat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category %s' % self.cat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.cat.title(), None) if self.cat.exists() and self.cat.isEmpty() and batchMode == False: reason = wikipedia.translate(wikipedia.getSite(), self.deletion_reason_remove) self.cat.delete(reason)
|
+ "\s*(?:<script[^<>]+>[^<>]+<[^<>]+script>)?[^+<a href=.+?>(?P<key>.+?)<\/a><br \/>\n"
|
+ "\s*<a href=.+?>(?P<key>.+?)<\/a><br \/>\n"
|
def refresh_messages(site = None): site = site or wikipedia.getSite() # get 'all messages' special page's path path = site.allmessages_address() print 'Retrieving MediaWiki messages for %s' % repr(site) wikipedia.put_throttle() # It actually is a get, but a heavy one. allmessages = site.getUrl(path) print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. if site.version() >= "1.5": itemR = re.compile("<tr class='def' id='.*?'>\n" # first possibility: original MediaWiki message used + "\s*<td>\n" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*(?P<current>.+?)\n" # current message + "\s*</td>" + "\s*</tr>" + "|" + "<tr class='orig' id='.*?'>\n" # second possibility: custom message used + "\s*<td rowspan='2'>" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key2>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*.+?\n" # original message + "\s*</td>" + "\s*</tr><tr class='new' id='.*?'>" + "\s*<td>\n" + "\s*(?P<current2>.+?)\n" # current message + "\s*</td>" + "\s*</tr>", re.DOTALL) else: itemR = re.compile("<tr bgcolor=\"#[0-9a-f]{6}\"><td>\n" + "\s*(?:<script[^<>]+>[^<>]+<[^<>]+script>)?[^+<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>.+?<\/a>\n" + "\s*</td><td>\n" + "\s*.+?\n" + "\s*</td><td>\n" + "\s*(?P<current>.+?)\n" + "\s*<\/td><\/tr>", re.DOTALL) # we will save the found key:value pairs here dictionary = {} for match in itemR.finditer(allmessages): # Key strings only contain ASCII characters, so we can use them as dictionary keys key = match.group('key') or match.group('key2') current = match.group('current') or match.group('current2') dictionary[key] = current # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. Create if necessary. if dictionary == {}: wikipedia.debugDump( 'MediaWiki_Msg', site, u'Error URL: '+unicode(path), allmessages ) sys.exit() else: f = open(makepath('mediawiki-messages/mediawiki-messages-%s-%s.dat' % (site.family.name, site.lang)), 'w') pickle.dump(dictionary, f) f.close() #print dictionary['addgroup'] #print dictionary['sitestatstext']
|
print "Search string is:\n",textsearch print "Replace string is:\n",textsubst
|
def parseFile(filename):
|
|
print newtext
|
def parseFile(filename):
|
|
return Page(self.site(), self.site().namespace(ns - 1) + ':' + self.titleWithoutNamespace())
|
if self.namespace() == 1: return Page(self.site(), self.titleWithoutNamespace()) else: return Page(self.site(), self.site().namespace(ns - 1) + ':' + self.titleWithoutNamespace())
|
def switchTalkPage(self): """ If self is a talk page, returns the associated content page; otherwise, returns the associated talk page. Returns None if self is a special page. """ ns = self.namespace() if ns < 0: # Special page return None if self.isTalkPage(): return Page(self.site(), self.site().namespace(ns - 1) + ':' + self.titleWithoutNamespace()) else: return Page(self.site(), self.site().namespace(ns + 1) + ':' + self.titleWithoutNamespace())
|
if self.primaryIgnoreManager.isIgnored(refpl2):
|
if not self.primaryIgnoreManager.isIgnored(refpl2):
|
def treat(self, refpl, disambPl): """ Parameters: disambPl - The disambiguation page or redirect we don't want anything to link on refpl - A page linking to disambPl Returns False if the user pressed q to completely quit the program. Otherwise, returns True. """ if self.mylang in link_trail: linktrail=link_trail[self.mylang] else: linktrail='[a-z]*' trailR=re.compile(linktrail) # The regular expression which finds links. Results consist of three groups: # group(1) is the target page title, that is, everything before | or ]. # group(2) is the alternative link title, that's everything between | and ]. # group(3) is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. linkR=re.compile(r'\[\[([^\]\|]*)(?:\|([^\]]*))?\]\](' + linktrail + ')') try: include = False text=refpl.get(throttle=False) include = True except wikipedia.IsRedirectPage: wikipedia.output(u'%s is a redirect to %s' % (refpl.linkname(), disambPl.linkname())) if self.solve_redirect: choice = wikipedia.input(u'Do you want to make redirect %s point to %s? [y|N]' % (refpl.linkname(), target)) if choice2 == 'y': redir_text = '#REDIRECT [[%s]]' % target refpl.put(redir_text) else: choice = wikipedia.input(u'Do you want to work on pages linking to %s? [y|N|c(hange redirect)]' % refpl.linkname()) if choice == 'y': gen = ReferringPageGenerator(refpl) for refpl2 in gen.generate(): if self.primaryIgnoreManager.isIgnored(refpl2): # run until the user selected 'quit' if not self.treat(refpl2, disambPl): break elif choice == 'c': text="#%s [[%s]]"%(self.mysite.redirect(default=True), disambPl.linkname()) include = "redirect" if include in [True,"redirect"]: # make a backup of the original text so we can show the changes later original_text=text n = 0 curpos = 0 edited = False # This loop will run until we have finished the current page while True: m=linkR.search(text, pos = curpos) if not m: if n == 0: wikipedia.output(u"No changes necessary in %s" % refpl.linkname()) return True else: # stop loop and save page break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 # Try to standardize the page. if wikipedia.isInterwikiLink(m.group(1)): continue else: linkpl=wikipedia.PageLink(disambPl.site(), m.group(1)) # Check whether the link found is to disambPl. if linkpl != disambPl: continue n += 1 # how many bytes should be displayed around the current link context = 30 # This loop will run while the user doesn't choose an option # that will actually change the page while True: print '\n' wikipedia.output(u">>> %s <<<" % refpl.linkname()) # at the beginning of the link, start red color. 
# at the end of the link, reset the color to default displayedText = text[max(0, m.start() - context):m.start()] + wikipedia.colorize(text[m.start():m.end()], '91') + text[m.end():m.end()+context] wikipedia.output(displayedText) if not self.always: if edited: choice=wikipedia.input(u"Option (#, r#, s=skip link, e=edit page, n=next page, u=unlink,\n" " q=quit, m=more context, l=list, a=add new, x=save in this form):") else: choice=wikipedia.input(u"Option (#, r#, s=skip link, e=edit page, n=next page, u=unlink,\n" " q=quit, m=more context, l=list, a=add new):") else: choice = self.always if choice=='a': newAlternative = wikipedia.input(u'New alternative:') self.alternatives.append(newAlternative) self.listAlternatives() elif choice=='e': import gui edit_window = gui.EditBoxWindow() newtxt = edit_window.edit(text) # if user didn't press Cancel if newtxt: text = newtxt break elif choice=='l': self.listAlternatives() elif choice=='m': # show more text around the link we're working on context*=2 else: break if choice == 'e': # user has edited the page and then pressed 'OK' edited = True curpos = 0 continue elif choice == 'n': # skip this page if self.primary: # If run with the -primary argument, skip this occurence next time. self.primaryIgnoreManager.ignore(refpl) return True elif choice=='q': # quit the program return False elif choice=='s': # Next link on this page n -= 1 continue elif choice=='x' and edited: # Save the page as is break # The link looks like this: # [[page_title|link_text]]trailing_chars page_title = m.group(1) link_text = m.group(2) if not link_text: # or like this: [[page_title]]trailing_chars link_text = page_title trailing_chars = m.group(3) if trailing_chars: link_text += trailing_chars if choice=='u': # unlink text = text[:m.start()] + link_text + text[m.end():] continue else: if len(choice)>0 and choice[0] == 'r': # we want to throw away the original link text replaceit = 1 choice = choice[1:] elif include == "redirect": replaceit = 1 else: replaceit = 0 try: choice=int(choice) except ValueError: print '\nUnknown option' # step back to ask the user again what to do with the current link curpos -= 1 continue if choice >= len(self.alternatives) or choice < 0: print '\nChoice out of range. Please select a number between 0 and %d.' % (len(self.alternatives) - 1) # show list of possible choices self.listAlternatives() # step back to ask the user again what to do with the current link curpos -= 1 continue new_page_title = self.alternatives[choice] reppl = wikipedia.PageLink(disambPl.site(), new_page_title) new_page_title = reppl.linkname() # There is a function that uncapitalizes the link target's first letter # if the link description starts with a small letter. This is useful on # nl: but annoying on de:. # At the moment the de: exclusion is only a workaround because I don't # know if other languages don't want this feature either. # We might want to introduce a list of languages that don't want to use # this feature. 
if self.mylang != 'de' and link_text[0] in 'abcdefghijklmnopqrstuvwxyz': new_page_title = new_page_title[0].lower() + new_page_title[1:] if replaceit and trailing_chars: newlink = "[[%s]]%s" % (new_page_title, trailing_chars) elif new_page_title == link_text or replaceit: newlink = "[[%s]]" % new_page_title # check if we can create a link with trailing characters instead of a pipelink elif len(new_page_title) <= len(link_text) and link_text[:len(new_page_title)] == new_page_title and re.sub(trailR, '', link_text[len(new_page_title):]) == '': newlink = "[[%s]]%s" % (new_page_title, link_text[len(new_page_title):]) else: newlink = "[[%s|%s]]" % (new_page_title, link_text) text = text[:m.start()] + newlink + text[m.end():] continue wikipedia.output(text[max(0,m.start()-30):m.end()+30]) print '\nThe following changes have been made:\n' wikipedia.showColorDiff(original_text, text) print '' # save the page refpl.put(text) return True
|
if not self.treat(refpl2, disambPl):
|
if not self.treat(refpl2, refpl):
|
def treat(self, refpl, disambPl): """ Parameters: disambPl - The disambiguation page or redirect we don't want anything to link on refpl - A page linking to disambPl Returns False if the user pressed q to completely quit the program. Otherwise, returns True. """ if self.mylang in link_trail: linktrail=link_trail[self.mylang] else: linktrail='[a-z]*' trailR=re.compile(linktrail) # The regular expression which finds links. Results consist of three groups: # group(1) is the target page title, that is, everything before | or ]. # group(2) is the alternative link title, that's everything between | and ]. # group(3) is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. linkR=re.compile(r'\[\[([^\]\|]*)(?:\|([^\]]*))?\]\](' + linktrail + ')') try: include = False text=refpl.get(throttle=False) include = True except wikipedia.IsRedirectPage: wikipedia.output(u'%s is a redirect to %s' % (refpl.linkname(), disambPl.linkname())) if self.solve_redirect: choice = wikipedia.input(u'Do you want to make redirect %s point to %s? [y|N]' % (refpl.linkname(), target)) if choice2 == 'y': redir_text = '#REDIRECT [[%s]]' % target refpl.put(redir_text) else: choice = wikipedia.input(u'Do you want to work on pages linking to %s? [y|N|c(hange redirect)]' % refpl.linkname()) if choice == 'y': gen = ReferringPageGenerator(refpl) for refpl2 in gen.generate(): if self.primaryIgnoreManager.isIgnored(refpl2): # run until the user selected 'quit' if not self.treat(refpl2, disambPl): break elif choice == 'c': text="#%s [[%s]]"%(self.mysite.redirect(default=True), disambPl.linkname()) include = "redirect" if include in [True,"redirect"]: # make a backup of the original text so we can show the changes later original_text=text n = 0 curpos = 0 edited = False # This loop will run until we have finished the current page while True: m=linkR.search(text, pos = curpos) if not m: if n == 0: wikipedia.output(u"No changes necessary in %s" % refpl.linkname()) return True else: # stop loop and save page break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 # Try to standardize the page. if wikipedia.isInterwikiLink(m.group(1)): continue else: linkpl=wikipedia.PageLink(disambPl.site(), m.group(1)) # Check whether the link found is to disambPl. if linkpl != disambPl: continue n += 1 # how many bytes should be displayed around the current link context = 30 # This loop will run while the user doesn't choose an option # that will actually change the page while True: print '\n' wikipedia.output(u">>> %s <<<" % refpl.linkname()) # at the beginning of the link, start red color. 
# at the end of the link, reset the color to default displayedText = text[max(0, m.start() - context):m.start()] + wikipedia.colorize(text[m.start():m.end()], '91') + text[m.end():m.end()+context] wikipedia.output(displayedText) if not self.always: if edited: choice=wikipedia.input(u"Option (#, r#, s=skip link, e=edit page, n=next page, u=unlink,\n" " q=quit, m=more context, l=list, a=add new, x=save in this form):") else: choice=wikipedia.input(u"Option (#, r#, s=skip link, e=edit page, n=next page, u=unlink,\n" " q=quit, m=more context, l=list, a=add new):") else: choice = self.always if choice=='a': newAlternative = wikipedia.input(u'New alternative:') self.alternatives.append(newAlternative) self.listAlternatives() elif choice=='e': import gui edit_window = gui.EditBoxWindow() newtxt = edit_window.edit(text) # if user didn't press Cancel if newtxt: text = newtxt break elif choice=='l': self.listAlternatives() elif choice=='m': # show more text around the link we're working on context*=2 else: break if choice == 'e': # user has edited the page and then pressed 'OK' edited = True curpos = 0 continue elif choice == 'n': # skip this page if self.primary: # If run with the -primary argument, skip this occurence next time. self.primaryIgnoreManager.ignore(refpl) return True elif choice=='q': # quit the program return False elif choice=='s': # Next link on this page n -= 1 continue elif choice=='x' and edited: # Save the page as is break # The link looks like this: # [[page_title|link_text]]trailing_chars page_title = m.group(1) link_text = m.group(2) if not link_text: # or like this: [[page_title]]trailing_chars link_text = page_title trailing_chars = m.group(3) if trailing_chars: link_text += trailing_chars if choice=='u': # unlink text = text[:m.start()] + link_text + text[m.end():] continue else: if len(choice)>0 and choice[0] == 'r': # we want to throw away the original link text replaceit = 1 choice = choice[1:] elif include == "redirect": replaceit = 1 else: replaceit = 0 try: choice=int(choice) except ValueError: print '\nUnknown option' # step back to ask the user again what to do with the current link curpos -= 1 continue if choice >= len(self.alternatives) or choice < 0: print '\nChoice out of range. Please select a number between 0 and %d.' % (len(self.alternatives) - 1) # show list of possible choices self.listAlternatives() # step back to ask the user again what to do with the current link curpos -= 1 continue new_page_title = self.alternatives[choice] reppl = wikipedia.PageLink(disambPl.site(), new_page_title) new_page_title = reppl.linkname() # There is a function that uncapitalizes the link target's first letter # if the link description starts with a small letter. This is useful on # nl: but annoying on de:. # At the moment the de: exclusion is only a workaround because I don't # know if other languages don't want this feature either. # We might want to introduce a list of languages that don't want to use # this feature. 
if self.mylang != 'de' and link_text[0] in 'abcdefghijklmnopqrstuvwxyz': new_page_title = new_page_title[0].lower() + new_page_title[1:] if replaceit and trailing_chars: newlink = "[[%s]]%s" % (new_page_title, trailing_chars) elif new_page_title == link_text or replaceit: newlink = "[[%s]]" % new_page_title # check if we can create a link with trailing characters instead of a pipelink elif len(new_page_title) <= len(link_text) and link_text[:len(new_page_title)] == new_page_title and re.sub(trailR, '', link_text[len(new_page_title):]) == '': newlink = "[[%s]]%s" % (new_page_title, link_text[len(new_page_title):]) else: newlink = "[[%s|%s]]" % (new_page_title, link_text) text = text[:m.start()] + newlink + text[m.end():] continue wikipedia.output(text[max(0,m.start()-30):m.end()+30]) print '\nThe following changes have been made:\n' wikipedia.showColorDiff(original_text, text) print '' # save the page refpl.put(text) return True
|
wikipedia.output(u'Skiping: %s is auto entry %s(%s)' % (page,dictName,year))
|
wikipedia.output(u'Skiping: %s is auto entry %s(%s)' % (page.title(),dictName,year))
|
def generateMore(self, number): """Generate more subjects. This is called internally when the list of subjects becomes too small, but only if there is a PageGenerator""" fs = self.firstSubject() if fs: wikipedia.output(u"NOTE: The first unfinished subject is " + fs.pl().aslink(forceInterwiki = True)) print "NOTE: Number of pages queued is %d, trying to add %d more."%(len(self.subjects), number) for i in range(number): try: while True: page = self.pageGenerator.next() if page in globalvar.skip: wikipedia.output(u'Skiping: %s is in the skip list' % page) continue if globalvar.skipauto: dictName, year = date.getDictionaryYear(page.site().language(), page.title()) if dictName != None: wikipedia.output(u'Skiping: %s is auto entry %s(%s)' % (page,dictName,year)) continue break
|
'ro': (LINKS, u"Wikipedia:Articole de calitate"),
|
'ro': (LINKS, u"Wikipedia:Articole fructuoase"),
|
def LINKS(site,name, ignore=[]): p=wikipedia.Page(site, name) links=p.linkedPages() for n in links[:]: if n.titleWithoutNamespace() in ignore: links.remove(n) links.sort() return links
|
'he' : [u'disambiguationafter', u'פירושונים'],
|
'he' : [u'disambiguationAfter', u'פירושונים'],
|
def __init__(self):
|
'sr': [u'СТРАНИЦА']
|
'sr': [u'СТРАНИЦА'],
|
def category_namespaces(self, code): namespaces = [] namespace_title = self.namespace(code, 14) namespaces.append(namespace_title) if namespace_title != namespace_title.lower(): namespaces.append(namespace_title.lower()) default_namespace_title = self.namespace('_default', 14) if namespace_title != default_namespace_title: namespaces.append(default_namespace_title) if default_namespace_title != default_namespace_title.lower(): namespaces.append(default_namespace_title.lower()) return namespaces
|
'ru': [u'НАЗВАНИЕСТРАНИЦЫ2']
|
'ru': [u'НАЗВАНИЕСТРАНИЦЫ2'],
|
def category_namespaces(self, code): namespaces = [] namespace_title = self.namespace(code, 14) namespaces.append(namespace_title) if namespace_title != namespace_title.lower(): namespaces.append(namespace_title.lower()) default_namespace_title = self.namespace('_default', 14) if namespace_title != default_namespace_title: namespaces.append(default_namespace_title) if default_namespace_title != default_namespace_title.lower(): namespaces.append(default_namespace_title.lower()) return namespaces
|
def change_category(article, old_cat, new_cat):
|
def change_category(article, old_cat_title, new_cat_title):
|
def add_category(sort_by_last_name = False): print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input('Wikipedia page with list of pages to change: ') if listpage: try: pl = wikipedia.PageLink(wikipedia.mylang, listpage) except NoPage: print 'The page ' + listpage + ' could not be loaded from the server.' sys.exit() pagenames = pl.links() else: refpage = wikipedia.input('Wikipedia page that is now linked to: ') pl = wikipedia.PageLink(wikipedia.mylang, refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input('Category to add (do not give namespace) : ') newcat = newcat[:1].capitalize() + newcat[1:] ns = wikipedia.family.category_namespaces(wikipedia.mylang) cat_namespace = ns[0].encode(wikipedia.code2encoding(wikipedia.mylang)) if not sort_by_last_name: catpl = wikipedia.PageLink(wikipedia.mylang, cat_namespace + ':' + newcat) print "Will add %s"%catpl.aslocallink() answer = '' for nm in pagenames: pl2 = wikipedia.PageLink(wikipedia.mylang, nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input("%s [y/n/a(ll)] : "%(pl2.asasciilink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input("This should be used if and only if you are sure that your links are correct !!! Are you sure ? [y/n] : ") if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: print "%s doesn't exist yet. Ignoring."%(pl2.aslocallink()) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(wikipedia.mylang,arg.args[0]) print "WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink()) else: print "Current categories: ",cats if sort_by_last_name: page_name = pl2.linkname() split_string = page_name.split(' ') if len(split_string) > 1: # pull last part of the name to the beginning, and append the rest after a comma # e.g. "John von Neumann" becomes "Neumann, John von" new_name = split_string[-1] + ', ' + string.join(split_string[:-1], ' ') # give explicit sort key catpl = wikipedia.PageLink(wikipedia.mylang, cat_namespace + ':' + newcat + '|' + new_name) else: catpl = wikipedia.PageLink(wikipedia.mylang, cat_namespace + ':' + newcat) if catpl in cats: print "%s already has %s"%(pl2.aslocallink(),catpl.aslocallink()) else: cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text, comment = catpl.aslocallink().encode(wikipedia.code2encoding(wikipedia.mylang)))
|
cats.remove(old_cat) if new_cat != None:
|
sort_key = '' for cat in cats: ns = wikipedia.family.category_namespaces(wikipedia.mylang)[0].encode(wikipedia.code2encoding(wikipedia.mylang)) if cat.linkname() == ns + ':' + old_cat_title: cats.remove(cat) elif cat.linkname().startswith(ns + ':' + old_cat_title + '|'): sort_key = cat.linkname().split('|', 1)[1] cats.remove(cat) if new_cat_title != None: if sort_key == '': new_cat = catlib.CatLink(new_cat_title) else: new_cat = catlib.CatLink(new_cat_title + '|' + sort_key)
|
def change_category(article, old_cat, new_cat): cats = article.categories() cats.remove(old_cat) if new_cat != None: cats.append(new_cat) text = article.get() text = wikipedia.replaceCategoryLinks(text, cats) article.put(text)
|
old_title = wikipedia.input('Please enter the old name of the category: ') old_cat = catlib.CatLink(old_title) new_title = wikipedia.input('Please enter the new name of the category: ') new_cat = catlib.CatLink(new_title)
|
old_cat_title = wikipedia.input('Please enter the old name of the category: ') old_cat = catlib.CatLink(old_cat_title) new_cat_title = wikipedia.input('Please enter the new name of the category: ')
|
def rename_category(): old_title = wikipedia.input('Please enter the old name of the category: ') old_cat = catlib.CatLink(old_title) new_title = wikipedia.input('Please enter the new name of the category: ') new_cat = catlib.CatLink(new_title) # get edit summary message wikipedia.setAction(msg_change[wikipedia.chooselang(wikipedia.mylang,msg_change)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, new_cat) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, new_cat)
|
wikipedia.setAction(msg_change[wikipedia.chooselang(wikipedia.mylang,msg_change)] % old_title)
|
wikipedia.setAction(msg_change[wikipedia.chooselang(wikipedia.mylang,msg_change)] % old_cat_title)
|
def rename_category(): old_title = wikipedia.input('Please enter the old name of the category: ') old_cat = catlib.CatLink(old_title) new_title = wikipedia.input('Please enter the new name of the category: ') new_cat = catlib.CatLink(new_title) # get edit summary message wikipedia.setAction(msg_change[wikipedia.chooselang(wikipedia.mylang,msg_change)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, new_cat) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, new_cat)
|
print 'There are no articles in category ' + old_title
|
print 'There are no articles in category ' + old_cat_title
|
def rename_category(): old_title = wikipedia.input('Please enter the old name of the category: ') old_cat = catlib.CatLink(old_title) new_title = wikipedia.input('Please enter the new name of the category: ') new_cat = catlib.CatLink(new_title) # get edit summary message wikipedia.setAction(msg_change[wikipedia.chooselang(wikipedia.mylang,msg_change)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, new_cat) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, new_cat)
|
change_category(article, old_cat, new_cat)
|
change_category(article, old_cat_title, new_cat_title)
|
def rename_category(): old_title = wikipedia.input('Please enter the old name of the category: ') old_cat = catlib.CatLink(old_title) new_title = wikipedia.input('Please enter the new name of the category: ') new_cat = catlib.CatLink(new_title) # get edit summary message wikipedia.setAction(msg_change[wikipedia.chooselang(wikipedia.mylang,msg_change)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, new_cat) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, new_cat)
|
print 'There are no subcategories in category ' + old_title
|
print 'There are no subcategories in category ' + old_cat_title
|
def rename_category(): old_title = wikipedia.input('Please enter the old name of the category: ') old_cat = catlib.CatLink(old_title) new_title = wikipedia.input('Please enter the new name of the category: ') new_cat = catlib.CatLink(new_title) # get edit summary message wikipedia.setAction(msg_change[wikipedia.chooselang(wikipedia.mylang,msg_change)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, new_cat) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, new_cat)
|
change_category(subcategory, old_cat, new_cat)
|
change_category(subcategory, old_cat_title, new_cat_title)
|
def rename_category(): old_title = wikipedia.input('Please enter the old name of the category: ') old_cat = catlib.CatLink(old_title) new_title = wikipedia.input('Please enter the new name of the category: ') new_cat = catlib.CatLink(new_title) # get edit summary message wikipedia.setAction(msg_change[wikipedia.chooselang(wikipedia.mylang,msg_change)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, new_cat) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, new_cat)
|
old_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_title)
|
old_cat_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_cat_title)
|
def remove_category(): old_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_title) # get edit summary message wikipedia.setAction(msg_remove[wikipedia.chooselang(wikipedia.mylang,msg_remove)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, None) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, None)
|
wikipedia.setAction(msg_remove[wikipedia.chooselang(wikipedia.mylang,msg_remove)] % old_title)
|
wikipedia.setAction(msg_remove[wikipedia.chooselang(wikipedia.mylang,msg_remove)] % old_cat_title)
|
def remove_category(): old_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_title) # get edit summary message wikipedia.setAction(msg_remove[wikipedia.chooselang(wikipedia.mylang,msg_remove)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, None) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, None)
|
print 'There are no articles in category ' + old_title
|
print 'There are no articles in category ' + old_cat_title
|
def remove_category(): old_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_title) # get edit summary message wikipedia.setAction(msg_remove[wikipedia.chooselang(wikipedia.mylang,msg_remove)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, None) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, None)
|
change_category(article, old_cat, None)
|
change_category(article, old_cat_title, None)
|
def remove_category(): old_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_title) # get edit summary message wikipedia.setAction(msg_remove[wikipedia.chooselang(wikipedia.mylang,msg_remove)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, None) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, None)
|
print 'There are no subcategories in category ' + old_title
|
print 'There are no subcategories in category ' + old_cat_title
|
def remove_category(): old_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_title) # get edit summary message wikipedia.setAction(msg_remove[wikipedia.chooselang(wikipedia.mylang,msg_remove)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, None) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, None)
|
change_category(subcategory, old_cat, None)
|
change_category(subcategory, old_cat_title, None)
|
def remove_category(): old_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_title) # get edit summary message wikipedia.setAction(msg_remove[wikipedia.chooselang(wikipedia.mylang,msg_remove)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, None) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, None)
|
* every category page must contain some text; otherwise an edit box will be displayed instead of an article list, and the bot won't work
|
def remove_category(): old_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_title) # get edit summary message wikipedia.setAction(msg_remove[wikipedia.chooselang(wikipedia.mylang,msg_remove)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, None) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, None)
|
|
x,l=decode_func(x)
|
try: x,l=decode_func(x) except UnicodeError: print code,name print repr(x) raise
|
def getPage(code, name, do_edit=1, do_quote=1): """Get the contents of page 'name' from the 'code' language wikipedia""" host = langs[code] if host[-4:]=='.com': # Old algorithm name = re.sub('_', ' ', name) n=[] for x in name.split(): n.append(x[0].capitalize()+x[1:]) name='_'.join(n) #print name else: name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name!=urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if host[-4:] == '.org': # New software address = '/w/wiki.phtml?title='+name if do_edit: address += '&action=edit' elif host[-4:]=='.com': # Old software if not do_edit: raise "can not skip edit on old-software wikipedia" address = '/wiki.cgi?action=edit&id='+name if debug: print host,address text,charset = getUrl(host,address) if do_edit: if debug: print "Raw:",len(text),type(text),text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code]==charset charsets[code]=charset if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code,space2underline(name)]=m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code,name]=m.group(1) else: edittime[code,name]=0 try: i1 = re.search('<textarea[^>]*>',text).end() except AttributeError: #print "No text area.",host,address #print repr(text) raise LockedPage(text) i2 = re.search('</textarea>',text).start() if i2-i1 < 2: # new software raise NoPage() if debug: print text[i1:i2] if text[i1:i2] == 'Describe the new page here.\n': # old software raise NoPage() Rredirect=re.compile(r'\#redirect:? *\[\[(.*?)\]\]',re.I) m=Rredirect.match(text[i1:i2]) if m: raise IsRedirectPage(m.group(1)) assert edittime[code,name]!=0 or host[-4:]=='.com', "No edittime on non-empty page?! %s:%s\n%s"%(code,name,text) x=text[i1:i2] x=unescape(x) else: x=text # If not editing if charset=='utf-8': # Make it to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup('utf-8') x,l=decode_func(x) # Convert the unicode characters to &# references, and make it ascii. x=str(UnicodeToAsciiHtml(x)) return x
|
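The wrapped decode in the row above only adds diagnostics around the existing UTF-8 conversion. As a standalone illustration, here is a minimal sketch of the same defensive decode; the sample byte string is arbitrary, and codecs.lookup returns the encode/decode pair exactly as getPage() unpacks it:

    import codecs

    encode_func, decode_func, stream_reader, stream_writer = codecs.lookup('utf-8')
    raw = 'Z\xc3\xbcrich'               # sample UTF-8 bytes as fetched from the wiki
    try:
        text, length = decode_func(raw)
    except UnicodeError:
        # report the offending bytes before re-raising, as the patched code does
        print repr(raw)
        raise
    print text                          # u'Z\xfcrich'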
if code in ['meta','eo','ja','zh','hi']:
|
if code in ['meta','eo','ja','zh','hi','he','hu']:
|
def code2encoding(code): if code=='ru': return 'iso-8859-5' if code=='pl': return 'iso-8859-2' if code in ['meta','eo','ja','zh','hi']: return 'utf-8' return 'iso-8859-1'
|
result=str(name.encode('latin1'))
|
result=str(name.encode(code2encoding(code)))
|
def link2url(name,code='nl',incode='nl'): """Convert a interwiki link name of a page to the proper name to be used in a URL for that page. code should specify the language for the link, incode the language of the page the link is in.""" import urllib name=name[0].upper()+name[1:] name=name.strip() if '%' in name: name=url2unicode(name,encoding=code2encoding(code)) else: import urllib name=html2unicode(name,encoding=code2encoding(code), inencoding=code2encoding(incode)) try: #print "Trying to encode into latin1" result=str(name.encode('latin1')) #print "Result=",result except UnicodeError: result=str(name.encode('utf-8')) result=space2underline(result) return urllib.quote(result)
|
result=str(name.encode('utf-8'))
|
try: result=str(name.encode('latin1')) except UnicodeError: result=str(name.encode('utf-8'))
|
def link2url(name,code='nl',incode='nl'): """Convert a interwiki link name of a page to the proper name to be used in a URL for that page. code should specify the language for the link, incode the language of the page the link is in.""" import urllib name=name[0].upper()+name[1:] name=name.strip() if '%' in name: name=url2unicode(name,encoding=code2encoding(code)) else: import urllib name=html2unicode(name,encoding=code2encoding(code), inencoding=code2encoding(incode)) try: #print "Trying to encode into latin1" result=str(name.encode('latin1')) #print "Result=",result except UnicodeError: result=str(name.encode('utf-8')) result=space2underline(result) return urllib.quote(result)
|
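The try/except pair in link2url() is easier to see in isolation: a title that fits into Latin-1 keeps the legacy byte encoding, anything else falls back to UTF-8 before URL-quoting. A rough sketch under that reading; the sample titles are arbitrary and the capitalization/underscore handling of the real function is omitted:

    # -*- coding: utf-8 -*-
    import urllib

    def encode_title(name):
        try:
            result = str(name.encode('latin1'))
        except UnicodeError:
            result = str(name.encode('utf-8'))
        return urllib.quote(result.replace(' ', '_'))

    print encode_title(u'Zürich')    # fits Latin-1: 'Z%FCrich'
    print encode_title(u'Москва')    # falls back to UTF-8 percent-encoding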
def newpages(number=10, onlyonce=False site=None, throttle=True):
|
def newpages(number=10, onlyonce=False, site=None, throttle=True):
|
def newpages(number=10, onlyonce=False site=None, throttle=True): """Generator which yields new articles subsequently. It starts with the article created 'number' articles ago (first argument). When these are all yielded (as PageLinks) it fetches NewPages again. If there is no new page, it blocks until there is one, sleeping between subsequent fetches of NewPages. NOT FINISHED """ raise NotImplementedError, "this function is not finished yet, do not use" if site is None: site = getSite() while True: returned_html = getPage(site, site.newpagesname(number), do_quote=False, get_edit_page=False, throttle=throttle) start = "<ol start='1' class='special'>" end = "</ol>" startpos = returned_html.index(start) + len(start) endpos = startpos + returned_html[startpos:].index(end) relevant = returned_html[startpos:endpos] lines = [line.strip() for line in relevant.strip().split('\n')][::-1] for line in lines: # get date, pagelink, size, user, comment print line if onlyonce: break # get new batch: make sure they overlap and start with the newest # if they don't overlap, refetch with more articles # if the overlay = 100%: wait a while and try again return
|
if wikipedia.getSite('nn') in new or self.inpl.language() == 'nn':
|
if wikipedia.getSite('nn') in new or self.inpl.site().lang == 'nn':
|
def finish(self, sa = None): """Round up the subject, making any necessary changes. This method should be called exactly once after the todo list has gone empty.
|
insite.getSite()
|
insite = getSite()
|
def categoryFormat(links, insite = None): """Create a suitable string encoding all category links for a wikipedia page. 'links' should be a list of category pagelink objects. The string is formatted for inclusion in insite. """ if not links: return '' if insite is None: insite.getSite() s = [] for pl in links: s.append(pl.aslink()) if Site(default_code).category_on_one_line(): sep = ' ' else: sep = '\r\n' s.sort() s=sep.join(s) + '\r\n' return s
|
if Site(default_code).category_on_one_line():
|
if insite.category_on_one_line():
|
def categoryFormat(links, insite = None): """Create a suitable string encoding all category links for a wikipedia page. 'links' should be a list of category pagelink objects. The string is formatted for inclusion in insite. """ if not links: return '' if insite is None: insite.getSite() s = [] for pl in links: s.append(pl.aslink()) if Site(default_code).category_on_one_line(): sep = ' ' else: sep = '\r\n' s.sort() s=sep.join(s) + '\r\n' return s
|
wikipedia.output(u'TitleTranslate: %s was recognized as %s with value %d' % (pl.linkname(),dictName,year))
|
wikipedia.output(u'TitleTranslate: %s was recognized as %s with value %d' % (pl.title(),dictName,year))
|
def translate(pl, arr, same = False, hints = None, auto = True): site = pl.site() if same: return sametranslate(pl, arr, same) if hints: for h in hints: if h.find(':') == -1: # argument given as -hint:xy where xy is a language code codes = h newname = '' else: codes, newname = h.split(':', 1) if newname == '': # if given as -hint:xy or -hint:xy:, assume that there should # be a page in language xy with the same title as the page # we're currently working on newname = pl.title() if codes == 'all': codes = site.family.seriouslangs elif codes == '10' or codes == 'main': # names 'main' and 'more' kept for backward compatibility codes = site.family.biglangs elif codes == '20' or codes == 'more': codes = site.family.biglangs2 elif codes == '30': codes = site.family.biglangs3 elif codes == '50': codes = site.family.biglangs4 elif codes == 'cyril': codes = site.family.cyrilliclangs else: codes = codes.split(',') for newcode in codes: if newcode in site.languages(): if newcode != site.language(): x = wikipedia.Page(site.getSite(code=newcode), newname) if x not in arr: arr[x] = None else: wikipedia.output(u"Ignoring unknown language code %s"%newcode) # Autotranslate dates into all other languages, the rest will come from existing interwiki links. if auto: # search inside all dictionaries for this link dictName, year = date.getDictionaryYear( pl.site().language(), pl.title() ) if dictName: wikipedia.output(u'TitleTranslate: %s was recognized as %s with value %d' % (pl.linkname(),dictName,year)) for entryLang, entry in date.dateFormats[dictName].iteritems(): try: if entryLang != pl.site().language(): if dictName == 'yearsBC' and date.maxyearBC.has_key(pl.site().language()) and year > date.maxyearBC[pl.site().language()]: pass newname = entry(year) x = wikipedia.Page( wikipedia.getSite(code=newcode, fam=site.family), newname ) if x not in arr: arr[x] = None # add new page except: pass
|
'exceptions': [u'Zемфира', u'KoЯn', u'Deadушки', u'ENTERМУЗЫКА', u'Юz', u'Lюк', u'Яndex'],
|
def SetColor(color): try: import win32console stdout=win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE) stdout.SetConsoleTextAttribute(color) except: if color == FOREGROUND_BLUE: print '(b:' if color == FOREGROUND_GREEN: print '(g:' if color == FOREGROUND_RED: print '(r:'
|
|
'localsuspects': u'АаВЕеКкМНОоРрСсТуХх', 'latinsuspects': u'AaBEeKkMHOoPpCcTyXx',
|
'localsuspects': u'АаВЕеКкМНОопРрСсТуХх', 'latinsuspects': u'AaBEeKkMHOonPpCcTyXx',
|
def SetColor(color): try: import win32console stdout=win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE) stdout.SetConsoleTextAttribute(color) except: if color == FOREGROUND_BLUE: print '(b:' if color == FOREGROUND_GREEN: print '(g:' if color == FOREGROUND_RED: print '(r:'
|
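The two suspect strings above are parallel: the Cyrillic character at position i is the visual look-alike of the Latin character at position i, and the fix adds the missing п/n pair so the alignment still holds. Presumably they are consumed as a character-for-character map; a tiny sketch of that reading:

    # -*- coding: utf-8 -*-
    localsuspects = u'АаВЕеКкМНОопРрСсТуХх'
    latinsuspects = u'AaBEeKkMHOonPpCcTyXx'

    assert len(localsuspects) == len(latinsuspects)
    to_latin = dict(zip(localsuspects, latinsuspects))
    print to_latin[u'о']     # u'o', the Latin look-alike of Cyrillic small o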
'exceptions': [],
|
def SetColor(color): try: import win32console stdout=win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE) stdout.SetConsoleTextAttribute(color) except: if color == FOREGROUND_BLUE: print '(b:' if color == FOREGROUND_GREEN: print '(g:' if color == FOREGROUND_RED: print '(r:'
|
|
'localsuspects': u'АаВЕеІіКкМНОоРрСсТУуХх', 'latinsuspects': u'AaBEeIiKkMHOoPpCcTYyXx',
|
'localsuspects': u'АаВЕеІіКкМНОопРрСсТУуХх', 'latinsuspects': u'AaBEeIiKkMHOonPpCcTYyXx',
|
def SetColor(color): try: import win32console stdout=win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE) stdout.SetConsoleTextAttribute(color) except: if color == FOREGROUND_BLUE: print '(b:' if color == FOREGROUND_GREEN: print '(g:' if color == FOREGROUND_RED: print '(r:'
|
'localsuspects': u'АаВЕеКкМНОоРрСсТуХх', 'latinsuspects': u'AaBEeKkMHOoPpCcTyXx',
|
'localsuspects': u'АаВЕеКкМНОопРрСсТуХх', 'latinsuspects': u'AaBEeKkMHOonPpCcTyXx', }, 'be': { 'alphabet' : u'АаБбВвГ㥴ДдЖжЗзЕеЁёЖжЗзІіЙйКкЛлМмНнОоПпРрСсТтУуЎўФфХхЦцЧчШшЫыЬьЭэЮюЯя', 'localsuspects': u'АаВЕеІіКкМНОопРрСсТуХх', 'latinsuspects': u'AaBEeIiKkMHOonPpCcTyXx',
|
def SetColor(color): try: import win32console stdout=win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE) stdout.SetConsoleTextAttribute(color) except: if color == FOREGROUND_BLUE: print '(b:' if color == FOREGROUND_GREEN: print '(g:' if color == FOREGROUND_RED: print '(r:'
|
namespaces = [0, 10, 12, 14]
|
namespaces = []
|
def SetColor(color): try: import win32console stdout=win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE) stdout.SetConsoleTextAttribute(color) except: if color == FOREGROUND_BLUE: print '(b:' if color == FOREGROUND_GREEN: print '(g:' if color == FOREGROUND_RED: print '(r:'
|
if arg.startswith('-from:'): self.apfrom = arg[6:] elif arg.startswith('-from'): self.apfrom = wikipedia.input(u'Which page to start from: ')
|
if arg.startswith('-from'): if arg.startswith('-from:'): self.apfrom = arg[6:] else: self.apfrom = wikipedia.input(u'Which page to start from: ')
|
def __init__(self, args): for arg in args: arg = wikipedia.argHandler(arg, 'casechecker') if arg: if arg.startswith('-from:'): self.apfrom = arg[6:] elif arg.startswith('-from'): self.apfrom = wikipedia.input(u'Which page to start from: ') elif arg.startswith('-reqsize:'): self.aplimit = int(arg[9:]) elif arg == '-links': self.links = True elif arg == '-linksonly': self.links = True self.titles = False elif arg == '-replace': self.replace = True elif arg.startswith('-limit:'): self.stopAfter = int(arg[7:]) elif arg == '-verbose': self.verbose = True elif arg == '-autonomous': self.autonomous = True elif arg.startswith('-namespace:'): self.namespaces = [int(arg[11:])] elif arg.startswith('-wikilog:'): try: self.wikilog = codecs.open(arg[9:], 'a', 'utf-8') except IOError: self.wikilog = codecs.open(arg[9:], 'w', 'utf-8') else: wikipedia.output(u'Unknown argument %s' % arg) sys.exit()
|
elif arg.startswith('-namespace:'): self.namespaces = [int(arg[11:])]
|
elif arg.startswith('-ns:'): self.namespaces.append( int(arg[4:]) )
|
def __init__(self, args): for arg in args: arg = wikipedia.argHandler(arg, 'casechecker') if arg: if arg.startswith('-from:'): self.apfrom = arg[6:] elif arg.startswith('-from'): self.apfrom = wikipedia.input(u'Which page to start from: ') elif arg.startswith('-reqsize:'): self.aplimit = int(arg[9:]) elif arg == '-links': self.links = True elif arg == '-linksonly': self.links = True self.titles = False elif arg == '-replace': self.replace = True elif arg.startswith('-limit:'): self.stopAfter = int(arg[7:]) elif arg == '-verbose': self.verbose = True elif arg == '-autonomous': self.autonomous = True elif arg.startswith('-namespace:'): self.namespaces = [int(arg[11:])] elif arg.startswith('-wikilog:'): try: self.wikilog = codecs.open(arg[9:], 'a', 'utf-8') except IOError: self.wikilog = codecs.open(arg[9:], 'w', 'utf-8') else: wikipedia.output(u'Unknown argument %s' % arg) sys.exit()
|
self.params['what'] += '|links';
|
self.params['what'] += '|links|categories';
|
def __init__(self, args): for arg in args: arg = wikipedia.argHandler(arg, 'casechecker') if arg: if arg.startswith('-from:'): self.apfrom = arg[6:] elif arg.startswith('-from'): self.apfrom = wikipedia.input(u'Which page to start from: ') elif arg.startswith('-reqsize:'): self.aplimit = int(arg[9:]) elif arg == '-links': self.links = True elif arg == '-linksonly': self.links = True self.titles = False elif arg == '-replace': self.replace = True elif arg.startswith('-limit:'): self.stopAfter = int(arg[7:]) elif arg == '-verbose': self.verbose = True elif arg == '-autonomous': self.autonomous = True elif arg.startswith('-namespace:'): self.namespaces = [int(arg[11:])] elif arg.startswith('-wikilog:'): try: self.wikilog = codecs.open(arg[9:], 'a', 'utf-8') except IOError: self.wikilog = codecs.open(arg[9:], 'w', 'utf-8') else: wikipedia.output(u'Unknown argument %s' % arg) sys.exit()
|
self.knownWords = set(l['exceptions'])
|
def __init__(self, args): for arg in args: arg = wikipedia.argHandler(arg, 'casechecker') if arg: if arg.startswith('-from:'): self.apfrom = arg[6:] elif arg.startswith('-from'): self.apfrom = wikipedia.input(u'Which page to start from: ') elif arg.startswith('-reqsize:'): self.aplimit = int(arg[9:]) elif arg == '-links': self.links = True elif arg == '-linksonly': self.links = True self.titles = False elif arg == '-replace': self.replace = True elif arg.startswith('-limit:'): self.stopAfter = int(arg[7:]) elif arg == '-verbose': self.verbose = True elif arg == '-autonomous': self.autonomous = True elif arg.startswith('-namespace:'): self.namespaces = [int(arg[11:])] elif arg.startswith('-wikilog:'): try: self.wikilog = codecs.open(arg[9:], 'a', 'utf-8') except IOError: self.wikilog = codecs.open(arg[9:], 'w', 'utf-8') else: wikipedia.output(u'Unknown argument %s' % arg) sys.exit()
|
|
self.apfrom = self.apfrom
|
def Run(self): try: count = 0 for namespace in self.namespaces: self.params['apnamespace'] = namespace self.apfrom = self.apfrom title = None while True: # Get data self.params['apfrom'] = self.apfrom data = query.GetData(self.site.lang, self.params, self.verbose) try: self.apfrom = data['query']['allpages']['next'] except: self.apfrom = None # Process received data if 'pages' in data: for pageID, page in data['pages'].iteritems(): printed = False title = page['title'] if self.titles: err = self.ProcessTitle(title) if err: changed = False if self.replace and namespace != 14: newTitle = self.PickTarget(False, title, err[1]) if newTitle: src = wikipedia.Page(self.site, title) src.move( newTitle, u'mixed case rename') changed = True if not changed: self.WikiLog(u"* " + err[0]) printed = True if self.links: if 'links' in page: pageObj = None pageTxt = None msg = [] for l in page['links']: ltxt = l['*'] err = self.ProcessTitle(ltxt) if err: newTitle = None if self.replace: newTitle = self.PickTarget(True, ltxt, err[1]) if newTitle: if pageObj is None: pageObj = wikipedia.Page(self.site, title) pageTxt = pageObj.get() msg.append(u'[[%s]] => [[%s]]' % (ltxt, newTitle)) pageTxt = pageTxt.replace(ltxt, newTitle) pageTxt = pageTxt.replace(ltxt[0].lower() + ltxt[1:], newTitle[0].lower() + newTitle[1:]) pageTxt = pageTxt.replace(ltxt.replace(u' ', '_'), newTitle) if not newTitle: if not printed: self.WikiLog(u"* [[:%s]]: link to %s" % (title, err[0])) printed = True else: self.WikiLog(u"** link to %s" % err[0]) if pageObj is not None: if pageObj.get() == pageTxt: self.WikiLog(u"* Error: Text replacement failed in [[:%s]] (%s)" % (title, u', '.join(msg))) else: wikipedia.output(u'Case Replacements: %s' % u', '.join(msg)) try: pageObj.put(pageTxt, u'Case Replacements: %s' % u', '.join(msg)) except: self.WikiLog(u"* Error: Could not save updated page [[:%s]] (%s)" % (title, u', '.join(msg))) count += 1 if self.stopAfter > 0 and count == self.stopAfter: raise "Stopping because we are done" if self.apfrom is None: break
|
|
changed = False if self.replace and namespace != 14: newTitle = self.PickTarget(False, title, err[1]) if newTitle: src = wikipedia.Page(self.site, title) src.move( newTitle, u'mixed case rename') changed = True if not changed: self.WikiLog(u"* " + err[0]) printed = True
|
if page['ns'] == 14: self.WikiLog(u"* Move category content: " + err[0]) else: changed = False if self.replace: newTitle = self.PickTarget(False, title, err[1]) if newTitle: src = wikipedia.Page(self.site, title) src.move( newTitle, u'mixed case rename') changed = True if not changed: self.WikiLog(u"* " + err[0]) printed = True
|
def Run(self): try: count = 0 for namespace in self.namespaces: self.params['apnamespace'] = namespace self.apfrom = self.apfrom title = None while True: # Get data self.params['apfrom'] = self.apfrom data = query.GetData(self.site.lang, self.params, self.verbose) try: self.apfrom = data['query']['allpages']['next'] except: self.apfrom = None # Process received data if 'pages' in data: for pageID, page in data['pages'].iteritems(): printed = False title = page['title'] if self.titles: err = self.ProcessTitle(title) if err: changed = False if self.replace and namespace != 14: newTitle = self.PickTarget(False, title, err[1]) if newTitle: src = wikipedia.Page(self.site, title) src.move( newTitle, u'mixed case rename') changed = True if not changed: self.WikiLog(u"* " + err[0]) printed = True if self.links: if 'links' in page: pageObj = None pageTxt = None msg = [] for l in page['links']: ltxt = l['*'] err = self.ProcessTitle(ltxt) if err: newTitle = None if self.replace: newTitle = self.PickTarget(True, ltxt, err[1]) if newTitle: if pageObj is None: pageObj = wikipedia.Page(self.site, title) pageTxt = pageObj.get() msg.append(u'[[%s]] => [[%s]]' % (ltxt, newTitle)) pageTxt = pageTxt.replace(ltxt, newTitle) pageTxt = pageTxt.replace(ltxt[0].lower() + ltxt[1:], newTitle[0].lower() + newTitle[1:]) pageTxt = pageTxt.replace(ltxt.replace(u' ', '_'), newTitle) if not newTitle: if not printed: self.WikiLog(u"* [[:%s]]: link to %s" % (title, err[0])) printed = True else: self.WikiLog(u"** link to %s" % err[0]) if pageObj is not None: if pageObj.get() == pageTxt: self.WikiLog(u"* Error: Text replacement failed in [[:%s]] (%s)" % (title, u', '.join(msg))) else: wikipedia.output(u'Case Replacements: %s' % u', '.join(msg)) try: pageObj.put(pageTxt, u'Case Replacements: %s' % u', '.join(msg)) except: self.WikiLog(u"* Error: Could not save updated page [[:%s]] (%s)" % (title, u', '.join(msg))) count += 1 if self.stopAfter > 0 and count == self.stopAfter: raise "Stopping because we are done" if self.apfrom is None: break
|
for l in page['links']:
|
for l in allLinks:
|
def Run(self): try: count = 0 for namespace in self.namespaces: self.params['apnamespace'] = namespace self.apfrom = self.apfrom title = None while True: # Get data self.params['apfrom'] = self.apfrom data = query.GetData(self.site.lang, self.params, self.verbose) try: self.apfrom = data['query']['allpages']['next'] except: self.apfrom = None # Process received data if 'pages' in data: for pageID, page in data['pages'].iteritems(): printed = False title = page['title'] if self.titles: err = self.ProcessTitle(title) if err: changed = False if self.replace and namespace != 14: newTitle = self.PickTarget(False, title, err[1]) if newTitle: src = wikipedia.Page(self.site, title) src.move( newTitle, u'mixed case rename') changed = True if not changed: self.WikiLog(u"* " + err[0]) printed = True if self.links: if 'links' in page: pageObj = None pageTxt = None msg = [] for l in page['links']: ltxt = l['*'] err = self.ProcessTitle(ltxt) if err: newTitle = None if self.replace: newTitle = self.PickTarget(True, ltxt, err[1]) if newTitle: if pageObj is None: pageObj = wikipedia.Page(self.site, title) pageTxt = pageObj.get() msg.append(u'[[%s]] => [[%s]]' % (ltxt, newTitle)) pageTxt = pageTxt.replace(ltxt, newTitle) pageTxt = pageTxt.replace(ltxt[0].lower() + ltxt[1:], newTitle[0].lower() + newTitle[1:]) pageTxt = pageTxt.replace(ltxt.replace(u' ', '_'), newTitle) if not newTitle: if not printed: self.WikiLog(u"* [[:%s]]: link to %s" % (title, err[0])) printed = True else: self.WikiLog(u"** link to %s" % err[0]) if pageObj is not None: if pageObj.get() == pageTxt: self.WikiLog(u"* Error: Text replacement failed in [[:%s]] (%s)" % (title, u', '.join(msg))) else: wikipedia.output(u'Case Replacements: %s' % u', '.join(msg)) try: pageObj.put(pageTxt, u'Case Replacements: %s' % u', '.join(msg)) except: self.WikiLog(u"* Error: Could not save updated page [[:%s]] (%s)" % (title, u', '.join(msg))) count += 1 if self.stopAfter > 0 and count == self.stopAfter: raise "Stopping because we are done" if self.apfrom is None: break
|
pageTxt = pageTxt.replace(ltxt, newTitle) pageTxt = pageTxt.replace(ltxt[0].lower() + ltxt[1:], newTitle[0].lower() + newTitle[1:]) pageTxt = pageTxt.replace(ltxt.replace(u' ', '_'), newTitle)
|
frmParts = self.wordBreaker.split(ltxt) toParts = self.wordBreaker.split(newTitle) if len(frmParts) != len(toParts): raise u'Splitting parts do not match counts' for i in range(0, len(frmParts)): if len(frmParts[i]) != len(toParts[i]): raise u'Splitting parts do not match word length' if len(frmParts[i]) > 0: pageTxt = pageTxt.replace(frmParts[i], toParts[i]) pageTxt = pageTxt.replace(frmParts[i][0].lower() + frmParts[i][1:], toParts[i][0].lower() + toParts[i][1:])
|
def Run(self): try: count = 0 for namespace in self.namespaces: self.params['apnamespace'] = namespace self.apfrom = self.apfrom title = None while True: # Get data self.params['apfrom'] = self.apfrom data = query.GetData(self.site.lang, self.params, self.verbose) try: self.apfrom = data['query']['allpages']['next'] except: self.apfrom = None # Process received data if 'pages' in data: for pageID, page in data['pages'].iteritems(): printed = False title = page['title'] if self.titles: err = self.ProcessTitle(title) if err: changed = False if self.replace and namespace != 14: newTitle = self.PickTarget(False, title, err[1]) if newTitle: src = wikipedia.Page(self.site, title) src.move( newTitle, u'mixed case rename') changed = True if not changed: self.WikiLog(u"* " + err[0]) printed = True if self.links: if 'links' in page: pageObj = None pageTxt = None msg = [] for l in page['links']: ltxt = l['*'] err = self.ProcessTitle(ltxt) if err: newTitle = None if self.replace: newTitle = self.PickTarget(True, ltxt, err[1]) if newTitle: if pageObj is None: pageObj = wikipedia.Page(self.site, title) pageTxt = pageObj.get() msg.append(u'[[%s]] => [[%s]]' % (ltxt, newTitle)) pageTxt = pageTxt.replace(ltxt, newTitle) pageTxt = pageTxt.replace(ltxt[0].lower() + ltxt[1:], newTitle[0].lower() + newTitle[1:]) pageTxt = pageTxt.replace(ltxt.replace(u' ', '_'), newTitle) if not newTitle: if not printed: self.WikiLog(u"* [[:%s]]: link to %s" % (title, err[0])) printed = True else: self.WikiLog(u"** link to %s" % err[0]) if pageObj is not None: if pageObj.get() == pageTxt: self.WikiLog(u"* Error: Text replacement failed in [[:%s]] (%s)" % (title, u', '.join(msg))) else: wikipedia.output(u'Case Replacements: %s' % u', '.join(msg)) try: pageObj.put(pageTxt, u'Case Replacements: %s' % u', '.join(msg)) except: self.WikiLog(u"* Error: Could not save updated page [[:%s]] (%s)" % (title, u', '.join(msg))) count += 1 if self.stopAfter > 0 and count == self.stopAfter: raise "Stopping because we are done" if self.apfrom is None: break
|
self.WikiLog(u"* Error: Text replacement failed in [[:%s]] (%s)" % (title, u', '.join(msg)))
|
self.WikiLog(u"* Error: Text replacement failed in [[:%s]] (%s)" % (title, coloredMsg))
|
def Run(self): try: count = 0 for namespace in self.namespaces: self.params['apnamespace'] = namespace self.apfrom = self.apfrom title = None while True: # Get data self.params['apfrom'] = self.apfrom data = query.GetData(self.site.lang, self.params, self.verbose) try: self.apfrom = data['query']['allpages']['next'] except: self.apfrom = None # Process received data if 'pages' in data: for pageID, page in data['pages'].iteritems(): printed = False title = page['title'] if self.titles: err = self.ProcessTitle(title) if err: changed = False if self.replace and namespace != 14: newTitle = self.PickTarget(False, title, err[1]) if newTitle: src = wikipedia.Page(self.site, title) src.move( newTitle, u'mixed case rename') changed = True if not changed: self.WikiLog(u"* " + err[0]) printed = True if self.links: if 'links' in page: pageObj = None pageTxt = None msg = [] for l in page['links']: ltxt = l['*'] err = self.ProcessTitle(ltxt) if err: newTitle = None if self.replace: newTitle = self.PickTarget(True, ltxt, err[1]) if newTitle: if pageObj is None: pageObj = wikipedia.Page(self.site, title) pageTxt = pageObj.get() msg.append(u'[[%s]] => [[%s]]' % (ltxt, newTitle)) pageTxt = pageTxt.replace(ltxt, newTitle) pageTxt = pageTxt.replace(ltxt[0].lower() + ltxt[1:], newTitle[0].lower() + newTitle[1:]) pageTxt = pageTxt.replace(ltxt.replace(u' ', '_'), newTitle) if not newTitle: if not printed: self.WikiLog(u"* [[:%s]]: link to %s" % (title, err[0])) printed = True else: self.WikiLog(u"** link to %s" % err[0]) if pageObj is not None: if pageObj.get() == pageTxt: self.WikiLog(u"* Error: Text replacement failed in [[:%s]] (%s)" % (title, u', '.join(msg))) else: wikipedia.output(u'Case Replacements: %s' % u', '.join(msg)) try: pageObj.put(pageTxt, u'Case Replacements: %s' % u', '.join(msg)) except: self.WikiLog(u"* Error: Could not save updated page [[:%s]] (%s)" % (title, u', '.join(msg))) count += 1 if self.stopAfter > 0 and count == self.stopAfter: raise "Stopping because we are done" if self.apfrom is None: break
|
self.WikiLog(u"* Error: Could not save updated page [[:%s]] (%s)" % (title, u', '.join(msg)))
|
self.WikiLog(u"* Error: Could not save updated page [[:%s]] (%s)" % (title, coloredMsg))
|
def Run(self): try: count = 0 for namespace in self.namespaces: self.params['apnamespace'] = namespace self.apfrom = self.apfrom title = None while True: # Get data self.params['apfrom'] = self.apfrom data = query.GetData(self.site.lang, self.params, self.verbose) try: self.apfrom = data['query']['allpages']['next'] except: self.apfrom = None # Process received data if 'pages' in data: for pageID, page in data['pages'].iteritems(): printed = False title = page['title'] if self.titles: err = self.ProcessTitle(title) if err: changed = False if self.replace and namespace != 14: newTitle = self.PickTarget(False, title, err[1]) if newTitle: src = wikipedia.Page(self.site, title) src.move( newTitle, u'mixed case rename') changed = True if not changed: self.WikiLog(u"* " + err[0]) printed = True if self.links: if 'links' in page: pageObj = None pageTxt = None msg = [] for l in page['links']: ltxt = l['*'] err = self.ProcessTitle(ltxt) if err: newTitle = None if self.replace: newTitle = self.PickTarget(True, ltxt, err[1]) if newTitle: if pageObj is None: pageObj = wikipedia.Page(self.site, title) pageTxt = pageObj.get() msg.append(u'[[%s]] => [[%s]]' % (ltxt, newTitle)) pageTxt = pageTxt.replace(ltxt, newTitle) pageTxt = pageTxt.replace(ltxt[0].lower() + ltxt[1:], newTitle[0].lower() + newTitle[1:]) pageTxt = pageTxt.replace(ltxt.replace(u' ', '_'), newTitle) if not newTitle: if not printed: self.WikiLog(u"* [[:%s]]: link to %s" % (title, err[0])) printed = True else: self.WikiLog(u"** link to %s" % err[0]) if pageObj is not None: if pageObj.get() == pageTxt: self.WikiLog(u"* Error: Text replacement failed in [[:%s]] (%s)" % (title, u', '.join(msg))) else: wikipedia.output(u'Case Replacements: %s' % u', '.join(msg)) try: pageObj.put(pageTxt, u'Case Replacements: %s' % u', '.join(msg)) except: self.WikiLog(u"* Error: Could not save updated page [[:%s]] (%s)" % (title, u', '.join(msg))) count += 1 if self.stopAfter > 0 and count == self.stopAfter: raise "Stopping because we are done" if self.apfrom is None: break
|
reptxt = new_page_title
|
newlink = "[[%s]]" % new_page_title
|
def treat(refpl, thispl): try: reftxt=refpl.get() except wikipedia.IsRedirectPage: pass else: n = 0 curpos = 0 while 1: m=linkR.search(reftxt, pos = curpos) if not m: if n == 0: print "Not found in %s"%refpl elif not debug: refpl.put(reftxt) return True # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 # Try to standardize the page. if wikipedia.isInterwikiLink(m.group(1)): linkpl = None else: linkpl=wikipedia.PageLink(thispl.code(), m.group(1), incode = refpl.code()) # Check whether the link found is to thispl. if linkpl != thispl: continue n += 1 context = 30 while 1: print '\n' print "== %s =="%(refpl) print wikipedia.UnicodeToAsciiHtml(reftxt[max(0,m.start()-context):m.end()+context]) if always == None: choice=raw_input("Option (#,r#,s=skip link,n=next page,u=unlink,q=quit,\n" " m=more context,l=list,a=add new):") else: choice=always if choice=='n': return True elif choice=='s': choice=-1 break elif choice=='u': choice=-2 break elif choice=='a': ns=raw_input('New alternative:') alternatives.append(ns) elif choice=='q': return False elif choice=='m': context*=2 elif choice=='l': print '\n' for i in range(len(alternatives)): print "%3d" % i,repr(alternatives[i]) else: if choice[0] == 'r': replaceit = 1 choice = choice[1:] else: replaceit = 0 try: choice=int(choice) except ValueError: pass else: break if choice==-1: # Next link on this page continue page_title = m.group(1) link_text = m.group(2) if not link_text: link_text = page_title trailing_chars = m.group(3) if trailing_chars: link_text += trailing_chars if choice==-2: # unlink reftxt = reftxt[:m.start()] + link_text + reftxt[m.end():] else: # Normal replacement new_page_title = alternatives[choice] reppl = wikipedia.PageLink(thispl.code(), new_page_title, incode = refpl.code()) new_page_title = reppl.linkname() # There is a function that uncapitalizes the link target's first letter # if the link description starts with a small letter. This is useful on # nl: but annoying on de:. # At the moment the de: exclusion is only a workaround because I don't # know if other languages don't want this feature either. # We might want to introduce a list of languages that don't want to use # this feature. if wikipedia.mylang != 'de' and link_text[0] in 'abcdefghijklmnopqrstuvwxyz': new_page_title = new_page_title[0].lower() + new_page_title[1:] if replaceit or new_page_title == link_text: reptxt = new_page_title # check if we can create a link with trailing characters instead of a pipelink elif len(new_page_title) <= len(link_text) and link_text[:len(new_page_title)] == new_page_title and re.sub(trailR, '', link_text[len(new_page_title):]) == '': newlink = "[[%s]]%s" % (new_page_title, link_text[len(new_page_title):]) else: newlink = "[[%s|%s]]" % (new_page_title, link_text) reftxt = reftxt[:m.start()] + newlink + reftxt[m.end():] print wikipedia.UnicodeToAsciiHtml(reftxt[max(0,m.start()-30):m.end()+30]) if not debug: refpl.put(reftxt) return True
|
substring. which matches the exception. Otherwise it returns None.
|
substring which matches the exception. Otherwise it returns None.
|
def checkExceptions(self, original_text): """ If one of the exceptions applies for the given text, returns the substring. which matches the exception. Otherwise it returns None. """ for exception in self.exceptions: if self.regex: exception = re.compile(exception, re.UNICODE) hit = exception.search(original_text) if hit: return hit.group(0) else: hit = original_text.find(exception) if hit != -1: return original_text[hit:hit + len(exception)] return None
|
def allowedbot(site):
|
def allowedbot(username, site):
|
def allowedbot(site): """Checks whether the bot is listed on Wikipedia:bots""" pl = wikipedia.PageLink(site, "Wikipedia:Bots") text = pl.get() return "[[User:%s" % username in text
|
return "[[User:%s" % username in text
|
return "[[user:%s" % username in text.lower()
|
def allowedbot(site): """Checks whether the bot is listed on Wikipedia:bots""" pl = wikipedia.PageLink(site, "Wikipedia:Bots") text = pl.get() return "[[User:%s" % username in text
|
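The corrected membership test above lowercases the fetched page text so that "[[user:Foo" and "[[User:Foo" both match. A minimal sketch of the same idea; it also lowercases the username, which the original line does not, and the sample wikitext is invented:

    def allowedbot(username, text):
        # case-insensitive test for a [[User:...]] listing on Wikipedia:Bots
        return ("[[user:%s" % username).lower() in text.lower()

    sample = u'Approved bots:\n* [[User:ExampleBot|ExampleBot]] - interwiki'
    print allowedbot('ExampleBot', sample)    # True
    print allowedbot('OtherBot', sample)      # False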
if not allowedbot(ensite):
|
if not allowedbot(username, ensite):
|
def main(args): username = password = None for arg in args:#sys.argv[1:]: arg = wikipedia.argHandler(arg) if arg is None: continue if arg.startswith("-user:"): username = arg[6:] elif arg.startswith("-pass:"): # not recommended password = arg[6:] else: sys.exit("Unknown argument: %s" % arg) mysite = wikipedia.getSite() wikipedia.output(u"Logging in to %s" % repr(mysite)) user = username if username is None: username = config.username # wikipedia.input(u'username:', encode = True) if not password: # As we don't want the password to appear on the screen, we use getpass(). password = getpass.getpass('password: ') # Convert the password from the encoding your shell uses to the one your wiki # uses, via Unicode. This is the same as wikipedia.input() does with the # username, but input() uses raw_input() instead of getpass(). password = unicode(password, config.console_encoding) password = password.encode(wikipedia.myencoding()) # Ensure bot policy on the English Wikipedia ensite=wikipedia.getSite(code='en',fam='wikipedia') if mysite == ensite: if not allowedbot(ensite): print "Your username is not listed on [[Wikipedia:Bots]]" print "Please make sure you are allowed to use the robot" print "Before actually using it!" cookiedata = login(mysite, username, password) print cookiedata if cookiedata: storecookiedata(cookiedata, mysite, user) print "Should be logged in now" else: print "Login failed. Wrong password?"
|
if namespace == -1 or namespace != entry.namespace :
|
if namespace != -1 and namespace != entry.namespace:
|
def read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex, namespace): ''' Generator which will yield PageLinks to pages that might contain text to replace. These pages will be retrieved from a local sql dump file (cur table). Arguments: * sqlfilename - the dump's path, either absolute or relative * replacements - a dictionary where old texts are keys and new texts are values * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions ''' import sqldump dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) for entry in dump.entries(): skip_page = False if namespace == -1 or namespace != entry.namespace : continue else: for exception in exceptions: if regex: exception = re.compile(exception) if exception.search(entry.text): skip_page = True break else: if entry.text.find(exception) != -1: skip_page = True break if not skip_page: for old in replacements.keys(): if regex: old = re.compile(old) if old.search(entry.text): yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) break else: if entry.text.find(old) != -1: yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) break
|
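The flipped condition above is the whole fix: with the old "or", every entry was skipped whenever no namespace filter was given (namespace == -1); with "and", -1 now means "accept any namespace". A short truth check of the corrected skip test, with example namespace values:

    def skip(namespace, entry_namespace):
        # corrected filter from read_pages_from_sql_dump()
        return namespace != -1 and namespace != entry_namespace

    print skip(-1, 0)     # False: no filter requested, keep the entry
    print skip(0, 0)      # False: entry is in the requested namespace
    print skip(0, 14)     # True:  entry is outside the requested namespace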
lineR = re.compile('<li> \(.+?\) \(.+?\) <a href=".+?" title=".+?">(?P<datetime>.+?)</a> . . <a href=".+?" title=".+?">(?P<username>.+?)</a> \(<a href=".+?" title=".+?">.+?</a>\) . . (?P<resolution>\d+.+?\d+) \((?P<size>\d+) .+?\)( <span class=\'comment\'>(?P<comment>.*?)</span>)?</li>')
|
lineR = re.compile('<li> \(.+?\) \(.+?\) <a href=".+?" title=".+?">(?P<datetime>.+?)</a> . . <a href=".+?" title=".+?">(?P<username>.+?)</a> \(<a href=".+?" title=".+?">.+?</a>\) . . (?P<resolution>\d+.+?\d+) \((?P<size>[\d,]+) .+?\)( <span class="comment">(?P<comment>.*?)</span>)?</li>')
|
def getFileVersionHistory(self): result = [] history = re.search('(?s)<ul class="special">.+?</ul>', self.getImagePageContents()).group() lineR = re.compile('<li> \(.+?\) \(.+?\) <a href=".+?" title=".+?">(?P<datetime>.+?)</a> . . <a href=".+?" title=".+?">(?P<username>.+?)</a> \(<a href=".+?" title=".+?">.+?</a>\) . . (?P<resolution>\d+.+?\d+) \((?P<size>\d+) .+?\)( <span class=\'comment\'>(?P<comment>.*?)</span>)?</li>') for match in lineR.finditer(history): datetime = match.group('datetime') username = match.group('username') resolution = match.group('resolution') size = match.group('size') comment = match.group('comment') or '' result.append((datetime, username, resolution, size, comment)) return result
|
pl3 = wikipedia.Page(pl.site(),arg.args[0]) wikipedia.output(u"NOTE: %s is redirect to %s" % (pl.aslink(forceInterwiki = True), pl3.aslink(forceInterwiki = True))) if pl == self.inpl: isredirect = 1 for pl2 in self.todo: counter.minus(pl2.site()) self.todo = {} pass elif not globalvar.followredirect: print "NOTE: not following redirects." else: if self.conditionalAdd(pl3, counter, pl): if globalvar.shownew: wikipedia.output(u"%s: %s gives new redirect %s" % (self.inpl.aslink(), pl.aslink(forceInterwiki = True), pl3.aslink(forceInterwiki = True)))
|
try: pl3 = wikipedia.Page(pl.site(),arg.args[0]) wikipedia.output(u"NOTE: %s is redirect to %s" % (pl.aslink(forceInterwiki = True), pl3.aslink(forceInterwiki = True))) if pl == self.inpl: isredirect = 1 for pl2 in self.todo: counter.minus(pl2.site()) self.todo = {} pass elif not globalvar.followredirect: print "NOTE: not following redirects." else: if self.conditionalAdd(pl3, counter, pl): if globalvar.shownew: wikipedia.output(u"%s: %s gives new redirect %s" % (self.inpl.aslink(), pl.aslink(forceInterwiki = True), pl3.aslink(forceInterwiki = True))) except UnicodeDecodeError: wikipedia.output(u"BUG>>> processing %s: could not decode redirect to %s:%s" % (pl.aslink(forceInterwiki=True),pl.site(),arg.args[0]))
|
def workDone(self, counter): """This is called by a worker to tell us that the promised work was completed as far as possible. The only argument is an instance of a counter class, that has methods minus() and plus() to keep counts of the total work todo.""" # Loop over all the pages that should have been taken care of for pl in self.pending: # Mark the page as done self.done[pl] = pl.site()
|
first = first.lower()
|
first = first.lower().strip()
|
def isInterwikiLink(self, s): """ Try to check whether s is in the form "foo:bar" or ":foo:bar" where foo is a known language code or family. In such a case we are dealing with an interwiki link. """ s = s.lstrip(":") if not ':' in s: return False first, rest = s.split(':',1) # interwiki codes are case-insensitive first = first.lower() if first in self.validLanguageLinks() or ( first in self.family.known_families and self.family.known_families[first] != self.family.name): return True return False
|
return False
|
return self.isInterwikiLink(rest)
|
def isInterwikiLink(self, s): """ Try to check whether s is in the form "foo:bar" or ":foo:bar" where foo is a known language code or family. In such a case we are dealing with an interwiki link. """ s = s.lstrip(":") if not ':' in s: return False first, rest = s.split(':',1) # interwiki codes are case-insensitive first = first.lower() if first in self.validLanguageLinks() or ( first in self.family.known_families and self.family.known_families[first] != self.family.name): return True return False
|
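Returning self.isInterwikiLink(rest) instead of False makes the test recursive, so a prefix that is not itself a language code is skipped and the remainder is examined. A stand-alone sketch with a hard-coded code list in place of validLanguageLinks(), and the known-families branch omitted:

    KNOWN_CODES = ['en', 'de', 'fr', 'nl']      # stand-in for site.validLanguageLinks()

    def is_interwiki_link(s):
        s = s.lstrip(':')
        if ':' not in s:
            return False
        first, rest = s.split(':', 1)
        first = first.lower().strip()
        if first in KNOWN_CODES:
            return True
        # the first segment is not a language code, but a later one may be
        return is_interwiki_link(rest)

    print is_interwiki_link(':de:Foo')          # True
    print is_interwiki_link('w:en:Foo')         # True, found on the second segment
    print is_interwiki_link('Category:Foo')     # False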
data = {"wpName": self.username,
|
data = {"wpName": self.username.encode(self.site.encoding()),
|
def getCookie(self, remember=True): """Login to wikipedia. remember Remember login (default: True) Returns cookie data if succesful, None otherwise.""" data = {"wpName": self.username, "wpPassword": self.password, "wpLoginattempt": "Aanmelden & Inschrijven", # dutch button label seems to work for all wikis "wpRemember": str(int(bool(remember)))} data = wikipedia.urlencode(data.items()) headers = { "Content-type": "application/x-www-form-urlencoded", "User-agent": "RobHooftWikiRobot/1.0" } pagename = self.site.login_address() if self.site.hostname() in config.authenticate.keys(): response = urllib2.urlopen(urllib2.Request('http://'+self.site.hostname()+pagename, data)) data = response.read() wikipedia.cj.save(wikipedia.COOKIEFILE) return "Ok" else: conn = httplib.HTTPConnection(self.site.hostname()) conn.request("POST", pagename, data, headers) response = conn.getresponse() conn.close()
|
fam Wikimedia family (optional: defaults to configured)
|
fam Wikimedia family (optional: defaults to configured). Can either be a string or a Family object.
|
def __init__(self, code, fam=None, user=None): """Constructor takes three arguments:
|
family = Family(fam) if code in family.obsolete and family.obsolete[code]: code = family.obsolete[code]
|
def getSite(code = None, fam = None, user=None): if code == None: code = default_code if fam == None: fam = default_family # if we got an outdated code, use the new one instead. family = Family(fam) if code in family.obsolete and family.obsolete[code]: code = family.obsolete[code] key = '%s:%s'%(fam,code) if not _sites.has_key(key): _sites[key] = Site(code=code, fam=fam, user=user) return _sites[key]
|
|
self.problem(u"%s: %s gives duplicate interwiki on same site %s" % (self.originPage.aslink(), page.aslink(True), linkedPage.aslink(True))) if globalvar.autonomous: self.todo = [] return
|
wikipedia.output(u"NOTE: %s: %s gives duplicate interwiki on same site %s" % (self.originPage.aslink(), page.aslink(True), linkedPage.aslink(True)))
|
def workDone(self, counter): """ This is called by a worker to tell us that the promised work was completed as far as possible. The only argument is an instance of a counter class, that has methods minus() and plus() to keep counts of the total work todo. """ # Loop over all the pages that should have been taken care of for page in self.pending: # Mark the page as done self.done.append(page)
|
print "The specified page is not a redirect." sys.exit(1)
|
print "The specified page is not a redirect. Skipping." continue
|
def run(self): if self.main_only: if ignore_title.has_key(self.mylang): ignore_title[self.mylang] += self.mysite.namespaces() else: ignore_title[self.mylang] = self.mysite.namespaces() for disambTitle in self.page_list: # when run with -redir argument, there's another summary message if self.solve_redirect: wikipedia.setAction(wikipedia.translate(self.mysite,msg_redir) % disambTitle) else: wikipedia.setAction(wikipedia.translate(self.mysite,msg) % disambTitle) disambPl = wikipedia.PageLink(self.mysite, disambTitle) self.primaryIgnoreManager = PrimaryIgnoreManager(disambPl, enabled = self.primary) if self.solve_redirect: try: target = disambPl.getRedirectTo() target = unicode(target, wikipedia.myencoding()) self.alternatives.append(target) except wikipedia.NoPage: print "The specified page was not found." user_input = wikipedia.input(u"Please enter the name of the page where the redirect should have pointed at, or press enter to quit:") if user_input == "": sys.exit(1) else: self.alternatives.append(user_input) except wikipedia.IsNotRedirectPage: print "The specified page is not a redirect." sys.exit(1) elif self.getAlternatives: try: if self.primary: disamb_pl = wikipedia.PageLink(self.mysite, primary_topic_format[self.mylang] % disambTitle) thistxt = disamb_pl.get(throttle=False) else: thistxt = disambPl.get(throttle=False) except wikipedia.IsRedirectPage,arg: thistxt = wikipedia.PageLink(self.mysite, str(arg)).get(throttle=False) except wikipedia.NoPage: print "Page does not exist?!" thistxt = "" thistxt = wikipedia.removeLanguageLinks(thistxt) thistxt = wikipedia.removeCategoryLinks(thistxt, self.mysite) # regular expression matching a wikilink w = r'([^\]\|]*)' Rlink = re.compile(r'\[\['+w+r'(\|'+w+r')?\]\]') for matchObj in Rlink.findall(thistxt): self.alternatives.append(matchObj[0]) self.makeAlternativesUnique() # sort possible choices self.alternatives.sort() self.listAlternatives() gen = ReferringPageGenerator(disambPl, self.primary) for refpl in gen.generate(): if not self.primaryIgnoreManager.isIgnored(refpl): # run until the user selected 'quit' if not self.treat(refpl, disambPl): break # clear alternatives before working on next disambiguation page self.alternatives = []
|
replacements.append((templateR, '{{subst:' + self.old + '}}'))
|
replacements.append((templateR, '{{subst:' + self.old + '\g<parameters>}}'))
|
def run(self): # regular expression to find the original template. # {{msg:vfd}} does the same thing as {{msg:Vfd}}, so both will be found. # The new syntax, {{vfd}}, will also be found. # The group 'parameters' will either match the parameters, or an # empty string if there are none. if not wikipedia.getSite().nocapitalize: old = '[' + self.old[0].upper() + self.old[0].lower() + ']' + self.old[1:] else: old = self.old old = re.sub('[_ ]', '[_ ]', old) templateR=re.compile(r'\{\{([mM][sS][gG]:)?' + old + '(?P<parameters>\|[^}]+|)}}') replacements = [] if self.remove: replacements.append((templateR, '')) elif self.resolve: replacements.append((templateR, '{{subst:' + self.old + '}}')) else: replacements.append((templateR, '{{' + self.new + '\g<parameters>}}')) replaceBot = replace.ReplaceRobot(self.generator, replacements, regex = True) replaceBot.run()
|
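The \g<parameters> backreference is what this fix adds: without it, substituting a template discarded any parameters that followed the template name. A small re.sub sketch of just that difference; the template name "vfd" is only an example:

    import re

    templateR = re.compile(r'\{\{([mM][sS][gG]:)?[Vv]fd(?P<parameters>\|[^}]+|)\}\}')
    text = u'Nominated for deletion. {{vfd|not notable}}'

    print templateR.sub(u'{{subst:Vfd}}', text)                 # parameters are lost
    print templateR.sub(u'{{subst:Vfd\g<parameters>}}', text)   # parameters survive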
wikipedia.setAction(wikipedia.translate(wikipedia.getSite(),msg_change) % oldCatTitle)
|
wikipedia.setAction(wikipedia.translate(wikipedia.getSite(),msg_change) % oldCat.title())
|
def __init__(self, oldCatTitle, newCatTitle): self.oldCat = catlib.Category(wikipedia.getSite(), 'Category:' + oldCatTitle) self.newCatTitle = newCatTitle # get edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.getSite(),msg_change) % oldCatTitle)
|
def read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex):
|
def read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex, namespace):
|
def read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): ''' Generator which will yield PageLinks to pages that might contain text to replace. These pages will be retrieved from a local sql dump file (cur table). Arguments: * sqlfilename - the dump's path, either absolute or relative * replacements - a dictionary where old texts are keys and new texts are values * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions ''' import sqldump dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) for entry in dump.entries(): skip_page = False for exception in exceptions: if regex: exception = re.compile(exception) if exception.search(entry.text): skip_page = True break else: if entry.text.find(exception) != -1: skip_page = True break if not skip_page: for old in replacements.keys(): if regex: old = re.compile(old) if old.search(entry.text): yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) break else: if entry.text.find(old) != -1: yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) break
|
for exception in exceptions: if regex: exception = re.compile(exception) if exception.search(entry.text): skip_page = True break else: if entry.text.find(exception) != -1: skip_page = True break
|
if namespace == -1 or namespace != entry.namespace : continue else: for exception in exceptions: if regex: exception = re.compile(exception) if exception.search(entry.text): skip_page = True break else: if entry.text.find(exception) != -1: skip_page = True break
|
def read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): ''' Generator which will yield PageLinks to pages that might contain text to replace. These pages will be retrieved from a local sql dump file (cur table). Arguments: * sqlfilename - the dump's path, either absolute or relative * replacements - a dictionary where old texts are keys and new texts are values * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions ''' import sqldump dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) for entry in dump.entries(): skip_page = False for exception in exceptions: if regex: exception = re.compile(exception) if exception.search(entry.text): skip_page = True break else: if entry.text.find(exception) != -1: skip_page = True break if not skip_page: for old in replacements.keys(): if regex: old = re.compile(old) if old.search(entry.text): yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) break else: if entry.text.find(old) != -1: yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) break
|
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None):
|
def generator(source, replacements, exceptions, regex, namespace, textfilename = None, sqlfilename = None, pagenames = None):
|
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': for pagename in pagenames: yield wikipedia.PageLink(wikipedia.mylang, pagename)
|
for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex):
|
for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex, namespace):
|
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': for pagename in pagenames: yield wikipedia.PageLink(wikipedia.mylang, pagename)
|
namespace = -1
|
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': for pagename in pagenames: yield wikipedia.PageLink(wikipedia.mylang, pagename)
|
|
wikipedia.setAction(wikipedia.translate(wikipedia.mylang, msg)+change)
|
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': for pagename in pagenames: yield wikipedia.PageLink(wikipedia.mylang, pagename)
|
|
for pl in generator(source, replacements, exceptions, regex, textfilename, sqlfilename, pagenames):
|
for pl in generator(source, replacements, exceptions, regex, namespace, textfilename, sqlfilename, pagenames):
|
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': for pagename in pagenames: yield wikipedia.PageLink(wikipedia.mylang, pagename)
|
print 'No changes were necessary in %s' % pl.linkname()
|
try: print 'No changes were necessary in %s' % pl.linkname() except UnicodeEncodeError: print 'Error decoding pl.linkname()' continue
|
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': for pagename in pagenames: yield wikipedia.PageLink(wikipedia.mylang, pagename)
|
def resolveRedirect(self):
|
def resolveRedirect(self, useHEAD = True):
|
def resolveRedirect(self): ''' Requests the header from the server. If the page is an HTTP redirect, returns the redirect target URL as a string. Otherwise returns None. ''' if self.scheme == 'http': conn = httplib.HTTPConnection(self.host) elif self.scheme == 'https': conn = httplib.HTTPSConnection(self.host) conn.request('HEAD', '%s%s' % (self.path, self.query), None, self.header) response = conn.getresponse() if response.status >= 300 and response.status <= 399: #print response.getheaders() redirTarget = response.getheader('Location') #print "redirTarget:", redirTarget if redirTarget: if redirTarget.startswith('http://') or redirTarget.startswith('https://'): self.changeUrl(redirTarget) return True elif redirTarget.startswith('/'): self.changeUrl('%s://%s%s' % (self.protocol, self.host, redirTarget)) return True else: # redirect to relative position # cut off filename directory = self.path[:self.path.rindex('/') + 1] # handle redirect to parent directory while redirTarget.startswith('../'): redirTarget = redirTarget[3:] # change /foo/bar/ to /foo/ directory = directory[:-1] directory = directory[:directory.rindex('/') + 1] self.changeUrl('%s://%s%s%s' % (self.protocol, self.host, directory, redirTarget)) return True else: return False # not a redirect
|
conn.request('HEAD', '%s%s' % (self.path, self.query), None, self.header)
response = conn.getresponse()
|
try:
    if useHEAD:
        conn.request('HEAD', '%s%s' % (self.path, self.query), None, self.header)
    else:
        conn.request('GET', '%s%s' % (self.path, self.query), None, self.header)
    response = conn.getresponse()
except httplib.BadStatusLine:
    return self.resolveRedirect(useHEAD = False)
|
def resolveRedirect(self):
    '''
    Requests the header from the server. If the page is an HTTP redirect,
    returns the redirect target URL as a string. Otherwise returns None.
    '''
    if self.scheme == 'http':
        conn = httplib.HTTPConnection(self.host)
    elif self.scheme == 'https':
        conn = httplib.HTTPSConnection(self.host)
    conn.request('HEAD', '%s%s' % (self.path, self.query), None, self.header)
    response = conn.getresponse()
    if response.status >= 300 and response.status <= 399:
        #print response.getheaders()
        redirTarget = response.getheader('Location')
        #print "redirTarget:", redirTarget
        if redirTarget:
            if redirTarget.startswith('http://') or redirTarget.startswith('https://'):
                self.changeUrl(redirTarget)
                return True
            elif redirTarget.startswith('/'):
                self.changeUrl('%s://%s%s' % (self.protocol, self.host, redirTarget))
                return True
            else:
                # redirect to relative position
                # cut off filename
                directory = self.path[:self.path.rindex('/') + 1]
                # handle redirect to parent directory
                while redirTarget.startswith('../'):
                    redirTarget = redirTarget[3:]
                    # change /foo/bar/ to /foo/
                    directory = directory[:-1]
                    directory = directory[:directory.rindex('/') + 1]
                self.changeUrl('%s://%s%s%s' % (self.protocol, self.host, directory, redirTarget))
                return True
    else:
        return False # not a redirect
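Aside: this row retries a failed HEAD request with GET when the server sends a malformed status line. A rough standalone sketch of that fallback idea; the function name, host and path below are illustrative, not the bot's own URL handling:

import httplib

def fetch_status(host, path, useHEAD = True):
    # Ask only for the headers first; some servers mishandle HEAD requests.
    conn = httplib.HTTPConnection(host)
    try:
        method = 'HEAD' if useHEAD else 'GET'
        conn.request(method, path)
        response = conn.getresponse()
        return response.status
    except httplib.BadStatusLine:
        if useHEAD:
            # Retry the same URL with a full GET before giving up.
            return fetch_status(host, path, useHEAD = False)
        raise
    finally:
        conn.close()

# Example (needs network access):
# print fetch_status('www.example.org', '/')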
|
choice = wikipedia.input('WARNING: %s is a disambiguation page, but %s doesn\'t seem to be one. Follow it anyway? [y|N]' % (self.inpl.aslink(), pl.aslink()))
|
choice = wikipedia.inputChoice('WARNING: %s is a disambiguation page, but %s doesn\'t seem to be one. Follow it anyway?' % (self.inpl.aslink(), pl.aslink()), ['Yes', 'No'], ['y', 'N'], 'N')
|
def workDone(self, counter):
    """This is called by a worker to tell us that the promised work was
       completed as far as possible. The only argument is an instance of a
       counter class, that has methods minus() and plus() to keep counts of
       the total work todo."""
    # Loop over all the pages that should have been taken care of
    for pl in self.pending:
        # Mark the page as done
        self.done[pl] = pl.site()
        # Register this fact at the todo-counter.
        counter.minus(pl.site())
        # Assume it's not a redirect
        isredirect = 0
        # Now check whether any interwiki links should be added to the
        # todo list.
        if pl.section():
            # We have been referred to a part of a page, not the whole page. Do not follow references.
            pass
        else:
            try:
                iw = pl.interwiki()
            except wikipedia.IsRedirectPage,arg:
                pl3 = wikipedia.Page(pl.site(),arg.args[0])
                wikipedia.output(u"NOTE: %s is redirect to %s" % (pl.aslink(), pl3.aslink()))
                if pl == self.inpl:
                    # This is a redirect page itself. We don't need to
                    # follow the redirection.
                    isredirect = 1
                    # In this case we can also stop all hints!
                    for pl2 in self.todo:
                        counter.minus(pl2.site())
                    self.todo = {}
                    pass
                elif not globalvar.followredirect:
                    print "NOTE: not following redirects."
                else:
                    if self.conditionalAdd(pl3, counter, pl):
                        if globalvar.shownew:
                            wikipedia.output(u"%s: %s gives new redirect %s" % (self.inpl.aslink(), pl.aslink(), pl3.aslink()))
            except wikipedia.NoPage:
                wikipedia.output(u"NOTE: %s does not exist" % pl.aslink())
                #print "DBG> ",pl.urlname()
                if pl == self.inpl:
                    # This is the home subject page.
                    # In this case we can stop all hints!
                    for pl2 in self.todo:
                        counter.minus(pl2.site())
                    self.todo = {}
                    self.done = {} # In some rare cases it might be we already did check some 'automatic' links
                    pass
            #except wikipedia.SectionError:
            #    wikipedia.output(u"NOTE: section %s does not exist" % pl.aslink())
            else:
                if not globalvar.autonomous:
                    if self.inpl.isDisambig() and not pl.isDisambig():
                        choice = wikipedia.input('WARNING: %s is a disambiguation page, but %s doesn\'t seem to be one. Follow it anyway? [y|N]' % (self.inpl.aslink(), pl.aslink()))
                    elif not self.inpl.isDisambig() and pl.isDisambig():
                        choice = wikipedia.input('WARNING: %s doesn\'t seem to be a disambiguation page, but %s is one. Follow it anyway? [y|N]' % (self.inpl.aslink(), pl.aslink()))
                    else:
                        choice = 'y'
                    if choice not in ['y', 'Y']:
                        wikipedia.output(u"NOTE: ignoring %s and its interwiki links" % pl.aslink())
                        del self.done[pl]
                        iw = ()
                if self.inpl == pl:
                    self.untranslated = (len(iw) == 0)
                    if globalvar.untranslatedonly:
                        # Ignore the interwiki links.
                        iw = ()
                elif pl.isEmpty():
                    if not pl.isCategory():
                        wikipedia.output(u"NOTE: %s is empty; ignoring it and its interwiki links" % pl.aslink())
                        # Ignore the interwiki links
                        iw = ()
                for page2 in iw:
                    if page2.site().language() in globalvar.neverlink:
                        print "Skipping link %s to an ignored language"% page2
                        continue
                    if globalvar.same=='wiktionary' and page2.linkname().lower()!=self.inpl.linkname().lower():
                        print "NOTE: Ignoring %s for %s in wiktionary mode"% (page2, self.inpl)
                        continue
                    if not globalvar.autonomous:
                        if self.inpl.namespace() != page2.namespace():
                            choice = wikipedia.input('WARNING: %s is in namespace %i, but %s is in namespace %i. Follow it anyway? [y|N]' % (self.inpl.aslink(), self.inpl.namespace(), page2.aslink(), page2.namespace()))
                            if choice not in ['y', 'Y']:
                                continue
                    if self.conditionalAdd(page2, counter, pl):
                        if globalvar.shownew:
                            wikipedia.output(u"%s: %s gives new interwiki %s"% (self.inpl.aslink(), pl.aslink(), page2.aslink()))
    # These pages are no longer 'in progress'
    del self.pending
    # Check whether we need hints and the user offered to give them
    if self.untranslated and not self.hintsasked:
        wikipedia.output(u"NOTE: %s does not have any interwiki links" % self.inpl.aslink())
    if (self.untranslated or globalvar.askhints) and not self.hintsasked and not isredirect:
        # Only once!
        self.hintsasked = True
        if globalvar.untranslated:
            newhint = None
            t = globalvar.showtextlink
            if t:
                wikipedia.output(pl.get()[:t])
            while 1:
                newhint = wikipedia.input(u'Give a hint (? to see pagetext):')
                if newhint == '?':
                    t += globalvar.showtextlinkadd
                    wikipedia.output(pl.get()[:t])
                elif newhint and not ':' in newhint:
                    print "Please enter a hint like language:pagename"
                    print "or type nothing if you do not have a hint"
                elif not newhint:
                    break
                else:
                    arr = {}
                    titletranslate.translate(pl, arr, same = False, hints = [newhint], auto = globalvar.auto)
                    for pl2 in arr.iterkeys():
                        self.todo[pl2] = pl2.site()
                        counter.plus(pl2.site())
                        self.foundin[pl2] = [None]
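Aside: the last row replaces a free-form wikipedia.input() prompt with wikipedia.inputChoice(), which carries the answer list, the hotkeys and a default. A small illustrative helper with roughly that shape; this is an assumed simplification, not pywikipedia's implementation:

def input_choice(question, answers, hotkeys, default):
    # Show the answers and hotkeys next to the question and fall back to
    # the default when the user just presses Enter.
    prompt = '%s (%s) [%s] ' % (question, ', '.join(answers), '|'.join(hotkeys))
    reply = raw_input(prompt).strip()
    if not reply:
        return default
    return reply

# Example:
# choice = input_choice('Follow it anyway?', ['Yes', 'No'], ['y', 'N'], 'N')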