Dataset fields: rem (string, lengths 0 to 322k), add (string, lengths 0 to 2.05M), context (string, lengths 8 to 228k).
if x0<self.plotaread[0] and x1>self.plotaread[0]:
if x0<self.plotaread[0] and x1>=self.plotaread[0]:
def drawpoint(self, aFullPNString, datapoint): #get datapoint x y values #convert to plot coordinates if self.trace_onoff[aFullPNString]==gtk.FALSE: return 0 x=self.convertx_to_plot(datapoint[0]) y=self.converty_to_plot(datapoint[1]) cur_point_within_frame=self.withinframes([x,y]) #getlastpoint, calculate change to the last lastx=self.lastx[aFullPNString] lasty=self.lasty[aFullPNString] self.lastx[aFullPNString]=x self.lasty[aFullPNString]=y
if x0>self.plotaread[2] and x1<self.plotaread[2]:
if x0>self.plotaread[2] and x1<=self.plotaread[2]:
def drawpoint(self, aFullPNString, datapoint): #get datapoint x y values #convert to plot coordinates if self.trace_onoff[aFullPNString]==gtk.FALSE: return 0 x=self.convertx_to_plot(datapoint[0]) y=self.converty_to_plot(datapoint[1]) cur_point_within_frame=self.withinframes([x,y]) #getlastpoint, calculate change to the last lastx=self.lastx[aFullPNString] lasty=self.lasty[aFullPNString] self.lastx[aFullPNString]=x self.lasty[aFullPNString]=y
if x1<self.plotaread[0] and x0>self.plotaread[0]:
if x1<self.plotaread[0] and x0>=self.plotaread[0]:
def drawpoint(self, aFullPNString, datapoint): #get datapoint x y values #convert to plot coordinates if self.trace_onoff[aFullPNString]==gtk.FALSE: return 0 x=self.convertx_to_plot(datapoint[0]) y=self.converty_to_plot(datapoint[1]) cur_point_within_frame=self.withinframes([x,y]) #getlastpoint, calculate change to the last lastx=self.lastx[aFullPNString] lasty=self.lasty[aFullPNString] self.lastx[aFullPNString]=x self.lasty[aFullPNString]=y
if x1>self.plotaread[2] and x0<self.plotaread[2]:
if x1>self.plotaread[2] and x0<=self.plotaread[2]:
def drawpoint(self, aFullPNString, datapoint): #get datapoint x y values #convert to plot coordinates if self.trace_onoff[aFullPNString]==gtk.FALSE: return 0 x=self.convertx_to_plot(datapoint[0]) y=self.converty_to_plot(datapoint[1]) cur_point_within_frame=self.withinframes([x,y]) #getlastpoint, calculate change to the last lastx=self.lastx[aFullPNString] lasty=self.lasty[aFullPNString] self.lastx[aFullPNString]=x self.lasty[aFullPNString]=y
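The four diffs above relax the strict boundary comparisons (> to >=, < to <=) so that a segment endpoint landing exactly on the plot-area edge still counts as crossing it. A minimal standalone sketch (hypothetical names, not the plotter's actual API) of the behaviour being fixed:

def crosses_left_edge_strict(x0, x1, left):
    # Misses the case where x1 lands exactly on the edge.
    return x0 < left and x1 > left

def crosses_left_edge_inclusive(x0, x1, left):
    # Counts a point sitting exactly on the edge as having crossed it.
    return x0 < left and x1 >= left

print(crosses_left_edge_strict(-5, 0, 0))     # False: the edge hit is dropped
print(crosses_left_edge_inclusive(-5, 0, 0))  # True: the edge hit is kept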
if string.count( fullid, ':' ) != 2: raise ValueError( "FullID must have 2 ':'s." ) return string.split( fullid, ':' )
aList = string.split( fullid, ':' ) aLength = len( aList ) if aLength != 3: raise ValueError( "FullID must have 2 ':'s. ( %d given ) " % aLength - 1 ) return aList
def parseFullID( fullid ): if string.count( fullid, ':' ) != 2: raise ValueError( "FullID must have 2 ':'s." ) return string.split( fullid, ':' )
if string.count( fullpropertyname, ':' ) != 3: raise ValueError( "FullPropertyName must have 3 ':'s." ) return string.split( fullpropertyname, ':' )
aList = string.split( fullpropertyname, ':' ) aLength = len( aList ) if aLength != 4: raise ValueError( "FullPropertyName must have 3 ':'s. ( %d given ) " % aLength - 1 ) return aList
def parseFullPropertyName( fullpropertyname ): if string.count( fullpropertyname, ':' ) != 3: raise ValueError( "FullPropertyName must have 3 ':'s." ) return string.split( fullpropertyname, ':' )
"FullID has 3 parts. ( %d given )" % aLength )
"FullID has 3 fields. ( %d given )" % aLength )
def constructFullID( words ): aLength = len( words ) if aLength != 3: raise ValueError( "FullID has 3 parts. ( %d given )" % aLength ) return string.join( words, ':' )
"FullPropertyName has 4 parts. ( %d given )" % aLength )
"FullPropertyName has 4 fields. ( %d given )" % aLength )
def constructFullPropertyName( words ): aLength = len( words ) if aLength != 4: raise ValueError( "FullPropertyName has 4 parts. ( %d given )" % aLength ) return string.join( words, ':' )
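The parser diffs switch from counting ':' characters to validating the length of the split result, and the construct* error messages now say "fields" instead of "parts". A minimal sketch of equivalent checks without the Python 2 string module; note that the added "... ( %d given ) " % aLength - 1 binds as ("..." % aLength) - 1 and raises a TypeError, so the subtraction needs its own parentheses:

def parse_full_id(full_id):
    # A FullID is three colon-separated fields.
    fields = full_id.split(':')
    if len(fields) != 3:
        # Parenthesise the subtraction: "..." % n - 1 is ("..." % n) - 1.
        raise ValueError("FullID must have 2 ':'s. ( %d given )" % (len(fields) - 1))
    return fields

def construct_full_id(fields):
    if len(fields) != 3:
        raise ValueError("FullID has 3 fields. ( %d given )" % len(fields))
    return ':'.join(fields)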
aKineticLaw = aReaction.getKineticLaw() ListOfKineticLaw = [] if aKineticLaw != []: aFormula_KL = aKineticLaw.getFormula() aMath = [] if( aSBMLDocument.getLevel() == 1 ): aMath.append( '' ) else: anASTNode_KL = aKineticLaw.getMath() aMath.append( libsbml.formulaToString( anASTNode_KL ) ) aString_KL = aMath aTimeUnit_KL = aKineticLaw.getTimeUnits() aSubstanceUnit_KL = aKineticLaw.getSubstanceUnits() if aKineticLaw.getParameter(0): ListOfParameters = [] NumParameter_KL = aKineticLaw.getNumParameters() for NumPara in range( NumParameter_KL ): ListOfParameter = [] aParameter = aKineticLaw.getParameter( NumPara ) anId_KL_P = aParameter.getId() aName_KL_P = aParameter.getName() aValue_KL_P = str( aParameter.getValue() ) aUnit_KL_P = aParameter.getUnits() aConstant_KL_P = aParameter.getConstant() ListOfParameter.append( anId_KL_P ) ListOfParameter.append( aName_KL_P ) ListOfParameter.append( aValue_KL_P ) ListOfParameter.append( aUnit_KL_P ) ListOfParameter.append( aConstant_KL_P ) ListOfParameters.append( ListOfParameter ) else: ListOfParameters = [] anExpressionAnnotation = aKineticLaw.getAnnotation() ListOfKineticLaw.append( aFormula_KL ) ListOfKineticLaw.append( aString_KL ) ListOfKineticLaw.append( aTimeUnit_KL ) ListOfKineticLaw.append( aSubstanceUnit_KL ) ListOfKineticLaw.append( ListOfParameters ) ListOfKineticLaw.append( anExpressionAnnotation )
if aReaction.isSetKineticLaw(): aKineticLaw = aReaction.getKineticLaw() ListOfKineticLaw = [] if aKineticLaw != []: if aKineticLaw.isSetFormula(): aFormula_KL = aKineticLaw.getFormula() else: aFormula_KL = '' aMath = [] if( aSBMLDocument.getLevel() == 1 ): aMath.append( '' ) else: if aKineticLaw.isSetMath(): anASTNode_KL = aKineticLaw.getMath() aMath.append( libsbml.formulaToString\ ( anASTNode_KL ) ) else: aMath.append( '' ) aString_KL = aMath aTimeUnit_KL = aKineticLaw.getTimeUnits() aSubstanceUnit_KL = aKineticLaw.getSubstanceUnits() if aKineticLaw.getParameter(0): ListOfParameters = [] NumParameter_KL = aKineticLaw.getNumParameters() for NumPara in range( NumParameter_KL ): ListOfParameter = [] aParameter = aKineticLaw.getParameter( NumPara ) anId_KL_P = aParameter.getId() aName_KL_P = aParameter.getName() aValue_KL_P = str( aParameter.getValue() ) aUnit_KL_P = aParameter.getUnits() aConstant_KL_P = aParameter.getConstant() ListOfParameter.append( anId_KL_P ) ListOfParameter.append( aName_KL_P ) ListOfParameter.append( aValue_KL_P ) ListOfParameter.append( aUnit_KL_P ) ListOfParameter.append( aConstant_KL_P ) ListOfParameters.append( ListOfParameter ) else: ListOfParameters = [] anExpressionAnnotation = aKineticLaw.getAnnotation() ListOfKineticLaw.append( aFormula_KL ) ListOfKineticLaw.append( aString_KL ) ListOfKineticLaw.append( aTimeUnit_KL ) ListOfKineticLaw.append( aSubstanceUnit_KL ) ListOfKineticLaw.append( ListOfParameters ) ListOfKineticLaw.append( anExpressionAnnotation )
def getReaction( aSBMLmodel, aSBMLDocument ): " [[ Id , Name , [ Formula , String , TimeUnit , SubstanceUnit , [[ ParameterId , ParameterName , ParameterValue , ParameterUnit , ParameterConstant ]] ] , Reversible , Fast , [[ ReactantSpecies , ( ReactantStoichiometry , ReactantStoichiometryMath ) , ReactantDenominator ]] , [[ ProductSpecies , ( ProductStoichiometry , ProductStoichiometryMath ) , ProductDenominator ]] , [[ ModifierSpecies ]] ]] " LIST = [] if aSBMLmodel.getReaction(0): NumReaction = aSBMLmodel.getNumReactions() for Num in range( NumReaction ): ListOfReaction = [] aReaction = aSBMLmodel.getReaction( Num ) anId = aReaction.getId() aName =aReaction.getName()
count = 0
count = 1
def checkMultiplicity(self): processes = {} my_pid = 1 count = 0 try: f = open('throttle.log','r') except IOError: if not self.pid: pass else: raise else: now = time.time() for line in f.readlines(): line = line.split(' ') pid = int(line[0]) ptime = int(line[1].split('.')[0]) if now - ptime <= self.releasepid: if now - ptime <= self.dropdelay and pid != self.pid: count += 1 processes[pid] = ptime if pid >= my_pid: my_pid = pid+1 if not self.pid: self.pid = my_pid self.checktime = time.time() processes[self.pid] = self.checktime f = open('throttle.log','w') for p in processes.keys(): f.write(str(p)+' '+str(processes[p])+'\n') f.close() self.process_multiplicity = count print("Checked for running processes. %s processes currently running, "%len(processes) + "including the current process.")
print("Checked for running processes. %s processes currently running, "%len(processes) +
print("Checked for running processes. %s processes currently running, "%count +
def checkMultiplicity(self): processes = {} my_pid = 1 count = 0 try: f = open('throttle.log','r') except IOError: if not self.pid: pass else: raise else: now = time.time() for line in f.readlines(): line = line.split(' ') pid = int(line[0]) ptime = int(line[1].split('.')[0]) if now - ptime <= self.releasepid: if now - ptime <= self.dropdelay and pid != self.pid: count += 1 processes[pid] = ptime if pid >= my_pid: my_pid = pid+1 if not self.pid: self.pid = my_pid self.checktime = time.time() processes[self.pid] = self.checktime f = open('throttle.log','w') for p in processes.keys(): f.write(str(p)+' '+str(processes[p])+'\n') f.close() self.process_multiplicity = count print("Checked for running processes. %s processes currently running, "%len(processes) + "including the current process.")
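The throttle diffs start the counter at 1 and report count rather than len(processes), so the current process is counted exactly once. A condensed, hypothetical sketch of that counting loop (file name and field layout taken from the context above):

import time

def count_running_processes(logfile, my_pid, dropdelay):
    # Start at 1 so the current process is always included in the total.
    count = 1
    now = time.time()
    try:
        f = open(logfile, 'r')
    except IOError:
        return count
    for line in f.readlines():
        if not line.strip():
            continue
        pid, ptime = line.split(' ')
        if now - int(float(ptime)) <= dropdelay and int(pid) != my_pid:
            count += 1
    f.close()
    return count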
ofp.write(oldcontent.encode(config.console_encoding))
if self.options.new_data == '': ofp.write(oldcontent.encode(config.console_encoding)) else: ofp.write(oldcontent.encode(config.console_encoding)+'\n===========\n'+self.options.new_data)
def edit(self): """Edit the page using the editor. It returns two strings: the old version and the new version.""" ofn = tempfile.mktemp() ofp = open(ofn, 'w') try: oldcontent = self.pagelink.get() except wikipedia.NoPage: oldcontent = "" except wikipedia.IsRedirectPage: if self.options.redirect: oldcontent = self.pagelink.get(force=True, get_redirect=redirect) else: raise ofp.write(oldcontent.encode(config.console_encoding)) # FIXME: encoding of wiki ofp.close() os.system("%s %s" % (self.options.editor, ofn)) newcontent = open(ofn).read().decode(config.console_encoding) os.unlink(ofn) return oldcontent, newcontent
if site == mysite and pl.exists() and not pl.isRedirectPage() and not pl.isEmpty():
if site == mysite and pl.exists() and not pl.isRedirectPage():
def assemble(self): # No errors have been seen so far nerr = 0 # Build up a dictionary of all links found, with the site as key. # Each value will be a list. mysite = wikipedia.getSite() new = {} for pl in self.done.keys(): site = pl.site() if site == mysite and pl.exists() and not pl.isRedirectPage() and not pl.isEmpty(): if pl != self.inpl: self.problem("Found link to %s"%pl.aslink(None)) self.whereReport(pl) nerr += 1 elif pl.exists() and not pl.isRedirectPage(): if site in new: new[site].append(pl) else: new[site] = [pl] # Clean up the Chinese links zhcnsite = mysite.getSite(code='zh-cn') zhtwsite = mysite.getSite(code='zh-tw') zhsite = mysite.getSite(code='zh') if zhtwsite in new and zhcnsite in new: if len(new[zhcnsite]) == 1 and len(new[zhtwsite]) == 1: if new[zhcnsite][0].linkname() == new[zhtwsite][0].linkname(): zhpl = wikipedia.PageLink(zhsite,new[zhcnsite][0].linkname()) new[zhsite] = [zhpl] for pl2 in self.foundin[new[zhcnsite][0]]: if self.foundin.has_key(zhpl): self.foundin[zhpl]=self.foundin[zhpl] + self.foundin[new[zhcnsite][0]]
print "WARNING: Could not load %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site.hostname(), path, retry_idle_time)
print "WARNING: Could not load %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site().hostname(), path, retry_idle_time)
def getEditPage(self, get_redirect=False, throttle = True, sysop = False): """ Get the contents of the Page via the edit page. Do not use this directly, use get() instead. Arguments: get_redirect - Get the contents, even if it is a redirect page This routine returns a unicode string containing the wiki text. """ isWatched = False editRestriction = None output(u'Getting page %s' % self.aslink()) path = self.site().edit_address(self.urlname()) # Make sure Brion doesn't get angry by waiting if the last time a page # was retrieved was not long enough ago. if throttle: get_throttle() # Try to retrieve the page until it was successfully loaded (just in case # the server is down or overloaded) # wait for retry_idle_time minutes (growing!) between retries. retry_idle_time = 1 while True: starttime = time.time() try: text = self.site().getUrl(path, sysop = sysop) except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: Could not load %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site.hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue get_throttle.setDelay(time.time() - starttime)\ # Look for the edit token R = re.compile(r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"") tokenloc = R.search(text) if tokenloc: self.site().putToken(tokenloc.group(1), sysop = sysop) elif not self.site().getToken(getalways = False): self.site().putToken('', sysop = sysop) # Look if the page is on our watchlist R = re.compile(r"\<input tabindex='[\d]+' type='checkbox' name='wpWatchthis' checked='checked'") matchWatching = R.search(text) if matchWatching: isWatched = True m = re.search('value="(\d+)" name=["\']wpEdittime["\']', text) if m: self._editTime = m.group(1) else: self._editTime = "0" # Extract the actual text from the textedit field try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site().hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(self.site(), self.title()) m = self.site().redirectRegex().match(text[i1:i2]) if self._editTime == "0": output(u"DBG> page may be locked?!") editRestriction = 'sysop' if m and not get_redirect: output(u"DBG> %s is redirect to %s" % (self.title(), m.group(1))) raise IsRedirectPage(m.group(1)) x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] return x, isWatched, editRestriction
'mk': [u'Никулец'],
def __init__(self): family.Family.__init__(self) self.name = 'wikipedia'
'eu' : lambda v: dh_dayOfMnth( v, u'Aprilaren %d' ),
'eu' : lambda v: dh_dayOfMnth( v, u'Apirilaren %d' ),
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
'cy' : lambda v: dh_dayOfMnth( v, u'%d Gorffenaf' ),
'cy' : lambda v: dh_dayOfMnth( v, u'%d Gorffennaf' ),
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
'la' : lambda v: dh_noConv( v, u'%d a.C.n' ),
'la' : lambda v: dh_noConv( v, u'%d a.C.n.' ),
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
the optional argument 'into' specifies the encoding of the target wikipedia
the argument 'incode' specifies the encoding of the target wikipedia
def url2link(percentname,incode,code): """Convert a url-name of a page into a proper name for an interwiki link the optional argument 'into' specifies the encoding of the target wikipedia """ result=underline2space(percentname) x=url2unicode(result,language=code) if code2encoding(incode)==code2encoding(code): #print "url2link",repr(x),"same encoding" return unicode2html(x,encoding=code2encoding(code)) else: #print "url2link",repr(x),"different encoding" return unicode2html(x,encoding='ascii')
edits = editR.findall(txt)
edits = editR.findall(self._versionhistory)
def getVersionHistory(self, forceReload = False): """ Loads the version history page and returns a list of tuples, where each tuple represents one edit and is built of edit date/time, user name, and edit summary. """ site = self.site() host = site.hostname() url = site.family.version_history_address(site, self.urlname())
yield wikipedia.PageLink(wikipedia.mylang, pagenames)
for pagename in pagenames: yield wikipedia.PageLink(wikipedia.mylang, pagename)
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': yield wikipedia.PageLink(wikipedia.mylang, pagenames)
pass
continue
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': yield wikipedia.PageLink(wikipedia.mylang, pagenames)
newTable, num = re.subn(r'([\r\n]+(\!|\||\{\|)[^\r\n\|]+)[ ]*=[ ]*([^"][^\s]+?[^"])(\s)', r'\1="\3"\4', newTable, 1)
newTable, num = re.subn(r'([\r\n]+(?:!|\||\{\|)[^\r\n\|]+) *= *([^"\s>]+)(\s)', r'\1="\2"\3', newTable, 1)
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
retry_idle_time = 1 while True:
startFromPage = None thisCatDone = False while not thisCatDone: retry_idle_time = 1 while True: try: if startFromPage: txt = wikipedia.getPage(site, cat.urlname() + '&from=' + startFromPage, get_edit_page = False) else: txt = wikipedia.getPage(site, cat.urlname(), get_edit_page = False) except: raise print "WARNING: There was a problem retrieving %s. Maybe the server is down. Retrying in %d minutes..." % (cat.linkname(), retry_idle_time) time.sleep(retry_idle_time * 60) retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue break self_txt = txt ibegin = txt.index('"clear:both;"')
def _make_catlist(self, recurse = False, site = None): """Make a list of all articles and categories that are in this category. If recurse is set to True, articles and subcategories of any subcategories are also retrieved.
txt = wikipedia.getPage(site, cat.urlname(), get_edit_page = False) except: raise print "WARNING: There was a problem retrieving %s. Maybe the server is down. Retrying in %d minutes..." % (cat.linkname(), retry_idle_time) time.sleep(retry_idle_time * 60) retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue break self_txt = txt ibegin = txt.index('"clear:both;"') try: iend = txt.index('<div id="catlinks">') except ValueError: iend = txt.index('<!-- end content -->') txt = txt[ibegin:iend] for title in Rtitle.findall(txt): if iscattitle(title): ncat = _CatLink(self.site(), title) if recurse and ncat not in catsdone: catstodo.append(ncat) subcats.append(title)
iend = txt.index('<div id="catlinks">') except ValueError: iend = txt.index('<!-- end content -->') txt = txt[ibegin:iend] for title in Rtitle.findall(txt): if iscattitle(title): ncat = _CatLink(self.site(), title) if recurse and ncat not in catsdone: catstodo.append(ncat) subcats.append(title) else: articles.append(title) matchObj = RLinkToNextPage.search(txt) if matchObj: startFromPage = matchObj.group(1) wikipedia.output('There are more articles in %s.' % cat.linkname())
def _make_catlist(self, recurse = False, site = None): """Make a list of all articles and categories that are in this category. If recurse is set to True, articles and subcategories of any subcategories are also retrieved.
articles.append(title)
thisCatDone = True
def _make_catlist(self, recurse = False, site = None): """Make a list of all articles and categories that are in this category. If recurse is set to True, articles and subcategories of any subcategories are also retrieved.
title = title.encode(site.encoding()) title = urllib.unquote(title) return unicode(title, site.encoding())
try: t = title.encode(site.encoding()) t = urllib.unquote(t) return unicode(t, site.encoding()) except UnicodeError: for enc in site.encodings(): try: t = title.encode(enc) t = urllib.unquote(t) return unicode(t, enc) except UnicodeError: pass raise
def url2unicode(title, site): title = title.encode(site.encoding()) title = urllib.unquote(title) return unicode(title, site.encoding())
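The added code retries the percent-decoding with each of the site's encodings instead of assuming the primary one always works. A condensed Python 2 sketch of that fallback loop, with the candidate encodings passed in explicitly (primary encoding first):

import urllib

def url2unicode(title, encodings):
    # Try each candidate encoding until the percent-decoded bytes
    # decode cleanly; give up only after all of them have failed.
    for enc in encodings:
        try:
            t = title.encode(enc)
            t = urllib.unquote(t)
            return unicode(t, enc)
        except UnicodeError:
            pass
    raise UnicodeError('could not decode %r with %r' % (title, encodings))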
def main():
try:
def fix_double_redirects(source): mysite = wikipedia.getSite() for redir_name in retrieve_double_redirects(source): print '' redir = wikipedia.PageLink(mysite, redir_name) try: target = redir.getRedirectTo() except wikipedia.IsNotRedirectPage: wikipedia.output(u'%s is not a redirect.' % redir.linkname()) except wikipedia.NoPage: wikipedia.output(u'%s doesn\'t exist.' % redir.linkname()) except wikipedia.LockedPage: wikipedia.output(u'%s is locked, skipping.' % redir.linkname()) else: try: second_redir = wikipedia.PageLink(mysite, target) second_target = second_redir.getRedirectTo(read_only = True) except wikipedia.IsNotRedirectPage: wikipedia.output(u'%s is not a redirect.' % second_redir.linkname()) except wikipedia.NoPage: wikipedia.output(u'%s doesn\'t exist.' % second_redir.linkname()) else: txt = "#REDIRECT [[%s]]" % second_target status, reason, data = redir.put(txt) print status, reason
if True: try: main() except: wikipedia.stopme() raise else: wikipedia.stopme()
finally: wikipedia.stopme()
def main(): # read command line parameters # what the bot should do (either resolve double redirs, or delete broken redirs) action = None # where the bot should get his infos from (either None to load the maintenance # special page from the live wiki, the filename of a local sql dump file) source = None # Which namespace should be processed when using a SQL dump # default to -1 which means all namespaces will be processed namespace = -1 # at which redirect shall we start searching double redirects again (only with dump) # default to -1 which means all redirects are checked restart = -1 for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg) if arg: if arg == 'double': action = 'double' elif arg == 'broken': action = 'broken' elif arg.startswith('-sql'): if len(arg) == 4: sqlfilename = wikipedia.input(u'Please enter the SQL dump\'s filename: ') else: sqlfilename = arg[5:] import sqldump source = sqlfilename elif arg.startswith('-namespace:'): namespace = int(arg[11:]) elif arg.startswith('-restart:'): restart = int(arg[9:]) else: print 'Unknown argument: %s' % arg if action == 'double': # get summary text wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_double)) fix_double_redirects(source) elif action == 'broken': delete_broken_redirects(source) else: wikipedia.output(__doc__, 'utf-8')
self.process_multiplicity = len(processes)
self.process_multiplicity = count
def checkMultiplicity(self): processes = {} my_pid = 1 count = 0 try: f = open('throttle.log','r') except IOError: if not self.pid: pass else: raise else: now = time.time() for line in f.readlines(): line = line.split(' ') pid = int(line[0]) ptime = int(line[1].split('.')[0]) if now - ptime <= self.releasepid: if now - ptime <= self.dropdelay and pid != self.pid: count += 1 processes[pid] = ptime if pid >= my_pid: my_pid = pid+1 if not self.pid: self.pid = my_pid self.checktime = time.time() processes[self.pid] = self.checktime f = open('throttle.log','w') for p in processes.keys(): f.write(str(p)+' '+str(processes[p])+'\n') f.close() self.process_multiplicity = len(processes) print("Checked for running processes. %s processes currently running, "%len(processes) + "including the current process.")
if now - ptime <= self.dropdelay and pid != self.pid:
if now - ptime <= self.releasepid and pid != self.pid:
def drop(self): """Remove me from the list of running bots processes.""" self.checktime = 0 processes = {} try: f = open('throttle.log','r') except IOError: return else: now = time.time() for line in f.readlines(): line = line.split(' ') pid = int(line[0]) ptime = int(line[1].split('.')[0]) if now - ptime <= self.dropdelay and pid != self.pid: processes[pid] = ptime f = open('throttle.log','w') for p in processes.keys(): f.write(str(p)+' '+str(processes[p])+'\n') f.close()
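In drop(), the added line prunes throttle.log with the releasepid window rather than the (normally shorter) dropdelay window, so entries for recently finished processes survive until they are actually released. A small sketch of that pruning step, using the pid/timestamp dictionary layout from the context above:

def prune_log(entries, now, releasepid, my_pid):
    # Keep every other process that was active within the release window;
    # the current pid is removed because this bot is shutting down.
    kept = {}
    for pid, ptime in entries.items():
        if now - ptime <= releasepid and pid != my_pid:
            kept[pid] = ptime
    return kept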
if self.oldCat.exists() and batchMode == False:
if self.oldCat.exists():
def run(self): newCat = catlib.Category(wikipedia.getSite(), 'Category:' + self.newCatTitle) gen = pagegenerators.CategorizedPageGenerator(self.oldCat, recurse = False) preloadingGen = pagegenerators.PreloadingGenerator(gen) for article in preloadingGen: catlib.change_category(article, self.oldCat, newCat) # TODO: create subcategory generator subcategories = self.oldCat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category ' + self.oldCat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.oldCat, newCat) if self.oldCat.exists() and batchMode == False: # try to copy page contents to new cat page if self.oldCat.copyTo(newCatTitle): if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle self.oldCat.delete(reason) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle self.oldCat.delete(reason) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
if batchMode == False: if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle self.oldCat.delete(reason) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
def run(self): newCat = catlib.Category(wikipedia.getSite(), 'Category:' + self.newCatTitle) gen = pagegenerators.CategorizedPageGenerator(self.oldCat, recurse = False) preloadingGen = pagegenerators.PreloadingGenerator(gen) for article in preloadingGen: catlib.change_category(article, self.oldCat, newCat) # TODO: create subcategory generator subcategories = self.oldCat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category ' + self.oldCat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.oldCat, newCat) if self.oldCat.exists() and batchMode == False: # try to copy page contents to new cat page if self.oldCat.copyTo(newCatTitle): if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle self.oldCat.delete(reason) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
t=self.sectionFreeTitle() p=t.split(':', 1) if len(p) == 1: return p[0] else: return p[1]
if self.namespace() == 0: return self.title() else: return self.sectionFreeTitle().split(':', 1)[1]
def titleWithoutNamespace(self): """The name of the page without the namespace part. Returns the sectionFreeTitle if the page is from the main namespace. Note that this is a raw way of doing things - it simply looks for a : in the name.""" t=self.sectionFreeTitle() p=t.split(':', 1) if len(p) == 1: # page is in the main namespace return p[0] else: return p[1]
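The rewritten titleWithoutNamespace returns the title unchanged for main-namespace pages instead of blindly splitting on ':'. A standalone sketch (hypothetical helper, not the Page API) of namespace-aware splitting:

def title_without_namespace(title, namespaces):
    # namespaces is a set of known prefixes such as {'Talk', 'Category'};
    # only split when the part before the first ':' really is a namespace.
    prefix, _, rest = title.partition(':')
    if rest and prefix in namespaces:
        return rest
    return title

print(title_without_namespace('Category:Physics', {'Category'}))       # Physics
print(title_without_namespace('2001: A Space Odyssey', {'Category'}))  # unchanged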
print "NOTE: Ignoring %s, using %s"%(new[zh].asasciilink(),pl.asasciilink())
print "NOTE: Ignoring %s, using %s"%(new['zh'].asasciilink(),pl.asasciilink())
def treesearch(pl): arr = {pl:None} # First make one step based on the language itself try: n = treestep(arr, pl, abort_on_redirect = 1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n == 0 and not arr[pl]: print "Mother doesn't exist" return if untranslated: if len(arr) > 1: print "Already has translations" else: if bell: sys.stdout.write('\07') newhint = raw_input("Hint:") if not newhint: return hints.append(newhint) # Then add translations if we survived. autotranslate(pl, arr, same = same) modifications = 1 while modifications: modifications = 0 for newpl in arr.keys(): if arr[newpl] is None: modifications += treestep(arr, newpl) return arr
Rlink=re.compile('\[\[([^\]]*)\]\]')
w=r'([^\]\|]*)' Rlink=re.compile(r'\[\['+w+r'(\|'+w+r')?\]\]')
def getreferences(pl): host = wikipedia.langs[pl.code()] url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname()) txt,charset=wikipedia.getUrl(host,url) Rref=re.compile('<li><a href.* title="([^"]*)"') return Rref.findall(txt)
alternatives=Rlink.findall(thistxt)
alternatives=[] for a in Rlink.findall(thistxt): alternatives.append(a[0])
def getreferences(pl): host = wikipedia.langs[pl.code()] url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname()) txt,charset=wikipedia.getUrl(host,url) Rref=re.compile('<li><a href.* title="([^"]*)"') return Rref.findall(txt)
Rthis=re.compile('\[\[%s(\|[^\]]*)?\]\]'%thispl.linkname())
exps=[] zz='\[\[(%s)(\|[^\]]*)?\]\]' Rthis=re.compile(zz%thispl.linkname()) exps.append(Rthis)
def getreferences(pl): host = wikipedia.langs[pl.code()] url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname()) txt,charset=wikipedia.getUrl(host,url) Rref=re.compile('<li><a href.* title="([^"]*)"') return Rref.findall(txt)
Rthis2=re.compile('\[\[%s(\|[^\]]*)?\]\]'%aln)
Rthis=re.compile(zz%aln) exps.append(Rthis) Rthis=re.compile(zz%thispl.linkname().lower()) exps.append(Rthis) Rthis=re.compile(zz%aln.lower()) exps.append(Rthis)
def getreferences(pl): host = wikipedia.langs[pl.code()] url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname()) txt,charset=wikipedia.getUrl(host,url) Rref=re.compile('<li><a href.* title="([^"]*)"') return Rref.findall(txt)
m=Rthis.search(reftxt) if not m: m=Rthis2.search(reftxt) if not m:
for Rthis in exps: m=Rthis.search(reftxt) if m: break else:
def getreferences(pl): host = wikipedia.langs[pl.code()] url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname()) txt,charset=wikipedia.getUrl(host,url) Rref=re.compile('<li><a href.* title="([^"]*)"') return Rref.findall(txt)
print "== %s =="%(refpl) print reftxt[m.start()-context:m.end()+context]
print "== %s =="%(refpl),m.start(),m.end() print reftxt[max(0,m.start()-context):m.end()+context]
def getreferences(pl): host = wikipedia.langs[pl.code()] url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname()) txt,charset=wikipedia.getUrl(host,url) Rref=re.compile('<li><a href.* title="([^"]*)"') return Rref.findall(txt)
if not g1: g1=thispl.linkname() reptxt="%s|%s"%(alternatives[choice],g1)
g2=m.group(2) if g2: g2=g2[1:] else: g2=g1 reptxt="%s|%s"%(alternatives[choice],g2)
def getreferences(pl): host = wikipedia.langs[pl.code()] url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname()) txt,charset=wikipedia.getUrl(host,url) Rref=re.compile('<li><a href.* title="([^"]*)"') return Rref.findall(txt)
print newtxt[m.start()-30:m.end()+30]
print newtxt[max(0,m.start()-30):m.end()+30]
def getreferences(pl): host = wikipedia.langs[pl.code()] url="/w/wiki.phtml?title=Speciaal:Whatlinkshere&target=%s"%(pl.urlname()) txt,charset=wikipedia.getUrl(host,url) Rref=re.compile('<li><a href.* title="([^"]*)"') return Rref.findall(txt)
print 'match'
def getReferences(pl, follow_redirects = True): host = family.hostname(pl.code()) url = family.references_address(mylang, pl.urlname()) output('Getting references to %s:%s' % (pl.code(), pl.linkname())) txt, charset = getUrl(host,url) # remove brackets which would disturb the regular expression cascadedListR txt = txt.replace('<a', 'a') txt = txt.replace('</a', '/a') txt = txt.replace('<li', 'li') txt = txt.replace('</li', 'li') if not follow_redirects: # remove these links from HTML which are in an unordered # list at level > 1. cascadedListR = re.compile(r"(.*<ul>[^<]*)<ul>[^<]*<\/ul>([^<]*</\ul>.*)") endR = re.compile(r"</ul>") # current index in txt string pos = 0 while cascadedListR.search(txt): print 'match' m = cascadedListR.search(txt) txt = m.group(1) + m.group(2) Rref = re.compile('li>a href.*="([^"]*)"') x = Rref.findall(txt) x.sort() # Remove duplicates for i in range(len(x)-1, 0, -1): if x[i] == x[i-1]: del x[i] return x
if pl.isDisambig(): return pl
try: if pl.isDisambig(): return pl except wikipedia.NoPage: pass
def hasdisambig(self,site): # Returns TRUE if a link to a disambiguation page on site site has been found for pl in self.pending + self.done.keys(): if pl.site() == site: if pl.isDisambig(): return pl return False
if not pl.isDisambig(): return pl
try: if not pl.isDisambig(): return pl except wikipedia.NoPage: pass
def hasnondisambig(self,site): # Returns TRUE if a link to a disambiguation page on site site has been found for pl in self.pending + self.done.keys(): if pl.site() == site: if not pl.isDisambig(): return pl return False
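Both hasdisambig and hasnondisambig now wrap the isDisambig() call in a try/except, so a page that has disappeared is simply skipped instead of aborting the whole check. A tiny hedged wrapper showing the same guard (assumes the pywikipedia wikipedia module used in the context above):

import wikipedia

def is_known_disambig(page):
    # Treat pages whose text cannot be fetched as non-disambiguation
    # pages rather than letting NoPage propagate out of the scan.
    try:
        return page.isDisambig()
    except wikipedia.NoPage:
        return False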
def getVersionHistory(self, forceReload = False):
def getVersionHistory(self, forceReload = False, reverseOrder = False, getAll = False):
def getRedirectTarget(self): """ If the page is a redirect page, gives the title of the page it redirects to. Otherwise it will raise an IsNotRedirectPage exception. This function can raise a NoPage exception. """ try: self.get() except NoPage: raise NoPage(self) except IsRedirectPage, arg: if '|' in arg: warnings.warn("%s has a | character, this makes no sense", Warning) return arg[0] else: raise IsNotRedirectPage(self)
summary.
summary. Defaults to getting the first 500 edits.
def getVersionHistory(self, forceReload = False): """ Loads the version history page and returns a list of tuples, where each tuple represents one edit and is built of edit date/time, user name, and edit summary. """ site = self.site() path = site.family.version_history_address(self.site().language(), self.urlname())
path = site.family.version_history_address(self.site().language(), self.urlname()) if not hasattr(self, '_versionhistory') or forceReload: output(u'Getting version history of %s' % self.title()) txt = site.getUrl(path) self._versionhistory = txt
def getVersionHistory(self, forceReload = False): """ Loads the version history page and returns a list of tuples, where each tuple represents one edit and is built of edit date/time, user name, and edit summary. """ site = self.site() path = site.family.version_history_address(self.site().language(), self.urlname())
edits = editR.findall(self._versionhistory) return edits def getVersionHistoryTable(self, forceReload = False):
startFromPage = None thisHistoryDone = False skip = False RLinkToNextPage = re.compile('&amp;offset=(.*?)&amp;') if reverseOrder: if not hasattr(self, '_versionhistoryearliest') or forceReload: self._versionhistoryearliest = [] elif getAll and len(self._versionhistoryearliest) == 500: thisHistoryDone = False skip = True else: thisHistoryDone = True elif not hasattr(self, '_versionhistory') or forceReload: self._versionhistory = [] elif getAll and len(self._versionhistory) == 500: thisHistoryDone = False skip = True else: thisHistoryDone = True while not thisHistoryDone: path = site.family.version_history_address(self.site().language(), self.urlname()) if reverseOrder: if len(self._versionhistoryearliest) >= 500: path += '&dir=prev' else: path += '&go=first' if startFromPage: path += '&offset=' + startFromPage retry_idle_time = 1 while True: try: if startFromPage: output(u'Continuing to get version history of %s' % self.title()) else: output(u'Getting version history of %s' % self.title()) txt = site.getUrl(path) except: raise print "WARNING: There was a problem retrieving %s. Maybe the server is down. Retrying in %d minutes..." % (self.title(), retry_idle_time) time.sleep(retry_idle_time * 60) if retry_idle_time < 32: retry_idle_time *= 2 continue break self_txt = txt if reverseOrder: if getAll: if len(self._versionhistoryearliest) == 0: matchObj = RLinkToNextPage.search(self_txt) if matchObj: startFromPage = matchObj.group(1) else: thisHistoryDone = True edits = editR.findall(self_txt) edits.reverse() for edit in edits: self._versionhistoryearliest.append(edit) if len(edits) < 500: thisHistoryDone = True else: if not skip: edits = editR.findall(self_txt) edits.reverse() for edit in edits: self._versionhistoryearliest.append(edit) if len(edits) < 500: thisHistoryDone = True matchObj = RLinkToNextPage.search(self_txt) if matchObj: startFromPage = matchObj.group(1) else: thisHistoryDone = True else: skip = False matchObj = RLinkToNextPage.search(self_txt) if matchObj: startFromPage = matchObj.group(1) else: thisHistoryDone = True else: for edit in editR.findall(self_txt): self._versionhistoryearliest.append(edit) self._versionhistoryearliest.reverse() thisHistoryDone = True else: if getAll: if len(self._versionhistory) == 0: matchObj = RLinkToNextPage.search(self_txt) if matchObj: startFromPage = matchObj.group(1) else: thisHistoryDone = True edits = editR.findall(self_txt) for edit in edits: self._versionhistory.append(edit) if len(edits) < 500: thisHistoryDone = True else: if not skip: edits = editR.findall(self_txt) for edit in edits: self._versionhistory.append(edit) if len(edits) < 500: thisHistoryDone = True matchObj = RLinkToNextPage.findall(self_txt) if len(matchObj) >= 2: startFromPage = matchObj[1] else: thisHistoryDone = True else: skip = False matchObj = RLinkToNextPage.search(self_txt) if matchObj: startFromPage = matchObj.group(1) else: thisHistoryDone = True else: for edit in editR.findall(self_txt): self._versionhistory.append(edit) thisHistoryDone = True if reverseOrder: if len(self._versionhistoryearliest) > 500 and not getAll: return self._versionhistoryearliest[0:500] return self._versionhistoryearliest if len(self._versionhistory) > 500 and not getAll: return self._versionhistory[0:500] return self._versionhistory def getVersionHistoryTable(self, forceReload = False, reverseOrder = False, getAll = False):
def getVersionHistory(self, forceReload = False): """ Loads the version history page and returns a list of tuples, where each tuple represents one edit and is built of edit date/time, user name, and edit summary. """ site = self.site() path = site.family.version_history_address(self.site().language(), self.urlname())
for time, username, summary in self.getVersionHistory(forceReload = forceReload):
for time, username, summary in self.getVersionHistory(forceReload = forceReload, reverseOrder = reverseOrder, getAll = getAll):
def getVersionHistoryTable(self, forceReload = False): """ Returns the version history as a wiki table. """ result = '{| border="1"\n' result += '! date/time || username || edit summary\n' for time, username, summary in self.getVersionHistory(forceReload = forceReload): result += '|----\n' result += '| %s || %s || <nowiki>%s</nowiki>\n' % (time, username, summary) result += '|}\n' return result
def delete(pl, reason = None, prompt = True):
def delete(self, reason = None, prompt = True):
def delete(pl, reason = None, prompt = True): """Deletes the page from the wiki. Requires administrator status. If reason is None, asks for a reason. If prompt is True, asks the user if he wants to delete the page. """ # TODO: Find out if bot is logged in with an admin account, raise exception # or show error message otherwise # TODO: Find out if deletion was successful or e.g. if file has already been # deleted by someone else # taken from lib_images.py and modified UGLY COPY def post_multipart(host, selector, fields, cookies): """ Post fields and files to an http host as multipart/form-data. fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files. Return the server's response page. """ content_type, body = encode_multipart_formdata(fields) h = httplib.HTTP(host) h.putrequest('POST', selector) h.putheader('content-type', content_type) h.putheader('content-length', str(len(body))) h.putheader("User-agent", "PythonWikipediaBot/1.0") h.putheader('Host', host) if cookies: h.putheader('Cookie', cookies) h.endheaders() h.send(body) errcode, errmsg, headers = h.getreply() return h.file.read() # taken from lib_images.py and modified UGLY COPY def encode_multipart_formdata(fields): """ fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body if reason == None: reason = input(u'Please enter a reason for the deletion:') answer = 'y' if prompt: answer = input(u'Do you want to delete %s? [y|N]' % pl.linkname()) if answer in ['y', 'Y']: output(u'Deleting page %s...' % pl.linkname()) token = pl.site().getToken() returned_html = post_multipart(pl.site().hostname(), pl.site().delete_address(pl.urlname()), (('wpReason', reason), ('wpConfirm', '1'), ('wpEditToken', token)), pl.site().cookies()) # check if deletion was successful # therefore, we need to know what the MediaWiki software says after # a successful deletion deleted_msg = mediawiki_messages.get('actioncomplete') deleted_msg = re.escape(deleted_msg) deleted_msgR = re.compile(deleted_msg) if deleted_msgR.search(returned_html): output(u'Deletion successful.') else: output(u'Deletion failed:.') try: ibegin = returned_html.index('<!-- start content -->') + 22 iend = returned_html.index('<!-- end content -->') except ValueError: # if begin/end markers weren't found, show entire HTML file output(returned_html, myencoding()) else: # otherwise, remove the irrelevant sections returned_html = returned_html[ibegin:iend] output(returned_html, myencoding())
def post_multipart(host, selector, fields, cookies): """ Post fields and files to an http host as multipart/form-data. fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files. Return the server's response page. """ content_type, body = encode_multipart_formdata(fields) h = httplib.HTTP(host) h.putrequest('POST', selector) h.putheader('content-type', content_type) h.putheader('content-length', str(len(body))) h.putheader("User-agent", "PythonWikipediaBot/1.0") h.putheader('Host', host) if cookies: h.putheader('Cookie', cookies) h.endheaders() h.send(body) errcode, errmsg, headers = h.getreply() return h.file.read() def encode_multipart_formdata(fields): """ fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body
def delete(pl, reason = None, prompt = True): """Deletes the page from the wiki. Requires administrator status. If reason is None, asks for a reason. If prompt is True, asks the user if he wants to delete the page. """ # TODO: Find out if bot is logged in with an admin account, raise exception # or show error message otherwise # TODO: Find out if deletion was successful or e.g. if file has already been # deleted by someone else # taken from lib_images.py and modified UGLY COPY def post_multipart(host, selector, fields, cookies): """ Post fields and files to an http host as multipart/form-data. fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files. Return the server's response page. """ content_type, body = encode_multipart_formdata(fields) h = httplib.HTTP(host) h.putrequest('POST', selector) h.putheader('content-type', content_type) h.putheader('content-length', str(len(body))) h.putheader("User-agent", "PythonWikipediaBot/1.0") h.putheader('Host', host) if cookies: h.putheader('Cookie', cookies) h.endheaders() h.send(body) errcode, errmsg, headers = h.getreply() return h.file.read() # taken from lib_images.py and modified UGLY COPY def encode_multipart_formdata(fields): """ fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body if reason == None: reason = input(u'Please enter a reason for the deletion:') answer = 'y' if prompt: answer = input(u'Do you want to delete %s? [y|N]' % pl.linkname()) if answer in ['y', 'Y']: output(u'Deleting page %s...' % pl.linkname()) token = pl.site().getToken() returned_html = post_multipart(pl.site().hostname(), pl.site().delete_address(pl.urlname()), (('wpReason', reason), ('wpConfirm', '1'), ('wpEditToken', token)), pl.site().cookies()) # check if deletion was successful # therefore, we need to know what the MediaWiki software says after # a successful deletion deleted_msg = mediawiki_messages.get('actioncomplete') deleted_msg = re.escape(deleted_msg) deleted_msgR = re.compile(deleted_msg) if deleted_msgR.search(returned_html): output(u'Deletion successful.') else: output(u'Deletion failed:.') try: ibegin = returned_html.index('<!-- start content -->') + 22 iend = returned_html.index('<!-- end content -->') except ValueError: # if begin/end markers weren't found, show entire HTML file output(returned_html, myencoding()) else: # otherwise, remove the irrelevant sections returned_html = returned_html[ibegin:iend] output(returned_html, myencoding())
answer = input(u'Do you want to delete %s? [y|N]' % pl.linkname())
answer = inputChoice(u'Do you want to delete %s?' % self.linkname(), ['Yes', 'No'], ['y', 'N'], 'N')
def encode_multipart_formdata(fields): """ fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body
output(u'Deleting page %s...' % pl.linkname()) token = pl.site().getToken() returned_html = post_multipart(pl.site().hostname(), pl.site().delete_address(pl.urlname()), (('wpReason', reason), ('wpConfirm', '1'), ('wpEditToken', token)), pl.site().cookies()) deleted_msg = mediawiki_messages.get('actioncomplete') deleted_msg = re.escape(deleted_msg) deleted_msgR = re.compile(deleted_msg) if deleted_msgR.search(returned_html): output(u'Deletion successful.') else: output(u'Deletion failed:.') try: ibegin = returned_html.index('<!-- start content -->') + 22 iend = returned_html.index('<!-- end content -->') except ValueError: output(returned_html, myencoding())
token = self.site().getToken(self) host = self.site().hostname() address = self.site().delete_address(space2underline(self.linkname())) while not self.site().loggedin(check = True): loginMan = login.LoginManager() loginMan.login() predata = [ ('wpReason', reason), ('wpConfirm', '1')] if token: predata.append(('wpEditToken', token)) data = urlencode(tuple(predata)) conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.5) Gecko/20041107 Firefox/1.0") if self.site().cookies(): conn.putheader('Cookie', self.site().cookies()) conn.endheaders() conn.send(data) response = conn.getresponse() data = response.read() conn.close() if data != '': data = data.decode(myencoding()) if mediawiki_messages.get('actioncomplete') in data: output(u'Deletion successful.')
def encode_multipart_formdata(fields): """ fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body
returned_html = returned_html[ibegin:iend] output(returned_html, myencoding())
output(u'Deletion failed:.') try: ibegin = data.index('<!-- start content -->') + 22 iend = data.index('<!-- end content -->') except ValueError: output(data) else: data = data[ibegin:iend] output(data)
def encode_multipart_formdata(fields): """ fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body
R = re.compile(r'\[\[(.+?)\]\]')
R = re.compile(ur'\[\[(.+?)\]\]')
def __iter__(self): site = wikipedia.getSite() f = codecs.open(self.filename, 'r', config.textfile_encoding) R = re.compile(r'\[\[(.+?)\]\]') for pageTitle in R.findall(f.read()): parts = pageTitle.split(':') i = 0 try: fam = wikipedia.Family(parts[i], fatal = False) i += 1 except: fam = site.family if parts[i] in fam.langs: code = parts[i] i += 1 else: code = site.lang pagename = ':'.join(parts[i:]) site = wikipedia.getSite(code = code, fam = fam) yield wikipedia.Page(site, pagename) f.close()
fam == default_family
fam = default_family
def getSite(code = None, fam = None, user=None): if code == None: code = default_code if fam == None: fam == default_family key = '%s:%s'%(fam,code) if not _sites.has_key(key): _sites[key] = Site(code=code, fam=fam, user=user) return _sites[key]
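The getSite fix replaces '==' with '=': the comparison form only evaluates and discards a boolean, leaving fam unset. A self-contained sketch of the cached lookup with the corrected assignment; the defaults and the stand-in tuple are placeholders, not the real Site constructor:

_sites = {}

def getSite(code=None, fam=None, default_code='en', default_family='wikipedia'):
    # 'fam == default_family' compares and throws the result away;
    # plain assignment is what actually fills in the default.
    if code is None:
        code = default_code
    if fam is None:
        fam = default_family
    key = '%s:%s' % (fam, code)
    if key not in _sites:
        _sites[key] = (fam, code)  # stand-in for constructing a real Site
    return _sites[key]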
content = talk.get()
content = talk.get() + "\n\n"
def report(self, url, errorReport, containingPage): """ Tries to add an error report to the talk page belonging to the page containing the dead link. """ if config.report_dead_links_on_talk and not containingPage.isTalkPage(): wikipedia.output(u"** Reporting dead link on talk page...") talk = containingPage.switchTalkPage() try: content = talk.get() if url in content: wikipedia.output(u"** Dead link seems to have already been reported.") return except (wikipedia.NoPage, wikipedia.IsRedirectPage): content = u'' content += wikipedia.translate(wikipedia.getSite(), talk_report) % errorReport talk.put(content)
for (page, date, length, user, comment) in wikipedia.newpages(1000):
for (page, date, length, loggedIn, user, comment) in wikipedia.newpages(1000):
def declare_alternative(self,alt): if not alt in knownwords[self.word]: knownwords[self.word].append(word) newwords.append(self.word) return self.alternatives
'da':u'Robot: Fjerner fra kategori %s', 'de':u'Bot: Entferne aus Kategorie %s', 'en':u'Robot: Removing from category %s', 'es':u'Bot: Eliminada de la categoría %s', 'is':u'Vélmenni: Fjarlægi [[Flokkur:%s]]', 'nl':u'Bot: Verwijderd uit Categorie %s', 'pt':u'Bot: Removendo [[Categoria:%s]]',
'da':u'Robot: Fjerner fra %s', 'de':u'Bot: Entferne aus %s', 'en':u'Robot: Removing from %s', 'es':u'Bot: Eliminada de la %s', 'is':u'Vélmenni: Fjarlægi [[%s]]', 'nl':u'Bot: Verwijderd uit %s', 'pt':u'Bot: Removendo [[%s]]',
def run(self): articles = self.oldCat.articles(recurse = 0)
cat.delete(reason)
self.cat.delete(reason)
def run(self): articles = self.cat.articles(recurse = 0) if len(articles) == 0: wikipedia.output(u'There are no articles in category %s' % self.cat.title()) else: for article in articles: catlib.change_category(article, self.cat, None) # Also removes the category tag from subcategories' pages subcategories = self.cat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category %s' % self.cat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.cat.title(), None) if self.cat.exists() and self.cat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), self.deletion_reason_remove) cat.delete(reason)
x2=wikipedia.PageLink(wikipedia.getSite(code=newcode, fam=site.family), newname.lower())
x2=wikipedia.PageLink(wikipedia.getSite(code=newcode, fam=site.family), newname[0].lower() + newname[1:])
def sametranslate(pl, arr, same): site = pl.site() for newcode in site.family.seriouslangs: # Put as suggestion into array newname = pl.linkname() if newcode in ['eo','cs'] and same == 'name': newname = newname.split(' ') newname[-1] = newname[-1].upper() newname = ' '.join(newname) x=wikipedia.PageLink(wikipedia.getSite(code=newcode, fam=site.family), newname) x2=wikipedia.PageLink(wikipedia.getSite(code=newcode, fam=site.family), newname.lower()) if x not in arr: if same == "wiktionary": if site.language() in site.family.nocapitalize: if newcode in site.family.nocapitalize: arr[x] = None elif pl.linkname()[0].upper() == pl.linkname()[0]: arr[x] = None else: arr[x] = None arr[x2] = None else: arr[x] = None
arr[x2] = None
if newcode in site.family.nocapitalize: arr[x2] = None
def sametranslate(pl, arr, same): site = pl.site() for newcode in site.family.seriouslangs: # Put as suggestion into array newname = pl.linkname() if newcode in ['eo','cs'] and same == 'name': newname = newname.split(' ') newname[-1] = newname[-1].upper() newname = ' '.join(newname) x=wikipedia.PageLink(wikipedia.getSite(code=newcode, fam=site.family), newname) x2=wikipedia.PageLink(wikipedia.getSite(code=newcode, fam=site.family), newname.lower()) if x not in arr: if same == "wiktionary": if site.language() in site.family.nocapitalize: if newcode in site.family.nocapitalize: arr[x] = None elif pl.linkname()[0].upper() == pl.linkname()[0]: arr[x] = None else: arr[x] = None arr[x2] = None else: arr[x] = None
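The pair of records above replaces newname.lower() with newname[0].lower() + newname[1:]. Read together with the nocapitalize checks in the context, the intent appears to be to offer an alternative title that differs only in the capitalization of its first letter, instead of lowercasing the whole title. A small illustration (the title is a made-up example):

    newname = u'McDonald restaurant'
    print(newname.lower())                    # 'mcdonald restaurant' - mangles inner capitals
    print(newname[0].lower() + newname[1:])   # 'mcDonald restaurant' - only the leading letter changes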
if hasattr(self, '_getexception'):
if hasattr(self, '_redirarg') and not get_redirect: raise IsRedirectPage, self._redirarg elif hasattr(self, '_getexception'):
def get(self, read_only = False, force = False, get_redirect=False, throttle = True): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code:
elif hasattr(self, '_redirarg') and not get_redirect: raise IsRedirectPage, self._redirarg
def get(self, read_only = False, force = False, get_redirect=False, throttle = True): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code:
if text.startswith('\r\n'):
if text and text.startswith('\r\n'):
def removeLanguageLinks(text): """Given the wiki-text of a page, return that page with all interwiki links removed. If a link to an unknown language is encountered, a warning is printed.""" for code in langs: text=re.sub(r'\[\['+code+':([^\]]*)\]\]', '', text) m=re.search(r'\[\[([a-z][a-z]):([^\]]*)\]\]', text) if m: print "WARNING: Link to unknown language %s name %s"%(m.group(1), m.group(2)) # Remove white space at the beginning while 1: if text.startswith('\r\n'): text=text[2:] elif text.startswith(' '): # This assumes that the first line NEVER starts with a space! text=text[1:] else: break # Remove white space at the end while 1: if text[-1:] in '\r\n \t': text=text[:-1] else: break # Add final newline back in text += '\n' return text
elif text.startswith(' '):
elif text and text.startswith(' '):
def removeLanguageLinks(text): """Given the wiki-text of a page, return that page with all interwiki links removed. If a link to an unknown language is encountered, a warning is printed.""" for code in langs: text=re.sub(r'\[\['+code+':([^\]]*)\]\]', '', text) m=re.search(r'\[\[([a-z][a-z]):([^\]]*)\]\]', text) if m: print "WARNING: Link to unknown language %s name %s"%(m.group(1), m.group(2)) # Remove white space at the beginning while 1: if text.startswith('\r\n'): text=text[2:] elif text.startswith(' '): # This assumes that the first line NEVER starts with a space! text=text[1:] else: break # Remove white space at the end while 1: if text[-1:] in '\r\n \t': text=text[:-1] else: break # Add final newline back in text += '\n' return text
if text[-1:] in '\r\n \t':
if text and text[-1:] in '\r\n \t':
def removeLanguageLinks(text): """Given the wiki-text of a page, return that page with all interwiki links removed. If a link to an unknown language is encountered, a warning is printed.""" for code in langs: text=re.sub(r'\[\['+code+':([^\]]*)\]\]', '', text) m=re.search(r'\[\[([a-z][a-z]):([^\]]*)\]\]', text) if m: print "WARNING: Link to unknown language %s name %s"%(m.group(1), m.group(2)) # Remove white space at the beginning while 1: if text.startswith('\r\n'): text=text[2:] elif text.startswith(' '): # This assumes that the first line NEVER starts with a space! text=text[1:] else: break # Remove white space at the end while 1: if text[-1:] in '\r\n \t': text=text[:-1] else: break # Add final newline back in text += '\n' return text
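The three guards added above (text and ...) look redundant at first glance, but they are not equivalent for an empty string: slicing an empty string is safe and startswith simply returns False, yet the empty string is a substring of every string, so text[-1:] in '\r\n \t' evaluates to True once text is exhausted and the trailing-whitespace loop would never terminate. A stripped-down sketch of that corner case:

    text = ''
    print(text.startswith('\r\n'))   # False - the leading-whitespace loop just stops
    print(text[-1:])                 # ''    - slicing never raises
    print(text[-1:] in '\r\n \t')    # True  - '' counts as a substring of any string

    def strip_trailing(text):
        # the added "text and" is what breaks the loop once the string is empty
        while text and text[-1:] in '\r\n \t':
            text = text[:-1]
        return text

    assert strip_trailing('abc \r\n') == 'abc'
    assert strip_trailing('') == ''   # would loop forever without the guard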
templateRegex = re.compile(r'\{\{([mM][sS][gG]:)?' + templateName + '(?P<parameters>\|[^}]+|)}}')
templateRegex = re.compile(r'\{\{ *([mM][sS][gG]:)?' + templateName + ' *(?P<parameters>\|[^}]+|) *}}')
def __iter__(self): """ Yield page objects until the entire XML dump has been read. """ import xmlreader mysite = wikipedia.getSite() dump = xmlreader.XmlDump(self.xmlfilename) # regular expression to find the original template. # {{vfd}} does the same thing as {{Vfd}}, so both will be found. # The old syntax, {{msg:vfd}}, will also be found. # TODO: check site.nocapitalize() templateName = self.template.titleWithoutNamespace() if wikipedia.getSite().nocapitalize: old = self.old else: templateName = '[' + templateName[0].upper() + templateName[0].lower() + ']' + templateName[1:] templateName = re.sub(' ', '[_ ]', templateName) templateRegex = re.compile(r'\{\{([mM][sS][gG]:)?' + templateName + '(?P<parameters>\|[^}]+|)}}') for entry in dump.parse(): if templateRegex.search(entry.text): page = wikipedia.Page(mysite, entry.title) yield page
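The widened pattern above inserts ' *' around the optional msg: prefix and the parameter group, so transclusions written with stray spaces, such as {{ vfd }} or {{Vfd |reason}}, are still found. A small demonstration; the template name is a hypothetical example built the same way as in the context, i.e. with a [Vv] first-letter class:

    import re

    templateName = '[Vv]fd'   # hypothetical, first letter made case-insensitive as in the context

    old_re = re.compile(r'\{\{([mM][sS][gG]:)?' + templateName + r'(?P<parameters>\|[^}]+|)}}')
    new_re = re.compile(r'\{\{ *([mM][sS][gG]:)?' + templateName + r' *(?P<parameters>\|[^}]+|) *}}')

    for s in ['{{vfd}}', '{{ vfd }}', '{{Vfd |reason}}', '{{msg:vfd|reason}}']:
        print('%-20s old=%-5s new=%s' % (s, bool(old_re.search(s)), bool(new_re.search(s))))
    # '{{ vfd }}' and '{{Vfd |reason}}' only match the whitespace-tolerant pattern.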
allTemplates = (', ').join(old)
if isinstance(self.old, list): allTemplates = (', ').join(old) else: allTemplates = old
def __init__(self, generator, old, new = None, remove = False, editSummary = '', acceptAll = False, extras = False): """ Arguments: * generator - A page generator. * old - The title of the old template (without namespace) * new - The title of the new template (without namespace), or None if you want to substitute the template with its text. * remove - True if the template should be removed. """ self.generator = generator self.old = old self.new = new self.remove = remove
templateRegex = re.compile(r'\{\{([mM][sS][gG]:)?' + old + '(?P<parameters>\|[^}]+|)}}')
templateRegex = re.compile(r'\{\{ *(?:[Tt]emplate:|[mM][sS][gG]:)?' + old + ' *(?P<parameters>\|[^}]+|) *}}')
def run(self): """ Starts the robot's action. """ # regular expression to find the original template. # {{vfd}} does the same thing as {{Vfd}}, so both will be found. # The old syntax, {{msg:vfd}}, will also be found. # The group 'parameters' will either match the parameters, or an # empty string if there are none.
def __init__(self, oldCatTitle, newCatTitle, batchMode = False, editSummary = '', inPlace = False):
def __init__(self, oldCatTitle, newCatTitle, batchMode = False, editSummary = '', inPlace = False, moveCatPage = True):
def __init__(self, oldCatTitle, newCatTitle, batchMode = False, editSummary = '', inPlace = False): self.editSummary = editSummary self.inPlace = inPlace self.oldCat = catlib.Category(wikipedia.getSite(), 'Category:' + oldCatTitle) self.newCatTitle = newCatTitle # set edit summary message
catlib.change_category(article, self.oldCat, newCat, inPlace=inPlace)
catlib.change_category(article, self.oldCat, newCat, inPlace=self.inPlace)
def run(self): newCat = catlib.Category(wikipedia.getSite(), 'Category:' + self.newCatTitle) gen = pagegenerators.CategorizedPageGenerator(self.oldCat, recurse = False) preloadingGen = pagegenerators.PreloadingGenerator(gen) for article in preloadingGen: catlib.change_category(article, self.oldCat, newCat, inPlace=inPlace) # TODO: create subcategory generator subcategories = self.oldCat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category ' + self.oldCat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.oldCat, newCat, inPlace=inPlace) if self.oldCat.exists(): # try to copy page contents to new cat page if self.oldCat.copyAndKeep(newCatTitle, wikipedia.translate(wikipedia.getSite(), cfd_templates)): if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle if batchMode == True: self.oldCat.delete(reason, False) else: self.oldCat.delete(reason, True) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
if self.oldCat.exists():
if self.oldCat.exists() and self.moveCatPage:
def run(self): newCat = catlib.Category(wikipedia.getSite(), 'Category:' + self.newCatTitle) gen = pagegenerators.CategorizedPageGenerator(self.oldCat, recurse = False) preloadingGen = pagegenerators.PreloadingGenerator(gen) for article in preloadingGen: catlib.change_category(article, self.oldCat, newCat, inPlace=inPlace) # TODO: create subcategory generator subcategories = self.oldCat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category ' + self.oldCat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.oldCat, newCat, inPlace=inPlace) if self.oldCat.exists(): # try to copy page contents to new cat page if self.oldCat.copyAndKeep(newCatTitle, wikipedia.translate(wikipedia.getSite(), cfd_templates)): if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle if batchMode == True: self.oldCat.delete(reason, False) else: self.oldCat.delete(reason, True) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
if self.oldCat.copyAndKeep(newCatTitle, wikipedia.translate(wikipedia.getSite(), cfd_templates)):
if self.oldCat.copyAndKeep(self.newCatTitle, wikipedia.translate(wikipedia.getSite(), cfd_templates)):
def run(self): newCat = catlib.Category(wikipedia.getSite(), 'Category:' + self.newCatTitle) gen = pagegenerators.CategorizedPageGenerator(self.oldCat, recurse = False) preloadingGen = pagegenerators.PreloadingGenerator(gen) for article in preloadingGen: catlib.change_category(article, self.oldCat, newCat, inPlace=inPlace) # TODO: create subcategory generator subcategories = self.oldCat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category ' + self.oldCat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.oldCat, newCat, inPlace=inPlace) if self.oldCat.exists(): # try to copy page contents to new cat page if self.oldCat.copyAndKeep(newCatTitle, wikipedia.translate(wikipedia.getSite(), cfd_templates)): if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle if batchMode == True: self.oldCat.delete(reason, False) else: self.oldCat.delete(reason, True) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle
reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % self.newCatTitle
def run(self): newCat = catlib.Category(wikipedia.getSite(), 'Category:' + self.newCatTitle) gen = pagegenerators.CategorizedPageGenerator(self.oldCat, recurse = False) preloadingGen = pagegenerators.PreloadingGenerator(gen) for article in preloadingGen: catlib.change_category(article, self.oldCat, newCat, inPlace=inPlace) # TODO: create subcategory generator subcategories = self.oldCat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category ' + self.oldCat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.oldCat, newCat, inPlace=inPlace) if self.oldCat.exists(): # try to copy page contents to new cat page if self.oldCat.copyAndKeep(newCatTitle, wikipedia.translate(wikipedia.getSite(), cfd_templates)): if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % newCatTitle if batchMode == True: self.oldCat.delete(reason, False) else: self.oldCat.delete(reason, True) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
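Several of the fixes in this stretch (cat.delete to self.cat.delete, bare newCatTitle, inPlace and batchMode to their self. counterparts) share one root cause: inside a method, Python resolves a bare name against locals and globals only, never implicitly against the instance, so each of these typically raises NameError at runtime (or silently picks up an unrelated module-level name) until the self. prefix is added. A minimal sketch, with a made-up class:

    class MoveBot:
        def __init__(self, newCatTitle):
            self.newCatTitle = newCatTitle

        def run_buggy(self):
            return 'moving to %s' % newCatTitle        # NameError: no local/global of that name

        def run_fixed(self):
            return 'moving to %s' % self.newCatTitle   # explicit instance attribute lookup

    bot = MoveBot('Example category')
    print(bot.run_fixed())
    try:
        bot.run_buggy()
    except NameError as e:
        print('buggy version raises: %s' % e)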
m = re.search("== *%s *==" % section, text)
m = re.search("== *%s *==" % re.escape(section), text)
def oneDone(self, entry): title = entry.title username = entry.username ipedit = entry.ipedit timestamp = entry.timestamp text = entry.text editRestriction = entry.editRestriction moveRestriction = entry.moveRestriction pl = Page(self.site, title) for pl2 in self.pages: if pl2.sectionFreeTitle() == pl.sectionFreeTitle(): if not hasattr(pl2,'_contents') and not hasattr(pl2,'_getexception'): break else: output(u"BUG>> title %s (%s) not found in list" % (title, pl.aslink(forceInterwiki=True))) output(u'Expected one of: %s' % u','.join([pl2.aslink(forceInterwiki=True) for pl2 in self.pages])) raise PageNotFound
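The re.escape added above matters whenever the section title contains regex metacharacters; without it the title is interpreted as a pattern, which can either fail to match its own heading or fail to compile at all. For example (the section title is illustrative):

    import re

    text = u'== C++ (programming language) ==\nsection body'
    section = u'C++ (programming language)'

    try:
        re.search("== *%s *==" % section, text)             # '+' and '(' act as regex operators here
    except re.error as e:
        print('unescaped pattern does not even compile: %s' % e)

    m = re.search("== *%s *==" % re.escape(section), text)  # title matched literally
    print(m.group(0))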
'et': u'Kasuteju',
'et': u'Kasutaja',
def __init__(self):
'es': u'Imagen_Discusión',
'es': u'Imagen Discusión',
def __init__(self):
'pt': u'Predefinição_Discussão',
'pt': u'Predefinição Discussão',
def __init__(self):
'pt': u'Ajuda_Discussão',
'pt': u'Ajuda Discussão',
def __init__(self):
imagelist = []
def main(): # if -file is not used, this temporary array is used to read the page title. pageTitle = [] page = None gen = None imagelist = [] interwiki = False for arg in sys.argv[1:]: #for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'imagetransfer') if arg: if arg == '-interwiki': interwiki = True elif arg.startswith('-file'): if len(arg) == 5: filename = wikipedia.input(u'Please enter the list\'s filename: ') else: filename = arg[6:] gen = pagegenerators.TextfileGenerator(filename) else: pageTitle.append(arg) if not gen: # if the page title is given as a command line argument, # connect the title's parts with spaces if pageTitle != []: pageTitle = ' '.join(pageTitle) page = wikipedia.Page(wikipedia.getSite(), pageTitle) # if no page title was given as an argument, and none was # read from a file, query the user if not page: pageTitle = wikipedia.input(u'Which page to check: ') page = wikipedia.Page(wikipedia.getSite(), pageTitle) # generator which will yield only a single Page gen = iter([page]) bot = ImageTransferBot(gen, interwiki = interwiki) bot.run()
bot = ImageTransferBot(gen, interwiki = interwiki)
if not targetLang and not targetFamily: targetSite = wikipedia.getSite('commons', 'commons') else: if not targetLang: targetLang = wikipedia.getSite().language if not targetFamily: targetFamily = wikipedia.getSite().family targetSite = wikipedia.Site(targetLang, targetFamily) bot = ImageTransferBot(gen, interwiki = interwiki, targetSite = targetSite)
def main(): # if -file is not used, this temporary array is used to read the page title. pageTitle = [] page = None gen = None imagelist = [] interwiki = False for arg in sys.argv[1:]: #for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'imagetransfer') if arg: if arg == '-interwiki': interwiki = True elif arg.startswith('-file'): if len(arg) == 5: filename = wikipedia.input(u'Please enter the list\'s filename: ') else: filename = arg[6:] gen = pagegenerators.TextfileGenerator(filename) else: pageTitle.append(arg) if not gen: # if the page title is given as a command line argument, # connect the title's parts with spaces if pageTitle != []: pageTitle = ' '.join(pageTitle) page = wikipedia.Page(wikipedia.getSite(), pageTitle) # if no page title was given as an argument, and none was # read from a file, query the user if not page: pageTitle = wikipedia.input(u'Which page to check: ') page = wikipedia.Page(wikipedia.getSite(), pageTitle) # generator which will yield only a single Page gen = iter([page]) bot = ImageTransferBot(gen, interwiki = interwiki) bot.run()
description = wikipedia.unicode2html(description, wikipedia.code2encoding(wikipedia.mylang))
try: description = description.encode(wikipedia.code2encoding(wikipedia.mylang)) except UnicodeEncodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.code2encoding(wikipedia.mylang)) except UnicodeDecodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.code2encoding(wikipedia.mylang))
def get_image(original_url, source_wiki, original_description, keep=False, debug=False): # work with a copy of argument variables so we can reuse the # original ones if the upload fails fn = original_url description = original_description # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] # convert ISO 8859-1 to Unicode, or parse UTF-8 if source_wiki != None: fn = unicode(fn, wikipedia.code2encoding(source_wiki)) if not keep: print "The filename on wikipedia will default to:",fn newfn = raw_input("Better name : ") if newfn: fn = unicode(newfn, config.console_encoding) try: fn = fn.encode(wikipedia.code2encoding(wikipedia.mylang)) except UnicodeDecodeError: print "This filename can't be displayed in " + wikipedia.code2encoding(wikipedia.mylang) sys.exit(1) # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # A proper description for the submission. if description=='': description = wikipedia.input('Give a description for the image:') else: print ("The suggested description is:") print print wikipedia.output(description) print print ("Enter return to use this description, enter a text to add something") print ("at the end, or enter = followed by a text to replace the description.") newtext = wikipedia.input('Enter return, text or =text : ') if newtext=='': pass elif newtext[0]=='=': description=newtext[1:] else: description=description+' '+newtext # try to encode the description to the encoding used by the home Wikipedia. # if that's not possible (e.g. because there are non-Latin-1 characters and # the home Wikipedia uses Latin-1), convert all non-ASCII characters to # HTML entities. description = wikipedia.unicode2html(description, wikipedia.code2encoding(wikipedia.mylang)) # don't upload if we're in debug mode if not debug: returned_html = post_multipart(wikipedia.family.hostname(wikipedia.mylang), wikipedia.family.upload_address(wikipedia.mylang), (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpIgnoreWarning', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) # do we know how the "success!" HTML page should look like? success_msg = mediawiki_messages.get('successfulupload') success_msgR = re.compile(re.escape(success_msg)) if success_msgR.search(returned_html): print "Upload successful." else: # dump the HTML page print returned_html + "\n\n" answer = raw_input("Upload of " + fn + " failed. Above you see the HTML page which was returned by MediaWiki. Try again? [y|N]") if answer in ["y", "Y"]: return get_image(original_url, source_wiki, original_description, debug) else: return return fn
fixedSites += ' ' + page_title
fixedSites += ' ' + article
def treat(page_title, site = None): ''' Loads a page, converts all HTML tables in its text to wiki syntax, and saves the converted text. Returns True if the converted table was successfully saved, otherwise returns False. ''' if site is None: site = wikipedia.getSite() pl = wikipedia.PageLink(site, page_title) try: text = pl.get() except wikipedia.NoPage: print "ERROR: couldn't find " + page_title return False except wikipedia.LockedPage: wikipedia.output(u'Skipping locked page %s' % page_title) return False except wikipedia.IsRedirectPage: wikipedia.output(u'Skipping redirect %s' % page_title) return False converted_text = convert(text) # If the user pressed 'n' if not converted_text: return False else: pl.put(converted_text) return True
notFixedSites += ' ' + page_title
notFixedSites += ' ' + article
def treat(page_title, site = None): ''' Loads a page, converts all HTML tables in its text to wiki syntax, and saves the converted text. Returns True if the converted table was successfully saved, otherwise returns False. ''' if site is None: site = wikipedia.getSite() pl = wikipedia.PageLink(site, page_title) try: text = pl.get() except wikipedia.NoPage: print "ERROR: couldn't find " + page_title return False except wikipedia.LockedPage: wikipedia.output(u'Skipping locked page %s' % page_title) return False except wikipedia.IsRedirectPage: wikipedia.output(u'Skipping redirect %s' % page_title) return False converted_text = convert(text) # If the user pressed 'n' if not converted_text: return False else: pl.put(converted_text) return True
def __init__(self, generator, old, new = None, remove = False, customSummary = False, editSummary = '', acceptAll = False):
def __init__(self, generator, old, new = None, remove = False, editSummary = '', acceptAll = False):
def __init__(self, generator, old, new = None, remove = False, customSummary = False, editSummary = '', acceptAll = False): """ Arguments: * generator - A page generator. * old - The title of the old template (without namespace) * new - The title of the new template (without namespace), or None if you want to substitute the template with its text. * remove - True if the template should be removed. """ self.generator = generator self.old = old self.new = new self.remove = remove
self.customSummary = customSummary
def __init__(self, generator, old, new = None, remove = False, customSummary = False, editSummary = '', acceptAll = False): """ Arguments: * generator - A page generator. * old - The title of the old template (without namespace) * new - The title of the new template (without namespace), or None if you want to substitute the template with its text. * remove - True if the template should be removed. """ self.generator = generator self.old = old self.new = new self.remove = remove
if self.customSummary:
if self.editSummary:
def __init__(self, generator, old, new = None, remove = False, customSummary = False, editSummary = '', acceptAll = False): """ Arguments: * generator - A page generator. * old - The title of the old template (without namespace) * new - The title of the new template (without namespace), or None if you want to substitute the template with its text. * remove - True if the template should be removed. """ self.generator = generator self.old = old self.new = new self.remove = remove
customSummary = False
def main(): template_names = [] resolve = False remove = False namespaces = [] customSummary = False editSummary = '' acceptAll = False # If xmlfilename is None, references will be loaded from the live wiki. xmlfilename = None new = None # read command line parameters for arg in wikipedia.handleArgs(): if arg == '-remove': remove = True elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] elif arg.startswith('-namespace:'): namespaces.append(int(arg[len('-namespace:'):])) elif arg.startswith('-summary:'): customSummary = True editSummary = arg[len('-summary:'):] elif arg.startswith('-always'): acceptAll = True else: template_names.append(arg) if len(template_names) == 0 or len(template_names) > 2: wikipedia.showHelp() sys.exit() old = template_names[0] if len(template_names) == 2: new = template_names[1] mysite = wikipedia.getSite() ns = mysite.template_namespace() oldTemplate = wikipedia.Page(mysite, ns + ':' + old) if xmlfilename: gen = XmlDumpTemplatePageGenerator(oldTemplate, xmlfilename) else: gen = pagegenerators.ReferringPageGenerator(oldTemplate, onlyTemplateInclusion = True) if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = TemplateRobot(preloadingGen, old, new, remove, customSummary, editSummary, acceptAll) bot.run()
customSummary = True
def main(): template_names = [] resolve = False remove = False namespaces = [] customSummary = False editSummary = '' acceptAll = False # If xmlfilename is None, references will be loaded from the live wiki. xmlfilename = None new = None # read command line parameters for arg in wikipedia.handleArgs(): if arg == '-remove': remove = True elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] elif arg.startswith('-namespace:'): namespaces.append(int(arg[len('-namespace:'):])) elif arg.startswith('-summary:'): customSummary = True editSummary = arg[len('-summary:'):] elif arg.startswith('-always'): acceptAll = True else: template_names.append(arg) if len(template_names) == 0 or len(template_names) > 2: wikipedia.showHelp() sys.exit() old = template_names[0] if len(template_names) == 2: new = template_names[1] mysite = wikipedia.getSite() ns = mysite.template_namespace() oldTemplate = wikipedia.Page(mysite, ns + ':' + old) if xmlfilename: gen = XmlDumpTemplatePageGenerator(oldTemplate, xmlfilename) else: gen = pagegenerators.ReferringPageGenerator(oldTemplate, onlyTemplateInclusion = True) if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = TemplateRobot(preloadingGen, old, new, remove, customSummary, editSummary, acceptAll) bot.run()
if namespaces != []:
if namespaces:
def main(): template_names = [] resolve = False remove = False namespaces = [] customSummary = False editSummary = '' acceptAll = False # If xmlfilename is None, references will be loaded from the live wiki. xmlfilename = None new = None # read command line parameters for arg in wikipedia.handleArgs(): if arg == '-remove': remove = True elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] elif arg.startswith('-namespace:'): namespaces.append(int(arg[len('-namespace:'):])) elif arg.startswith('-summary:'): customSummary = True editSummary = arg[len('-summary:'):] elif arg.startswith('-always'): acceptAll = True else: template_names.append(arg) if len(template_names) == 0 or len(template_names) > 2: wikipedia.showHelp() sys.exit() old = template_names[0] if len(template_names) == 2: new = template_names[1] mysite = wikipedia.getSite() ns = mysite.template_namespace() oldTemplate = wikipedia.Page(mysite, ns + ':' + old) if xmlfilename: gen = XmlDumpTemplatePageGenerator(oldTemplate, xmlfilename) else: gen = pagegenerators.ReferringPageGenerator(oldTemplate, onlyTemplateInclusion = True) if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = TemplateRobot(preloadingGen, old, new, remove, customSummary, editSummary, acceptAll) bot.run()
bot = TemplateRobot(preloadingGen, old, new, remove, customSummary, editSummary, acceptAll)
bot = TemplateRobot(preloadingGen, old, new, remove, editSummary, acceptAll)
def main(): template_names = [] resolve = False remove = False namespaces = [] customSummary = False editSummary = '' acceptAll = False # If xmlfilename is None, references will be loaded from the live wiki. xmlfilename = None new = None # read command line parameters for arg in wikipedia.handleArgs(): if arg == '-remove': remove = True elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] elif arg.startswith('-namespace:'): namespaces.append(int(arg[len('-namespace:'):])) elif arg.startswith('-summary:'): customSummary = True editSummary = arg[len('-summary:'):] elif arg.startswith('-always'): acceptAll = True else: template_names.append(arg) if len(template_names) == 0 or len(template_names) > 2: wikipedia.showHelp() sys.exit() old = template_names[0] if len(template_names) == 2: new = template_names[1] mysite = wikipedia.getSite() ns = mysite.template_namespace() oldTemplate = wikipedia.Page(mysite, ns + ':' + old) if xmlfilename: gen = XmlDumpTemplatePageGenerator(oldTemplate, xmlfilename) else: gen = pagegenerators.ReferringPageGenerator(oldTemplate, onlyTemplateInclusion = True) if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = TemplateRobot(preloadingGen, old, new, remove, customSummary, editSummary, acceptAll) bot.run()
self._add(l,i)
l = self._add(l,i)
def _addall(self,l1,l2): l=l1 for i in l2: self._add(l,i) return l
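The reassignment added above (l = self._add(l, i)) only changes behaviour if _add returns the updated list rather than mutating its argument; in that case the original loop throws every result away and _addall hands back l1 untouched. A sketch with a hypothetical, non-mutating _add (the real implementation is not part of this record):

    def _add(l, i):
        # hypothetical helper: returns a new, sorted list instead of mutating l
        if i in l:
            return l
        return sorted(l + [i])

    def _addall_buggy(l1, l2):
        l = l1
        for i in l2:
            _add(l, i)       # return value dropped; l never changes
        return l

    def _addall_fixed(l1, l2):
        l = l1
        for i in l2:
            l = _add(l, i)   # carry the updated list into the next iteration
        return l

    assert _addall_buggy([1, 3], [2, 3, 4]) == [1, 3]
    assert _addall_fixed([1, 3], [2, 3, 4]) == [1, 2, 3, 4]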
family.Family.__init__(self)
def __init__(self):
family.Family.__init__(self)
def __init__(self):
displayedText = text[max(0, m.start() - context):m.end()+context] displayedText = displayedText[:context] + '\x1b[91;1m' + displayedText[context:]
def treat(refpl, thispl): """ Parameters: thispl - The disambiguation page or redirect we don't want anything to link on refpl - A page linking to thispl Returns False if the user pressed q to completely quit the program. Otherwise, returns True. """ try: include = False text=refpl.get(throttle=False) include = True except wikipedia.IsRedirectPage: wikipedia.output(u'%s is a redirect to %s' % (refpl.linkname(), thispl.linkname())) if solve_redirect: choice = wikipedia.input(u'Do you want to make redirect %s point to %s? [y|N]' % (refpl.linkname(), target)) if choice2 == 'y': redir_text = '#REDIRECT [[%s]]' % target refpl.put(redir_text) else: choice = wikipedia.input(u'Do you want to work on pages linking to %s? [y|N|c(hange redirect)]' % refpl.linkname()) if choice == 'y': for ref_redir in getReferences(refpl): refpl_redir=wikipedia.PageLink(mysite, ref_redir) treat(refpl_redir, refpl) elif choice == 'c': text="#%s [[%s]]"%(mysite.redirect(default=True), thispl.linkname()) include = "redirect" if include in [True,"redirect"]: # make a backup of the original text so we can show the changes later original_text=text n = 0 curpos = 0 edited = False # This loop will run until we have finished the current page while True: m=linkR.search(text, pos = curpos) if not m: if n == 0: wikipedia.output(u"No changes necessary in %s" % refpl.linkname()) return True else: # stop loop and save page break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 # Try to standardize the page. if wikipedia.isInterwikiLink(m.group(1)): continue else: linkpl=wikipedia.PageLink(thispl.site(), m.group(1)) # Check whether the link found is to thispl. if linkpl != thispl: continue
displayedText = displayedText[:-context] + '\x1b[0m' + displayedText[-context:]
displayedText = text[max(0, m.start() - context):m.start()] + '\x1b[91;1m' + text[m.start():m.end()] + '\x1b[0m' + text[m.end():m.end()+context]
def treat(refpl, thispl): """ Parameters: thispl - The disambiguation page or redirect we don't want anything to link on refpl - A page linking to thispl Returns False if the user pressed q to completely quit the program. Otherwise, returns True. """ try: include = False text=refpl.get(throttle=False) include = True except wikipedia.IsRedirectPage: wikipedia.output(u'%s is a redirect to %s' % (refpl.linkname(), thispl.linkname())) if solve_redirect: choice = wikipedia.input(u'Do you want to make redirect %s point to %s? [y|N]' % (refpl.linkname(), target)) if choice2 == 'y': redir_text = '#REDIRECT [[%s]]' % target refpl.put(redir_text) else: choice = wikipedia.input(u'Do you want to work on pages linking to %s? [y|N|c(hange redirect)]' % refpl.linkname()) if choice == 'y': for ref_redir in getReferences(refpl): refpl_redir=wikipedia.PageLink(mysite, ref_redir) treat(refpl_redir, refpl) elif choice == 'c': text="#%s [[%s]]"%(mysite.redirect(default=True), thispl.linkname()) include = "redirect" if include in [True,"redirect"]: # make a backup of the original text so we can show the changes later original_text=text n = 0 curpos = 0 edited = False # This loop will run until we have finished the current page while True: m=linkR.search(text, pos = curpos) if not m: if n == 0: wikipedia.output(u"No changes necessary in %s" % refpl.linkname()) return True else: # stop loop and save page break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 # Try to standardize the page. if wikipedia.isInterwikiLink(m.group(1)): continue else: linkpl=wikipedia.PageLink(thispl.site(), m.group(1)) # Check whether the link found is to thispl. if linkpl != thispl: continue
whereClause = ' OR '.join(["old_text RLIKE '%s'" % old.pattern for (old, new) in replacements]) query = u"""SELECT page_namespace, page_title FROM page JOIN text ON (page_id = old_id) WHERE %s LIMIT 20""" % whereClause
whereClause = 'WHERE (%s)' % ' OR '.join(["old_text RLIKE '%s'" % prepareRegexForMySQL(old.pattern) for (old, new) in replacements]) if exceptions: exceptClause = 'AND NOT (%s)' % ' OR '.join(["old_text RLIKE '%s'" % prepareRegexForMySQL(exc.pattern) for exc in exceptions]) else: exceptClause = '' query = u""" SELECT page_namespace, page_title FROM page JOIN text ON (page_id = old_id) %s %s LIMIT 200""" % (whereClause, exceptClause) query = query.encode(wikipedia.getSite().encoding())
def main(): gen = None # How we want to retrieve information on which pages need to be changed. # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # Array which will collect commandline parameters. # First element is original text, second element is replacement text. commandline_replacements = [] # A list of 2-tuples of original text and replacement text. replacements = [] # Don't edit pages which contain certain texts. exceptions = [] # Should the elements of 'replacements' and 'exceptions' be interpreted # as regular expressions? regex = False # Predefined fixes from dictionary 'fixes' (see above). fix = None # the dump's path, either absolute or relative, which will be used when source # is 'xmldump'. xmlFilename = None useSql = False # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # pages which will be processed when the -page parameter is used PageTitles = [] # a page whose referrers will be processed when the -ref parameter is used referredPageTitle = None # a page whose links will be processed when the -links parameter is used linkingPageTitle = None # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None # Google query googleQuery = None # Load default summary message. wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg)) # Read commandline parameters. for arg in wikipedia.handleArgs(): if arg == '-regex': regex = True elif arg.startswith('-file'): if len(arg) >= 6: textfilename = arg[6:] gen = pagegenerators.TextfilePageGenerator(textfilename) elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] cat = catlib.Category(wikipedia.getSite(), 'Category:%s' % categoryname) gen = pagegenerators.CategorizedPageGenerator(cat) elif arg.startswith('-xml'): if len(arg) == 4: xmlFilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlFilename = arg[5:] elif arg =='-sql': useSql = True elif arg.startswith('-page'): if len(arg) == 5: PageTitles.append(wikipedia.input(u'Which page do you want to chage?')) else: PageTitles.append(arg[6:]) source = 'specificPages' elif arg.startswith('-ref'): if len(arg) == 4: referredPageTitle = wikipedia.input(u'Links to which page should be processed?') else: referredPageTitle = arg[5:] referredPage = wikipedia.Page(wikipedia.getSite(), referredPageTitle) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links'): if len(arg) == 6: linkingPageTitle = wikipedia.input(u'Links from which page should be processed?') else: linkingPageTitle = arg[7:] linkingPage = wikipedia.Page(wikipedia.getSite(), linkingPageTitle) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-start'): if len(arg) == 6: firstPageTitle = wikipedia.input(u'Which page do you want to chage?') else: firstPageTitle = arg[7:] namespace = wikipedia.Page(wikipedia.getSite(), firstPageTitle).namespace() gen = pagegenerators.AllpagesPageGenerator(firstPageTitle, namespace) elif arg.startswith('-google'): if len(arg) >= 8: googleQuery = arg[8:] gen = pagegenerators.GoogleSearchPageGenerator(googleQuery) elif arg.startswith('-except:'): 
exceptions.append(arg[8:]) elif arg.startswith('-fix:'): fix = arg[5:] elif arg == '-always': acceptall = True elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) else: commandline_replacements.append(arg) if (len(commandline_replacements) == 2 and fix == None): replacements.append((commandline_replacements[0], commandline_replacements[1])) wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg ) % ' (-' + commandline_replacements[0] + ' +' + commandline_replacements[1] + ')') elif fix == None: old = wikipedia.input(u'Please enter the text that should be replaced:') new = wikipedia.input(u'Please enter the new text:') change = '(-' + old + ' +' + new replacements.append((old, new)) while True: old = wikipedia.input(u'Please enter another text that should be replaced, or press Enter to start:') if old == '': change = change + ')' break new = wikipedia.input(u'Please enter the new text:') change = change + ' & -' + old + ' +' + new replacements.append((old, new)) default_summary_message = wikipedia.translate(wikipedia.getSite(), msg) % change wikipedia.output(u'The summary message will default to: %s' % default_summary_message) summary_message = wikipedia.input(u'Press Enter to use this default message, or enter a description of the changes your bot will make:') if summary_message == '': summary_message = default_summary_message wikipedia.setAction(summary_message) else: # Perform one of the predefined actions. try: fix = fixes[fix] except KeyError: wikipedia.output(u'Available predefined fixes are: %s' % fixes.keys()) wikipedia.stopme() sys.exit() if fix.has_key('regex'): regex = fix['regex'] if fix.has_key('msg'): wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), fix['msg'])) if fix.has_key('exceptions'): exceptions = fix['exceptions'] replacements = fix['replacements'] # already compile all regular expressions here to save time later for i in range(len(replacements)): old, new = replacements[i] if not regex: old = re.escape(old) oldR = re.compile(old, re.UNICODE) replacements[i] = oldR, new for i in range(len(exceptions)): exception = exceptions[i] if not regex: exception = re.escape(exception) exceptionR = re.compile(exception, re.UNICODE) exceptions[i] = exceptionR if xmlFilename: gen = XmlDumpReplacePageGenerator(xmlfilename, replacements, exceptions) elif useSql: whereClause = ' OR '.join(["old_text RLIKE '%s'" % old.pattern for (old, new) in replacements]) query = u"""SELECT page_namespace, page_title FROM page JOIN text ON (page_id = old_id) WHERE %s LIMIT 20""" % whereClause print query gen = pagegenerators.MySQLPageGenerator(query) elif PageTitles: pages = [wikipedia.Page(wikipedia.getSite(), PageTitle) for PageTitle in PageTitles] gen = iter(pages) if not gen: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = ReplaceRobot(preloadingGen, replacements, exceptions, acceptall) bot.run()
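The reworked SQL above does three things: it wraps the OR'ed RLIKE conditions in an explicit WHERE (...), optionally appends an AND NOT (...) clause built from the exception patterns, and passes every pattern through prepareRegexForMySQL before splicing it into the query string. A sketch of how such a query is assembled; the two replacement regexes and the escaping helper shown here are illustrative stand-ins, and only the clause structure follows the added code:

    import re

    def prepareRegexForMySQL(pattern):
        # illustrative stand-in: escape backslashes and quotes so the pattern can
        # sit inside a single-quoted SQL literal; the real helper may differ
        return pattern.replace('\\', '\\\\').replace("'", "\\'")

    replacements = [(re.compile(r'\bcolour\b'), 'color'),
                    (re.compile(r'\bbehaviour\b'), 'behavior')]
    exceptions = [re.compile(r'\{\{sic\}\}')]

    whereClause = 'WHERE (%s)' % ' OR '.join(
        "old_text RLIKE '%s'" % prepareRegexForMySQL(old.pattern)
        for (old, new) in replacements)
    if exceptions:
        exceptClause = 'AND NOT (%s)' % ' OR '.join(
            "old_text RLIKE '%s'" % prepareRegexForMySQL(exc.pattern)
            for exc in exceptions)
    else:
        exceptClause = ''

    query = u"""
    SELECT page_namespace, page_title
    FROM page
    JOIN text ON (page_id = old_id)
    %s
    %s
    LIMIT 200""" % (whereClause, exceptClause)
    print(query)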