Columns: rem (string, lengths 0–322k), add (string, lengths 0–2.05M), context (string, lengths 8–228k)
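Each row pairs a removed code fragment (rem) with its replacement (add) and the enclosing function (context), apparently drawn from old pywikipedia bot scripts. As a minimal sketch (not part of the dataset itself), the rows could be loaded and inspected with the Hugging Face datasets library; the repository id "your-username/code-edits" below is hypothetical and would need to be replaced with the dataset's real name.

# Sketch: load the three string columns and peek at a few rows.
from datasets import load_dataset

ds = load_dataset("your-username/code-edits", split="train")  # hypothetical repo id

for row in ds.select(range(3)):            # inspect the first three rows
    print("--- removed ---")
    print(row["rem"])                       # line(s) deleted by the edit
    print("--- added ---")
    print(row["add"])                       # line(s) introduced by the edit
    print("--- context (enclosing function, truncated) ---")
    print(row["context"][:200])             # contexts can run to ~228k characters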
newcat = wikipedia.input('Category to add (do not give namespace) : ', encode = True) newcat = newcat
newcat = wikipedia.input('Category to add (do not give namespace) : ')
def add_category(): print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input('Wikipedia page with list of pages to change: ') if listpage: pl = wikipedia.PageLink(wikipedia.mylang, listpage) pagenames = pl.links() else: refpage = wikipedia.input('Wikipedia page that is now linked to: ') pl = wikipedia.PageLink(wikipedia.mylang, refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input('Category to add (do not give namespace) : ', encode = True) newcat = newcat newcat = newcat.encode(wikipedia.code2encoding(wikipedia.mylang)) newcat = newcat[:1].capitalize() + newcat[1:] print newcat ns = wikipedia.family.category_namespaces(wikipedia.mylang) catpl = wikipedia.PageLink(wikipedia.mylang, ns[0].encode(wikipedia.code2encoding(wikipedia.mylang))+':'+newcat) print "Will add %s"%catpl.aslocallink() answer = '' for nm in pagenames: pl2 = wikipedia.PageLink(wikipedia.mylang, nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input("%s [y/n/a(ll)] : "%(pl2.asasciilink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input("This should be used if and only if you are sure that your links are correct !!! Are you sure ? [y/n] : ") if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: print "%s doesn't exit yet. Ignoring."%(pl2.aslocallink()) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(wikipedia.mylang,arg.args[0]) print "WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink()) else: print "Current categories: ",cats if catpl in cats: print "%s already has %s"%(pl.aslocallink(),catpl.aslocallink()) else: cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text, comment = catpl.aslocallink().encode(wikipedia.code2encoding(wikipedia.mylang)))
if othersite and othersite.lang == self.lang:
if othersite and othersite.lang != self.lang:
def linkto(self, linkname, othersite = None): if othersite and othersite.lang == self.lang: return '[[%s:%s]]' % (self.lang, linkname) else: return '[[%s]]' % linkname
self.problem('Someone refers to %s with us' % pl.asasciilink())
self.problem(err)
def assemble(self, returnonquestion = False, askall = False): new = {} for pl in self.done.keys(): code = pl.code() if code == wikipedia.mylang and pl.exists() and not pl.isRedirectPage() and not pl.isEmpty(): if pl != self.inpl: if returnonquestion: return None self.problem('Someone refers to %s with us' % pl.asasciilink()) if globalvar.autonomous: return None elif pl.exists() and not pl.isRedirectPage(): if new.has_key(code) and new[code] is None: print "NOTE: Ignoring %s"%(pl.asasciilink()) elif new.has_key(code) and new[code] != pl: if returnonquestion: return None self.problem("'%s' as well as '%s'" % (new[code].asasciilink(), pl.asasciilink())) if globalvar.autonomous: return None # beep before asking question if globalvar.bell: sys.stdout.write('\07') while 1: answer = raw_input("Use (f)ormer or (l)atter or (n)either or (g)ive up?") if answer.startswith('f'): break elif answer.startswith('l'): new[pl.code()] = pl break elif answer.startswith('n'): new[pl.code()] = None break elif answer.startswith('g'): # Give up return None elif code in ('zh-tw','zh-cn') and new.has_key('zh') and new['zh'] is not None: print "NOTE: Ignoring %s, using %s"%(new['zh'].asasciilink(),pl.asasciilink()) if self.ask(askall, pl): new['zh'] = None # Remove the global zh link new[code] = pl # Add the more precise one elif code == 'zh' and ( (new.has_key('zh-tw') and new['zh-tw'] is not None) or (new.has_key('zh-cn') and new['zh-cn'] is not None)): print "NOTE: Ignoring %s"%(pl.asasciilink()) pass # do not add global zh if there is a specific zh-tw or zh-cn elif code not in new: if self.ask(askall, pl): new[code] = pl
self.problem("'%s' as well as '%s'" % (new[code].asasciilink(), pl.asasciilink()))
self.problem(err)
def assemble(self, returnonquestion = False, askall = False): new = {} for pl in self.done.keys(): code = pl.code() if code == wikipedia.mylang and pl.exists() and not pl.isRedirectPage() and not pl.isEmpty(): if pl != self.inpl: if returnonquestion: return None self.problem('Someone refers to %s with us' % pl.asasciilink()) if globalvar.autonomous: return None elif pl.exists() and not pl.isRedirectPage(): if new.has_key(code) and new[code] is None: print "NOTE: Ignoring %s"%(pl.asasciilink()) elif new.has_key(code) and new[code] != pl: if returnonquestion: return None self.problem("'%s' as well as '%s'" % (new[code].asasciilink(), pl.asasciilink())) if globalvar.autonomous: return None # beep before asking question if globalvar.bell: sys.stdout.write('\07') while 1: answer = raw_input("Use (f)ormer or (l)atter or (n)either or (g)ive up?") if answer.startswith('f'): break elif answer.startswith('l'): new[pl.code()] = pl break elif answer.startswith('n'): new[pl.code()] = None break elif answer.startswith('g'): # Give up return None elif code in ('zh-tw','zh-cn') and new.has_key('zh') and new['zh'] is not None: print "NOTE: Ignoring %s, using %s"%(new['zh'].asasciilink(),pl.asasciilink()) if self.ask(askall, pl): new['zh'] = None # Remove the global zh link new[code] = pl # Add the more precise one elif code == 'zh' and ( (new.has_key('zh-tw') and new['zh-tw'] is not None) or (new.has_key('zh-cn') and new['zh-cn'] is not None)): print "NOTE: Ignoring %s"%(pl.asasciilink()) pass # do not add global zh if there is a specific zh-tw or zh-cn elif code not in new: if self.ask(askall, pl): new[code] = pl
gen = pagegenerators.TextfileGenerator(filename)
gen = pagegenerators.TextfilePageGenerator(filename)
def main(): # if -file is not used, this temporary array is used to read the page title. pageTitle = [] page = None gen = None interwiki = False targetLang = None targetFamily = None for arg in sys.argv[1:]: #for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'imagetransfer') if arg: if arg == '-interwiki': interwiki = True elif arg.startswith('-tolang:'): targetLang = arg[8:] elif arg.startswith('-tofamily:'): targetFamily = arg[10:] elif arg.startswith('-file'): if len(arg) == 5: filename = wikipedia.input(u'Please enter the list\'s filename: ') else: filename = arg[6:] gen = pagegenerators.TextfileGenerator(filename) else: pageTitle.append(arg) if not gen: # if the page title is given as a command line argument, # connect the title's parts with spaces if pageTitle != []: pageTitle = ' '.join(pageTitle) page = wikipedia.Page(wikipedia.getSite(), pageTitle) # if no page title was given as an argument, and none was # read from a file, query the user if not page: pageTitle = wikipedia.input(u'Which page to check:') page = wikipedia.Page(wikipedia.getSite(), pageTitle) # generator which will yield only a single Page gen = iter([page]) if not targetLang and not targetFamily: targetSite = wikipedia.getSite('commons', 'commons') else: if not targetLang: targetLang = wikipedia.getSite().language if not targetFamily: targetFamily = wikipedia.getSite().family targetSite = wikipedia.Site(targetLang, targetFamily) bot = ImageTransferBot(gen, interwiki = interwiki, targetSite = targetSite) bot.run()
file = uo.open(self.url)
file = uo.open(self.url,"rb")
def upload_image(self, debug=False): """Gets the image at URL self.url, and uploads it to the target wiki. Returns the filename which was used to upload the image. If the upload fails, the user is asked whether to try again or not. If the user chooses not to retry, returns null. """ # Get file contents if '://' in self.url: uo = wikipedia.MyURLopener() file = uo.open(self.url) else: # Opening local files with MyURLopener would be possible, but we # don't do it because it only accepts ASCII characters in the # filename. file = open(self.url) wikipedia.output(u'Reading file %s' % self.url) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name filename = self.url if '/' in filename: filename = filename.split('/')[-1] if '\\' in filename: filename = filename.split('\\')[-1] if self.urlEncoding: filename = urllib.unquote(filename) filename = filename.decode(self.urlEncoding) if not self.keepFilename: wikipedia.output(u"The filename on the target wiki will default to: %s" % filename) # ask newfn until it's valid ok = False # FIXME: these 2 belong somewhere else, presumably in family forbidden = '/' # to be extended allowed_formats = (u'gif', u'jpg', u'jpeg', u'mid', u'midi', u'ogg', u'png', u'svg', u'xcf') while not ok: ok = True newfn = wikipedia.input(u'Enter a better name, or press enter to accept:') if newfn == "": newfn = filename ext = os.path.splitext(newfn)[1].lower().strip('.') for c in forbidden: if c in newfn: print "Invalid character: %s. Please try again" % c ok = False if ext not in allowed_formats and ok: choice = wikipedia.inputChoice(u"File format is not %s but %s. Continue [y/N]? " % (allowed_formats, ext)) if choice == 'n': ok = False if newfn != '': filename = newfn # MediaWiki doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form filename = filename.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki encodedFilename = filename.encode(self.targetSite.encoding()) # A proper description for the submission. wikipedia.output(u"The suggested description is:") wikipedia.output(self.description) choice = wikipedia.inputChoice(u'Do you want to change this description?', ['Yes', 'No'], ['y', 'N'], 'n') if choice == 'y': import editarticle editor = editarticle.TextEditor() newDescription = editor.edit(self.description) # if user saved / didn't press Cancel if newDescription: self.description = newDescription formdata = {} formdata["wpUploadDescription"] = self.description
file = open(self.url)
file = open(self.url,"rb")
def upload_image(self, debug=False): """Gets the image at URL self.url, and uploads it to the target wiki. Returns the filename which was used to upload the image. If the upload fails, the user is asked whether to try again or not. If the user chooses not to retry, returns null. """ # Get file contents if '://' in self.url: uo = wikipedia.MyURLopener() file = uo.open(self.url) else: # Opening local files with MyURLopener would be possible, but we # don't do it because it only accepts ASCII characters in the # filename. file = open(self.url) wikipedia.output(u'Reading file %s' % self.url) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name filename = self.url if '/' in filename: filename = filename.split('/')[-1] if '\\' in filename: filename = filename.split('\\')[-1] if self.urlEncoding: filename = urllib.unquote(filename) filename = filename.decode(self.urlEncoding) if not self.keepFilename: wikipedia.output(u"The filename on the target wiki will default to: %s" % filename) # ask newfn until it's valid ok = False # FIXME: these 2 belong somewhere else, presumably in family forbidden = '/' # to be extended allowed_formats = (u'gif', u'jpg', u'jpeg', u'mid', u'midi', u'ogg', u'png', u'svg', u'xcf') while not ok: ok = True newfn = wikipedia.input(u'Enter a better name, or press enter to accept:') if newfn == "": newfn = filename ext = os.path.splitext(newfn)[1].lower().strip('.') for c in forbidden: if c in newfn: print "Invalid character: %s. Please try again" % c ok = False if ext not in allowed_formats and ok: choice = wikipedia.inputChoice(u"File format is not %s but %s. Continue [y/N]? " % (allowed_formats, ext)) if choice == 'n': ok = False if newfn != '': filename = newfn # MediaWiki doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form filename = filename.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki encodedFilename = filename.encode(self.targetSite.encoding()) # A proper description for the submission. wikipedia.output(u"The suggested description is:") wikipedia.output(self.description) choice = wikipedia.inputChoice(u'Do you want to change this description?', ['Yes', 'No'], ['y', 'N'], 'n') if choice == 'y': import editarticle editor = editarticle.TextEditor() newDescription = editor.edit(self.description) # if user saved / didn't press Cancel if newDescription: self.description = newDescription formdata = {} formdata["wpUploadDescription"] = self.description
refresh_messages()
refresh_messages(lang)
def get(key, lang = None): if lang == None: lang = wikipedia.mylang try: # find out how old our saved dump is (in seconds) file_age = time.time() - os.path.getmtime('mediawiki-messages/mediawiki-messages-%s.dat' % lang) # if it's older than 1 month, reload it if file_age > 30 * 24 * 60 * 60: print 'Current MediaWiki message dump is outdated, reloading' refresh_messages() except OSError: # no saved dumped exists yet refresh_messages() # TODO: It's quite inefficient to reload the file every time this function # is used. Maybe we can save its content the first time the function is # called. f = open('mediawiki-messages/mediawiki-messages-%s.dat' % lang, 'r') dictionary = pickle.load(f) f.close() key = key[0].lower() + key[1:] if dictionary.has_key(key): return dictionary[key] else: # TODO: Throw exception instead? print 'ERROR: MediaWiki Key %s not found' % key
wikipedia.output("%3d - %s" % (i, alternatives[i]))
wikipedia.output("%3d - %s" % (i, alternatives[i]), wikipedia.myencoding())
def makepath(path): """ creates missing directories for the given path and returns a normalized absolute version of the path. - if the given path already exists in the filesystem the filesystem is not modified. - otherwise makepath creates directories along the given path using the dirname() of the path. You may append a '/' to the path if you want it to be a directory path. from [email protected] 2002/03/18 """ from os import makedirs from os.path import normpath,dirname,exists,abspath dpath = normpath(dirname(path)) if not exists(dpath): makedirs(dpath) return normpath(abspath(path))
if ignore_title.has_key(self.mylang): ignore_title[self.mylang] += self.mysite.namespaces() else: ignore_title[self.mylang] = self.mysite.namespaces()
if not ignore_title.has_key(self.mylang): ignore_title[self.mylang] = [] ignore_title[self.mylang] += [u'%s:' % namespace for namespace in self.mysite.namespaces()]
def run(self): if self.main_only: if ignore_title.has_key(self.mylang): ignore_title[self.mylang] += self.mysite.namespaces() else: ignore_title[self.mylang] = self.mysite.namespaces() for disambTitle in self.page_list: # when run with -redir argument, there's another summary message if self.solve_redirect: wikipedia.setAction(wikipedia.translate(self.mysite,msg_redir) % disambTitle) else: wikipedia.setAction(wikipedia.translate(self.mysite,msg) % disambTitle) disambPl = wikipedia.Page(self.mysite, disambTitle) self.primaryIgnoreManager = PrimaryIgnoreManager(disambPl, enabled = self.primary) if self.solve_redirect: try: target = disambPl.getRedirectTarget() self.alternatives.append(target) except wikipedia.NoPage: wikipedia.output(u"The specified page was not found.") user_input = wikipedia.input(u"Please enter the name of the page where the redirect should have pointed at, or press enter to quit:") if user_input == "": sys.exit(1) else: self.alternatives.append(user_input) except wikipedia.IsNotRedirectPage: wikipedia.output(u"The specified page is not a redirect. Skipping.") continue elif self.getAlternatives: try: if self.primary: disamb_pl = wikipedia.Page(self.mysite, primary_topic_format[self.mylang] % disambTitle) thistxt = disamb_pl.get(throttle=False) else: thistxt = disambPl.get(throttle=False) except wikipedia.IsRedirectPage,arg: thistxt = wikipedia.Page(self.mysite, str(arg)).get(throttle=False) except wikipedia.NoPage: wikipedia.output(u"Page does not exist?!") thistxt = "" thistxt = wikipedia.removeLanguageLinks(thistxt) thistxt = wikipedia.removeCategoryLinks(thistxt, self.mysite) # regular expression matching a wikilink w = r'([^\]\|]*)' Rlink = re.compile(r'\[\['+w+r'(\|'+w+r')?\]\]') for matchObj in Rlink.findall(thistxt): self.alternatives.append(matchObj[0]) self.makeAlternativesUnique() # sort possible choices self.alternatives.sort() self.listAlternatives() gen = ReferringPageGenerator(disambPl, self.primary) preloadingGen = pagegenerators.PreloadingGenerator(gen) for refpl in preloadingGen: if not self.primaryIgnoreManager.isIgnored(refpl): # run until the user selected 'quit' if not self.treat(refpl, disambPl): break # clear alternatives before working on next disambiguation page self.alternatives = []
address = self.site().delete_address(space2underline(self.title()))
address = self.site().delete_address(self.urlname())
def delete(self, reason = None, prompt = True): """Deletes the page from the wiki. Requires administrator status. If reason is None, asks for a reason. If prompt is True, asks the user if he wants to delete the page. """ # TODO: Find out if bot is logged in with an admin account, raise exception # or show error message otherwise if reason == None: reason = input(u'Please enter a reason for the deletion:') reason = reason.encode(self.site().encoding()) answer = 'y' if prompt: answer = inputChoice(u'Do you want to delete %s?' % self.title(), ['Yes', 'No'], ['y', 'N'], 'N') if answer in ['y', 'Y']: token = self.site().getToken(self) # put_throttle() host = self.site().hostname() address = self.site().delete_address(space2underline(self.title()))
catlib.change_category(article, original_cat.titleWithoutNamespace(), current_cat.titleWithoutNamespace())
catlib.change_category(article, original_cat, current_cat.titleWithoutNamespace())
def move_to_category(self, article, original_cat, current_cat): ''' Given an article which is in category original_cat, ask the user if it should be moved to one of original_cat's subcategories. Recursively run through subcategories' subcategories. NOTE: current_cat is only used for internal recursion. You should always use current_cat = original_cat. ''' print wikipedia.output(u'Treating page %s, currently in category %s' % (article.title(), current_cat.title())) subcatlist = self.catDB.getSubcats(current_cat) supercatlist = self.catDB.getSupercats(current_cat) print if len(subcatlist) == 0: print 'This category has no subcategories.' print if len(supercatlist) == 0: print 'This category has no supercategories.' print # show subcategories as possible choices (with numbers) for i in range(len(supercatlist)): # layout: we don't expect a cat to have more than 10 supercats wikipedia.output(u'u%d - Move up to %s' % (i, supercatlist[i].title())) for i in range(len(subcatlist)): # layout: we don't expect a cat to have more than 100 subcats wikipedia.output(u'%2d - Move down to %s' % (i, subcatlist[i].title())) print ' j - Jump to another category' print ' n - Skip this article' print ' r - Remove this category tag' print ' ? - Read the page' wikipedia.output(u'Enter - Save category as %s' % current_cat.title())
Rredir = re.compile('\<li\>\<a href=".+?" title=(.*?)">')
Rredir = re.compile('\<li\>\<a href=".+?" title="(.*?)">')
def retrieve_double_redirects(self): if self.source == None: mysite = wikipedia.getSite() # retrieve information from the live wiki's maintenance page host = mysite.hostname() # double redirect maintenance page's URL url = mysite.double_redirects_address(default_limit = False) print 'Retrieving special page...' maintenance_txt, charset = wikipedia.getUrl(host, url, mysite) # regular expression which finds redirects which point to another redirect inside the HTML Rredir = re.compile('\<li\>\<a href=".+?" title=(.*?)">') redir_names = Rredir.findall(maintenance_txt) print 'Retrieved %d redirects from special page.\n' % len(redir_names) for redir_name in redir_names: yield redir_name else: dict = self.get_redirects_from_dump() num = 0 for (key, value) in dict.iteritems(): num += 1 # check if the value - that is, the redirect target - is a # redirect as well if num>self.restart and dict.has_key(value): print 'Checking redirect %s/%s' % (num, len(dict)) yield key
findTemplate=re.compile(ur'\{\{[Cc]ommons\}\}')
findTemplate=re.compile(ur'\{\{[Cc]ommons')
def run(self): for page in self.generator: try: wikipedia.output(u'\n>>>> %s <<<<' % page.title()) commons = wikipedia.Site('commons', 'commons') commonspage = wikipedia.Page(commons, page.title()) try: getcommons = commonspage.get(get_redirect=True) if page.title() == commonspage.title(): oldText = page.get() text = oldText template = wikipedia.translate(wikipedia.getSite(), commons_template) # find if {{commons}} already in article findTemplate=re.compile(ur'\{\{[Cc]ommons\}\}') s = findTemplate.search(text) if s: wikipedia.output(u'** Already done.') else: # TODO: input template before categories and interwikis text = (text+('%s'%template)) if oldText == text: wikipedia.output(u'** No changes necessary.') else: wikipedia.showDiff(oldText, text) if not self.acceptall: choice = wikipedia.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N') if choice in ['a', 'A']: self.acceptall = True if self.acceptall or choice in ['y', 'Y']: try: msg = wikipedia.translate(wikipedia.getSite(), comment) page.put(text, msg) except wikipedia.EditConflict: wikipedia.output(u'Skipping %s because of edit conflict' % (page.title())) except wikipedia.NoPage: wikipedia.output(u'Page does not exist in Commons!') except wikipedia.NoPage: wikipedia.output(u'Page %s does not exist?!' % page.title()) except wikipedia.IsRedirectPage: wikipedia.output(u'Page %s is a redirect; skipping.' % page.title()) except wikipedia.LockedPage: wikipedia.output(u'Page %s is locked?!' % page.title())
name=[]
inname=[]
def treesearch(code,name): arr={(code,name):None} # First make one step based on the language itself try: n=treestep(arr,code,name,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[code,name]: print "Mother doesn't exist" return # Then add translations if we survived. autotranslate(name,arr) modifications=1 while modifications: modifications=0 for newcode,newname in arr.keys(): if arr[newcode,newname] is None: modifications+=treestep(arr,newcode,newname) return arr
name.append(arg) name='_'.join(name) if not name: name=raw_input('Which page to check:') name=wikipedia.link2url(name) m=treesearch(mylang,name)
inname.append(arg) inname='_'.join(inname) if not inname: inname=raw_input('Which page to check:') inname=wikipedia.link2url(inname) m=treesearch(mylang,inname)
def treesearch(code,name): arr={(code,name):None} # First make one step based on the language itself try: n=treestep(arr,code,name,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[code,name]: print "Mother doesn't exist" return # Then add translations if we survived. autotranslate(name,arr) modifications=1 while modifications: modifications=0 for newcode,newname in arr.keys(): if arr[newcode,newname] is None: modifications+=treestep(arr,newcode,newname) return arr
old=None
def treesearch(code,name): arr={(code,name):None} # First make one step based on the language itself try: n=treestep(arr,code,name,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[code,name]: print "Mother doesn't exist" return # Then add translations if we survived. autotranslate(name,arr) modifications=1 while modifications: modifications=0 for newcode,newname in arr.keys(): if arr[newcode,newname] is None: modifications+=treestep(arr,newcode,newname) return arr
if m[code,cname]: old=wikipedia.getLanguageLinks(m[code,cname]) oldtext=m[code,cname]
pass
def treesearch(code,name): arr={(code,name):None} # First make one step based on the language itself try: n=treestep(arr,code,name,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[code,name]: print "Mother doesn't exist" return # Then add translations if we survived. autotranslate(name,arr) modifications=1 while modifications: modifications=0 for newcode,newname in arr.keys(): if arr[newcode,newname] is None: modifications+=treestep(arr,newcode,newname) return arr
print "NOTE: Replacing %s: %s"%(mylang,name)
print "NOTE: Replacing %s: %s"%(mylang,inname)
def treesearch(code,name): arr={(code,name):None} # First make one step based on the language itself try: n=treestep(arr,code,name,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[code,name]: print "Mother doesn't exist" return # Then add translations if we survived. autotranslate(name,arr) modifications=1 while modifications: modifications=0 for newcode,newname in arr.keys(): if arr[newcode,newname] is None: modifications+=treestep(arr,newcode,newname) return arr
status,reason,data=wikipedia.putPage(mylang,name,newtext)
status,reason,data=wikipedia.putPage(mylang,inname,newtext)
def treesearch(code,name): arr={(code,name):None} # First make one step based on the language itself try: n=treestep(arr,code,name,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[code,name]: print "Mother doesn't exist" return # Then add translations if we survived. autotranslate(name,arr) modifications=1 while modifications: modifications=0 for newcode,newname in arr.keys(): if arr[newcode,newname] is None: modifications+=treestep(arr,newcode,newname) return arr
except wikipedia.NoPage:
def showImageList(self, imagelist): for i in range(len(imagelist)): image = imagelist[i] print "-"*60 wikipedia.output(u"%s. Found image: %s"% (i, image.aslink())) try: # Show the image description page's contents wikipedia.output(image.get(throttle=False)) except wikipedia.NoPage: try: # Maybe the image is on the target site already targetTitle = '%s:%s' % (self.targetSite.image_namespace(), image.title().split(':', 1)[1]) targetImage = wikipedia.Page(self.targetSite, targetTitle) if targetImage.get(throttle=False): wikipedia.output(u"Image is already on %s." % self.targetSite) wikipedia.output(targetImage.get(throttle=False)) else: print "Description empty." except wikipedia.NoPage: print "Description empty." except wikipedia.IsRedirectPage: print "Description page on Wikimedia Commons is redirect?!" except wikipedia.IsRedirectPage: print "Description page is redirect?!"
except wikipedia.IsRedirectPage: print "Description page is redirect?!"
except wikipedia.IsRedirectPage: print "Description page is redirect?!" except wikipedia.NoPage: break
def showImageList(self, imagelist): for i in range(len(imagelist)): image = imagelist[i] print "-"*60 wikipedia.output(u"%s. Found image: %s"% (i, image.aslink())) try: # Show the image description page's contents wikipedia.output(image.get(throttle=False)) except wikipedia.NoPage: try: # Maybe the image is on the target site already targetTitle = '%s:%s' % (self.targetSite.image_namespace(), image.title().split(':', 1)[1]) targetImage = wikipedia.Page(self.targetSite, targetTitle) if targetImage.get(throttle=False): wikipedia.output(u"Image is already on %s." % self.targetSite) wikipedia.output(targetImage.get(throttle=False)) else: print "Description empty." except wikipedia.NoPage: print "Description empty." except wikipedia.IsRedirectPage: print "Description page on Wikimedia Commons is redirect?!" except wikipedia.IsRedirectPage: print "Description page is redirect?!"
catlib.change_category(article, original_cat.titleWithoutNamespace(), None)
catlib.change_category(article, original_cat, None)
def move_to_category(self, article, original_cat, current_cat): ''' Given an article which is in category original_cat, ask the user if it should be moved to one of original_cat's subcategories. Recursively run through subcategories' subcategories. NOTE: current_cat is only used for internal recursion. You should always use current_cat = original_cat. ''' print wikipedia.output(u'Treating page %s, currently in category %s' % (article.title(), current_cat.title())) subcatlist = self.catDB.getSubcats(current_cat) supercatlist = self.catDB.getSupercats(current_cat) print if len(subcatlist) == 0: print 'This category has no subcategories.' print if len(supercatlist) == 0: print 'This category has no supercategories.' print # show subcategories as possible choices (with numbers) for i in range(len(supercatlist)): # layout: we don't expect a cat to have more than 10 supercats wikipedia.output(u'u%d - Move up to %s' % (i, supercatlist[i].title())) for i in range(len(subcatlist)): # layout: we don't expect a cat to have more than 100 subcats wikipedia.output(u'%2d - Move down to %s' % (i, subcatlist[i].title())) print ' j - Jump to another category' print ' n - Skip this article' print ' r - Remove this category tag' print ' ? - Read the page' wikipedia.output(u'Enter - Save category as %s' % current_cat.title())
reftxt = re.sub(boilerplateR, '{{subst:' + unicode(old, 'iso-8859-1') + '}}', reftxt)
reftxt = re.sub(boilerplateR, '{{subst:' + old + '}}', reftxt)
def treat(refpl): try: reftxt=refpl.get() except wikipedia.IsRedirectPage: pass else: # Check if boilerplate is really used in this article if not boilerplateR.search(reftxt): print "Not found in %s"%refpl return # Replace all occurences of the boilerplate in this article if resolve: reftxt = re.sub(boilerplateR, '{{subst:' + unicode(old, 'iso-8859-1') + '}}', reftxt) elif oldformat: reftxt = re.sub(boilerplateR, '{{msg:' + unicode(new, 'iso-8859-1') + '}}', reftxt) else: reftxt = re.sub(boilerplateR, '{{' + unicode(new, 'iso-8859-1') + '}}', reftxt) refpl.put(reftxt)
reftxt = re.sub(boilerplateR, '{{msg:' + unicode(new, 'iso-8859-1') + '}}', reftxt)
reftxt = re.sub(boilerplateR, '{{msg:' + new + '}}', reftxt)
def treat(refpl): try: reftxt=refpl.get() except wikipedia.IsRedirectPage: pass else: # Check if boilerplate is really used in this article if not boilerplateR.search(reftxt): print "Not found in %s"%refpl return # Replace all occurences of the boilerplate in this article if resolve: reftxt = re.sub(boilerplateR, '{{subst:' + unicode(old, 'iso-8859-1') + '}}', reftxt) elif oldformat: reftxt = re.sub(boilerplateR, '{{msg:' + unicode(new, 'iso-8859-1') + '}}', reftxt) else: reftxt = re.sub(boilerplateR, '{{' + unicode(new, 'iso-8859-1') + '}}', reftxt) refpl.put(reftxt)
reftxt = re.sub(boilerplateR, '{{' + unicode(new, 'iso-8859-1') + '}}', reftxt)
reftxt = re.sub(boilerplateR, '{{' + new + '}}', reftxt)
def treat(refpl): try: reftxt=refpl.get() except wikipedia.IsRedirectPage: pass else: # Check if boilerplate is really used in this article if not boilerplateR.search(reftxt): print "Not found in %s"%refpl return # Replace all occurences of the boilerplate in this article if resolve: reftxt = re.sub(boilerplateR, '{{subst:' + unicode(old, 'iso-8859-1') + '}}', reftxt) elif oldformat: reftxt = re.sub(boilerplateR, '{{msg:' + unicode(new, 'iso-8859-1') + '}}', reftxt) else: reftxt = re.sub(boilerplateR, '{{' + unicode(new, 'iso-8859-1') + '}}', reftxt) refpl.put(reftxt)
print '%d' % len(getReferences(thispl)) sys.exit()
def treat(refpl): try: reftxt=refpl.get() except wikipedia.IsRedirectPage: pass else: # Check if boilerplate is really used in this article if not boilerplateR.search(reftxt): print "Not found in %s"%refpl return # Replace all occurences of the boilerplate in this article if resolve: reftxt = re.sub(boilerplateR, '{{subst:' + unicode(old, 'iso-8859-1') + '}}', reftxt) elif oldformat: reftxt = re.sub(boilerplateR, '{{msg:' + unicode(new, 'iso-8859-1') + '}}', reftxt) else: reftxt = re.sub(boilerplateR, '{{' + unicode(new, 'iso-8859-1') + '}}', reftxt) refpl.put(reftxt)
'YearAD' : (lambda v: 0<=v and v<2051, 0,2051),
'YearAD' : (lambda v: 0<=v and v<2501, 0,2501),
def makeMonthNamedList( lang, pattern, makeUpperCase = None ): """Creates a list of 12 elements based on the name of the month. The language-dependent month name is used as a formating argument to the pattern. The pattern must be have one %s that will be replaced by the localized month name. Use %%d for any other parameters that should be preserved. """ if makeUpperCase == None: f = lambda s: s elif makeUpperCase == True: f = lambda s: s[0].upper() + s[1:] elif makeUpperCase == False: f = lambda s: s[0].lower() + s[1:] return [ pattern % f(monthName(lang, m)) for m in range(1,13) ]
'DecadeAD' : (lambda v: 0<=v and v<2051, 0,2051),
'DecadeAD' : (lambda v: 0<=v and v<2501, 0,2501),
def makeMonthNamedList( lang, pattern, makeUpperCase = None ): """Creates a list of 12 elements based on the name of the month. The language-dependent month name is used as a formating argument to the pattern. The pattern must be have one %s that will be replaced by the localized month name. Use %%d for any other parameters that should be preserved. """ if makeUpperCase == None: f = lambda s: s elif makeUpperCase == True: f = lambda s: s[0].upper() + s[1:] elif makeUpperCase == False: f = lambda s: s[0].lower() + s[1:] return [ pattern % f(monthName(lang, m)) for m in range(1,13) ]
def assemblesecondrun(self, previous, askall):
def assemblesecondrun(self, previous, askall=False):
def assemblesecondrun(self, previous, askall): new = previous askit = askall for pl in self.done.keys(): code = pl.code() if code == wikipedia.mylang and pl.exists() and not pl.isRedirectPage() and not pl.isEmpty(): pass elif pl.exists() and not pl.isRedirectPage(): if new.has_key(code): pass elif code in ('zh-tw','zh-cn') and new.has_key('zh') and new['zh'] is not None: print "NOTE: Ignoring %s, using %s"%(new['zh'].asasciilink(),pl.asasciilink()) answer = self.ask(askit,pl) if answer == 'y': new['zh'] = None # Remove the global zh link new[code] = pl # Add the more precise one elif answer == 'n': pass elif answer == 'g': return None elif answer == 'a': new['zh'] = None # Remove the global zh link new[code] = pl # Add the more precise one askit = False elif code == 'zh' and ( (new.has_key('zh-tw') and new['zh-tw'] is not None) or (new.has_key('zh-cn') and new['zh-cn'] is not None)): print "NOTE: Ignoring %s"%(pl.asasciilink()) pass # do not add global zh if there is a specific zh-tw or zh-cn elif code not in new: answer = self.ask(askit,pl) if answer == 'y': new[code] = pl elif answer == 'n': pass elif answer == 'g': return None elif answer == 'a': new[code] = pl askit = False
predata.append(('wpMinorEdit', '1'))
predata.append(('wpMinoredit', '1'))
def putPage(code, name, text, comment = None, watchArticle = False, minorEdit = True, newPage = False): """Upload 'text' on page 'name' to the 'code' language wikipedia. Use of this routine can normally be avoided; use PageLink.put instead. """ import httplib put_throttle() host = family.hostname(code) address = family.put_address(code, space2underline(name)) if comment is None: comment=action if not loggedin or code != mylang: comment = username + ' - ' + comment try: text = forCode(text, code) predata = [ ('wpSave', '1'), ('wpSummary', comment), ('wpTextbox1', text)] if newPage and newPage != '0': predata.append(('wpEdittime', '')) else: predata.append(('wpEdittime', edittime[code, link2url(name, code)])) if minorEdit and minorEdit != '0': predata.append(('wpMinorEdit', '1')) if watchArticle and watchArticle != '0': predata.append(('wpWatchthis', '1')) data = urlencode(tuple(predata)) except KeyError: print edittime raise if debug: print text print address print data return None, None, None print "Changing page %s:%s" % (code, name) conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "RobHooftWikiRobot/1.0") if cookies and code == mylang: conn.putheader('Cookie',cookies) conn.endheaders() conn.send(data) response = conn.getresponse() data = response.read() conn.close() print data return response.status, response.reason, data
ask = wikipedia.input('What do you do: (c)hange page name, (n)ext page or (q)uit?')
ask = wikipedia.input('What do you do: (c)hange page name (a)ppend to page name, (n)ext page or (q)uit?')
def Movepages(page, deletedPages): pagetitle = page.title() wikipedia.output(u'\n>>>> %s <<<<' % pagetitle) ask = wikipedia.input('What do you do: (c)hange page name, (n)ext page or (q)uit?') if ask in ['c', 'C']: pagemove = wikipedia.input(u'New page name:') titleroot = wikipedia.Page(wikipedia.getSite(), pagetitle) msg = wikipedia.translate(wikipedia.getSite(), comment) titleroot.move(pagemove, msg) wikipedia.output('Page %s move successful to %s.' % (pagetitle, pagemove)) if deletedPages == True: pagedel = wikipedia.Page(wikipedia.getSite(), pagetitle) pagedel.delete(pagetitle) elif ask in ['n', 'N']: pass elif ask in ['q', 'Q']: sys.exit() else: wikipedia.output('Input certain code.') sys.exit()
def change_category(article, oldCat, newCatTitle):
def change_category(article, oldCat, newCatTitle, comment=None):
def change_category(article, oldCat, newCatTitle): """ Given an article which is in category oldCat, moves it to category called newCatTitle. Moves subcategories of oldCat as well. oldCat should be a Category object, newCatTitle should be the new name as a string, without namespace. If newCatTitle is None, the category will be removed. """ cats = article.categories(withSortKeys = True) site = article.site() sort_key = '' removed = False for cat in cats: # get the category title without the namespace, but possibly with a # "|" sign followed by a sortkey if cat == oldCat: # because a list element is removed, the iteration will skip the # next element. this might lead to forgotten categories, but # usually each category should only appear once per article. cats.remove(cat) removed = True elif cat.title().startswith(oldCat.title() + '|'): sort_key = cat.titleWithoutNamespace().split('|', 1)[1] cats.remove(cat) removed = True if not removed: wikipedia.output(u'ERROR: %s is not in category %s!' % (article.aslink(), oldCat.title())) return if newCatTitle is not None: if sort_key == '': newCat = Category(site, newCatTitle) else: newCat = Category(site, newCatTitle + '|' + sort_key) cats.append(newCat) text = article.get() text = wikipedia.replaceCategoryLinks(text, cats) article.put(text)
for cat in cats:
for cat in cats[:]:
def change_category(article, oldCat, newCatTitle): """ Given an article which is in category oldCat, moves it to category called newCatTitle. Moves subcategories of oldCat as well. oldCat should be a Category object, newCatTitle should be the new name as a string, without namespace. If newCatTitle is None, the category will be removed. """ cats = article.categories(withSortKeys = True) site = article.site() sort_key = '' removed = False for cat in cats: # get the category title without the namespace, but possibly with a # "|" sign followed by a sortkey if cat == oldCat: # because a list element is removed, the iteration will skip the # next element. this might lead to forgotten categories, but # usually each category should only appear once per article. cats.remove(cat) removed = True elif cat.title().startswith(oldCat.title() + '|'): sort_key = cat.titleWithoutNamespace().split('|', 1)[1] cats.remove(cat) removed = True if not removed: wikipedia.output(u'ERROR: %s is not in category %s!' % (article.aslink(), oldCat.title())) return if newCatTitle is not None: if sort_key == '': newCat = Category(site, newCatTitle) else: newCat = Category(site, newCatTitle + '|' + sort_key) cats.append(newCat) text = article.get() text = wikipedia.replaceCategoryLinks(text, cats) article.put(text)
article.put(text)
article.put(text, comment)
def change_category(article, oldCat, newCatTitle): """ Given an article which is in category oldCat, moves it to category called newCatTitle. Moves subcategories of oldCat as well. oldCat should be a Category object, newCatTitle should be the new name as a string, without namespace. If newCatTitle is None, the category will be removed. """ cats = article.categories(withSortKeys = True) site = article.site() sort_key = '' removed = False for cat in cats: # get the category title without the namespace, but possibly with a # "|" sign followed by a sortkey if cat == oldCat: # because a list element is removed, the iteration will skip the # next element. this might lead to forgotten categories, but # usually each category should only appear once per article. cats.remove(cat) removed = True elif cat.title().startswith(oldCat.title() + '|'): sort_key = cat.titleWithoutNamespace().split('|', 1)[1] cats.remove(cat) removed = True if not removed: wikipedia.output(u'ERROR: %s is not in category %s!' % (article.aslink(), oldCat.title())) return if newCatTitle is not None: if sort_key == '': newCat = Category(site, newCatTitle) else: newCat = Category(site, newCatTitle + '|' + sort_key) cats.append(newCat) text = article.get() text = wikipedia.replaceCategoryLinks(text, cats) article.put(text)
R=re.compile("[(HREF)(href)]=[\"'](.*?)[\"']")
R=re.compile("[\"'](.*?)[\"']")
def get_imagelinks(url): # Given a URL, get all images linked to by the page at that URL. # First, we get the location for relative links from the URL. relativepath = url.split("/") if len(relativepath) == 1: relativepath=relativepath[0] else: relativepath=relativepath[:len(relativepath)-1] relativepath="/".join(relativepath) links = [] uo = wikipedia.MyURLopener() file = uo.open(url) text = file.read() file.close() text = text.lower() R=re.compile("[(HREF)(href)]=[\"'](.*?)[\"']") for link in R.findall(text): ext = os.path.splitext(link)[1].lower().strip('.') if ext in fileformats: if re.compile("://").match(text): links += [link] else: links += [relativepath+"/"+link] return links
fileformats = ('jpg', 'jpeg', 'png', 'gif', 'svg', 'ogg')
def main(give_url,image_url): url = give_url fileformats = ('jpg', 'jpeg', 'png', 'gif', 'svg', 'ogg') basicdesc = [] mysite = wikipedia.getSite() if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
mysite = wikipedia.getSite()
def main(give_url,image_url): url = give_url fileformats = ('jpg', 'jpeg', 'png', 'gif', 'svg', 'ogg') basicdesc = [] mysite = wikipedia.getSite() if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
except:
finally:
def main(give_url,image_url): url = give_url fileformats = ('jpg', 'jpeg', 'png', 'gif', 'svg', 'ogg') basicdesc = [] mysite = wikipedia.getSite() if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
raise else: raise
def main(give_url,image_url): url = give_url fileformats = ('jpg', 'jpeg', 'png', 'gif', 'svg', 'ogg') basicdesc = [] mysite = wikipedia.getSite() if not mysite.loggedin(): print "You must be logged in to upload images" import sys sys.exit(1) if url == '': if image_url: url = wikipedia.input(u"What URL range should I check (use $ for the part that is changeable)") else: url = wikipedia.input(u"From what URL should I get the images?") if image_url: minimum=1 maximum=99 answer= wikipedia.input(u"What is the first number to check (default: 1)") if answer: minimum=int(answer) answer= wikipedia.input(u"What is the last number to check (default: 99)") if answer: maximum=int(answer) if basicdesc == []: basicdesc = wikipedia.input( u"What text should be added at the end of the description of each image from this url?") else: basicdesc = ' '.join(desc) if image_url: ilinks = [] i = minimum while i <= maximum: ilinks += [url.replace("$",str(i))] i += 1 else: ilinks = get_imagelinks(url) for image in ilinks: answer =wikipedia.input(u"Include image %s (y/N/s(top))?"%image) if answer in ["y","Y"]: desc = wikipedia.input(u"Give the description of this image:") desc = desc + "\r\n\n\r" + basicdesc lib_images.get_image(image, None, desc) elif answer in ["s","S"]: break
except UnicodeEncodeError:
except UnicodeError:
def link2url(name, code, incode = None): """Convert an interwiki link name of a page to the proper name to be used in a URL for that page. code should specify the language for the link""" if code == 'eo': name = name.replace('cx','&#265;') name = name.replace('Cx','&#264;') name = name.replace('CX','&#264;') name = name.replace('gx','&#285;') name = name.replace('Gx','&#284;') name = name.replace('GX','&#284;') name = name.replace('hx','&#293;') name = name.replace('Hx','&#292;') name = name.replace('HX','&#292;') name = name.replace('jx','&#309;') name = name.replace('Jx','&#308;') name = name.replace('JX','&#308;') name = name.replace('sx','&#349;') name = name.replace('Sx','&#348;') name = name.replace('SX','&#348;') name = name.replace('ux','&#365;') name = name.replace('Ux','&#364;') name = name.replace('UX','&#364;') name = name.replace('XX','X') name = name.replace('Xx','X') name = name.replace('xx','x') name = name.replace('&#265;x','cx') name = name.replace('&#264;x','Cx') name = name.replace('&#264;X','CX') name = name.replace('&#285;x','gx') name = name.replace('&#284;x','Gx') name = name.replace('&#284;X','GX') name = name.replace('&#293;x','hx') name = name.replace('&#292;x','Hx') name = name.replace('&#292;X','HX') name = name.replace('&#309;x','jx') name = name.replace('&#308;x','Jx') name = name.replace('&#308;X','JX') name = name.replace('&#349;x','sx') name = name.replace('&#348;x','Sx') name = name.replace('&#348;X','SX') name = name.replace('&#365;x','ux') name = name.replace('&#364;x','Ux') name = name.replace('&#364;X','UX') if '%' in name: try: name = url2unicode(name, language = code) except UnicodeEncodeError: name = html2unicode(name, language = code, altlanguage = incode) else: name = html2unicode(name, language = code, altlanguage = incode) #print "DBG>",repr(name) # Remove spaces from beginning and the end name = name.strip() # Standardize capitalization if name: if not code in family.nocapitalize: name = name[0].upper()+name[1:] #print "DBG>",repr(name) try: result = str(name.encode(code2encoding(code))) except UnicodeError: print "Cannot convert %s into a URL for %s" % (repr(name), code) # Put entities in there. The URL will not be found. result = addEntity(name) print "Using entities instead",result print "BUG> This is probably a bug in the robot that did not recognize an interwiki link!" #raise result = space2underline(result) return urllib.quote(result)
if newtext[0]=='=':
if newtext=='': pass elif newtext[0]=='=':
def get_image(fn, target, description, debug=False): uploadaddr='/wiki/%s:Upload'%wikipedia.special[wikipedia.mylang] # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] print "The filename on wikipedia will default to:",fn newfn = raw_input("Better name : ") if newfn: fn = unicode(newfn, config.console_encoding) fn = fn.encode(wikipedia.code2encoding(wikipedia.mylang)) # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # A proper description for the submission. if description=='': print ('Give a description for the image:') description = raw_input('') description = unicode(description, config.console_encoding) else: print ("The suggested description is:") print print wikipedia.UnicodeToAsciiHtml(description) print print ("Enter return to use this description, enter a text to add something") print ("at the end, or enter = followed by a text to replace the description.") newtext = raw_input('Enter return, text or =text : ') if newtext[0]=='=': description=newtext[1:] else: description=description+' '+newtext # try to encode the description to the encoding used by the home Wikipedia. # if that's not possible (e.g. because there are non-Latin-1 characters and # the home Wikipedia uses Latin-1), convert all non-ASCII characters to # HTML entities. try: description = description.encode(wikipedia.code2encoding(wikipedia.mylang)) except UnicodeEncodeError: description = wikipedia.UnicodeToAsciiHtml(description).encode(wikipedia.code2encoding(wikipedia.mylang)) # don't upload if we're in debug mode if not debug: data = post_multipart(wikipedia.langs[wikipedia.mylang], uploadaddr, (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) return fn
preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = ReplaceRobot(gen, replacements, exceptions, regex, acceptall)
preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = ReplaceRobot(preloadingGen, replacements, exceptions, regex, acceptall)
def main(): # How we want to retrieve information on which pages need to be changed. # Can either be 'sqldump', 'textfile' or 'userinput'. source = None # Array which will collect commandline parameters. # First element is original text, second element is replacement text. commandline_replacements = [] # A dictionary where keys are original texts and values are replacement texts. replacements = {} # Don't edit pages which contain certain texts. exceptions = [] # Should the elements of 'replacements' and 'exceptions' be interpreted # as regular expressions? regex = False # Predefined fixes from dictionary 'fixes' (see above). fix = None # the dump's path, either absolute or relative, which will be used when source # is 'sqldump'. sqlfilename = None # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # a list of pages which will be used when source is 'userinput'. pagenames = [] # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespace should be processed when using a SQL dump # default to -1 which means all namespaces will be processed namespace = -1 # Load default summary message. wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg)) # Read commandline parameters. for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'replace') if arg: if arg == '-regex': regex = True elif arg.startswith('-file'): if len(arg) == 5: textfilename = wikipedia.input(u'Please enter the filename:') else: textfilename = arg[6:] source = 'textfile' elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] source = 'category' elif arg.startswith('-sql'): if len(arg) == 4: sqlfilename = wikipedia.input(u'Please enter the SQL dump\'s filename:') else: sqlfilename = arg[5:] source = 'sqldump' elif arg.startswith('-page'): if len(arg) == 5: pagenames.append(wikipedia.input(u'Which page do you want to chage?')) else: pagenames.append(arg[6:]) source = 'userinput' elif arg.startswith('-except:'): exceptions.append(arg[8:]) elif arg.startswith('-fix:'): fix = arg[5:] elif arg == '-always': acceptall = True elif arg.startswith('-namespace:'): namespace = int(arg[11:]) else: commandline_replacements.append(arg) if source == None or len(commandline_replacements) not in [0, 2]: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if (len(commandline_replacements) == 2 and fix == None): replacements[commandline_replacements[0]] = commandline_replacements[1] wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg ) % ' (-' + commandline_replacements[0] + ' +' + commandline_replacements[1] + ')') elif fix == None: old = wikipedia.input(u'Please enter the text that should be replaced:') new = wikipedia.input(u'Please enter the new text:') change = '(-' + old + ' +' + new replacements[old] = new while True: old = wikipedia.input(u'Please enter another text that should be replaced, or press Enter to start:') if old == '': change = change + ')' break new = wikipedia.input(u'Please enter the new text:') change = change + ' & -' + old + ' +' + new replacements[old] = new default_summary_message = wikipedia.translate(wikipedia.getSite(), msg) % change wikipedia.output(u'The summary message will default to: %s' % 
default_summary_message) summary_message = wikipedia.input(u'Press Enter to use this default message, or enter a description of the changes your bot will make:') if summary_message == '': summary_message = default_summary_message wikipedia.setAction(summary_message) else: # Perform one of the predefined actions. try: fix = fixes[fix] except KeyError: wikipedia.output(u'Available predefined fixes are: %s' % fixes.keys()) wikipedia.stopme() sys.exit() if fix.has_key('regex'): regex = fix['regex'] if fix.has_key('msg'): wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), fix['msg'])) if fix.has_key('exceptions'): exceptions = fix['exceptions'] replacements = fix['replacements'] gen = ReplacePageGenerator(source, replacements, exceptions, regex, namespace, textfilename, sqlfilename, categoryname, pagenames) preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = ReplaceRobot(gen, replacements, exceptions, regex, acceptall) bot.run()
for site in updatedSites: del new[site] self.reportBacklinks(new)
self.reportBacklinks(new, updatedSites)
def finish(self, sa = None): """Round up the subject, making any necessary changes. This method should be called exactly once after the todo list has gone empty.
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed."""
def reportBacklinks(self, new, updatedSites): """ Report missing back links. This will be called from finish() if needed. updatedSites is a list that contains all sites we changed, to avoid reporting of missing backlinks for pages we already fixed """
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" try: for site, page in new.iteritems(): if not page.section(): shouldlink = new.values() + [self.inpl] linked = page.interwiki() for xpage in shouldlink: if xpage != page and not xpage in linked: for l in linked: if l.site() == xpage.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (page.site().family.name, page.aslink(forceInterwiki = True), xpage.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True))) break else: wikipedia.output(u"WARNING: %s: %s does not link to %s" % (page.site().family.name, page.aslink(forceInterwiki = True), xpage.aslink(forceInterwiki = True))) # Check for superfluous links for xpage in linked: if not xpage in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpage.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (page.site().family.name, page.aslink(forceInterwiki = True), xpage.aslink(forceInterwiki = True))) except (socket.error, IOError): wikipedia.output(u'ERROR: could not report backlinks')
if not page.section():
if site not in updatedSites and not page.section():
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" try: for site, page in new.iteritems(): if not page.section(): shouldlink = new.values() + [self.inpl] linked = page.interwiki() for xpage in shouldlink: if xpage != page and not xpage in linked: for l in linked: if l.site() == xpage.site(): wikipedia.output(u"WARNING: %s: %s does not link to %s but to %s" % (page.site().family.name, page.aslink(forceInterwiki = True), xpage.aslink(forceInterwiki = True), l.aslink(forceInterwiki = True))) break else: wikipedia.output(u"WARNING: %s: %s does not link to %s" % (page.site().family.name, page.aslink(forceInterwiki = True), xpage.aslink(forceInterwiki = True))) # Check for superfluous links for xpage in linked: if not xpage in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.site() == xpage.site(): # Already reported above. break else: # New warning wikipedia.output(u"WARNING: %s: %s links to incorrect %s" % (page.site().family.name, page.aslink(forceInterwiki = True), xpage.aslink(forceInterwiki = True))) except (socket.error, IOError): wikipedia.output(u'ERROR: could not report backlinks')
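Note: taken together, the three hunks above pass an updatedSites list from finish() down into reportBacklinks(), so pages on sites this run has just rewritten are skipped when warning about missing backlinks. A toy illustration of the skip, with made-up data rather than the bot's real Page/Site objects:

    new = {'de': 'Seite', 'fr': 'Page', 'nl': 'Pagina'}   # interwiki targets that were found
    updatedSites = ['de', 'fr']                           # sites already rewritten in this run

    for site, page in new.items():
        if site in updatedSites:
            continue   # its links were just fixed, a warning would be stale
        print('would check backlinks of %s on %s' % (page, site))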
replacements[i] = exceptionR
exceptions[i] = exceptionR
def main(): gen = None # How we want to retrieve information on which pages need to be changed. # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # Array which will collect commandline parameters. # First element is original text, second element is replacement text. commandline_replacements = [] # A list of 2-tuples of original text and replacement text. replacements = [] # Don't edit pages which contain certain texts. exceptions = [] # Should the elements of 'replacements' and 'exceptions' be interpreted # as regular expressions? regex = False # Predefined fixes from dictionary 'fixes' (see above). fix = None # the dump's path, either absolute or relative, which will be used when source # is 'xmldump'. xmlfilename = None # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # pages which will be processed when the -page parameter is used pageNames = [] # a page whose referrers will be processed when the -ref parameter is used referredPageName = None # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None # Load default summary message. wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg)) # Read commandline parameters. for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'replace') if arg: if arg == '-regex': regex = True elif arg.startswith('-file'): if len(arg) == 5: textfilename = wikipedia.input(u'Please enter the filename:') else: textfilename = arg[6:] source = 'textfile' elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] source = 'category' elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlfilename = arg[5:] source = 'xmldump' elif arg.startswith('-page'): if len(arg) == 5: pageNames.append(wikipedia.input(u'Which page do you want to chage?')) else: pageNames.append(arg[6:]) source = 'singlepage' elif arg.startswith('-ref'): if len(arg) == 4: referredPageName = wikipedia.input(u'Links to which page should be processed?') else: referredPageName = arg[5:] source = 'ref' elif arg.startswith('-start'): if len(arg) == 6: firstPageTitle = wikipedia.input(u'Which page do you want to chage?') else: firstPageTitle = arg[7:] source = 'allpages' elif arg.startswith('-except:'): exceptions.append(arg[8:]) elif arg.startswith('-fix:'): fix = arg[5:] elif arg == '-always': acceptall = True elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) else: commandline_replacements.append(arg) if (len(commandline_replacements) == 2 and fix == None): replacements.append((commandline_replacements[0], commandline_replacements[1])) wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg ) % ' (-' + commandline_replacements[0] + ' +' + commandline_replacements[1] + ')') elif fix == None: old = wikipedia.input(u'Please enter the text that should be replaced:') new = wikipedia.input(u'Please enter the new text:') change = '(-' + old + ' +' + new replacements.append((old, new)) while True: old = wikipedia.input(u'Please enter another text that should be replaced, or press Enter to start:') if old == '': change = change + 
')' break new = wikipedia.input(u'Please enter the new text:') change = change + ' & -' + old + ' +' + new replacements.append((old, new)) default_summary_message = wikipedia.translate(wikipedia.getSite(), msg) % change wikipedia.output(u'The summary message will default to: %s' % default_summary_message) summary_message = wikipedia.input(u'Press Enter to use this default message, or enter a description of the changes your bot will make:') if summary_message == '': summary_message = default_summary_message wikipedia.setAction(summary_message) else: # Perform one of the predefined actions. try: fix = fixes[fix] except KeyError: wikipedia.output(u'Available predefined fixes are: %s' % fixes.keys()) wikipedia.stopme() sys.exit() if fix.has_key('regex'): regex = fix['regex'] if fix.has_key('msg'): wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), fix['msg'])) if fix.has_key('exceptions'): exceptions = fix['exceptions'] replacements = fix['replacements'] if regex: # already compile all regular expressions here to save time later for i in range(len(replacements)): old, new = replacements[i] oldR = re.compile(old, re.UNICODE) replacements[i] = oldR, new for i in range(len(exceptions)): exception = exceptions[i] exceptionR = re.compile(exception, re.UNICODE) replacements[i] = exceptionR if source == 'textfile': gen = pagegenerators.TextfilePageGenerator(textfilename) elif source == 'category': cat = catlib.Category(wikipedia.getSite(), categoryname) gen = pagegenerators.CategorizedPageGenerator(cat) elif source == 'xmldump': gen = XmlDumpReplacePageGenerator(xmlfilename, replacements, exceptions, regex) elif source == 'singlepage': pages = [wikipedia.Page(wikipedia.getSite(), pageName) for pageName in pageNames] gen = iter(pages) elif source == 'allpages': namespace = wikipedia.Page(wikipedia.getSite(), firstPageTitle).namespace() gen = pagegenerators.AllpagesPageGenerator(firstPageTitle, namespace) elif source == 'ref': referredPage = wikipedia.Page(wikipedia.getSite(), referredPageName) gen = pagegenerators.ReferringPageGenerator(referredPage) elif source == None or len(commandline_replacements) not in [0, 2]: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = ReplaceRobot(preloadingGen, replacements, exceptions, regex, acceptall) bot.run()
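The hunk above fixes a copy-and-paste slip: the compiled exception patterns were written back into the replacements list instead of the exceptions list, so the exceptions stayed uncompiled and a replacement tuple got clobbered. A minimal sketch of the corrected pre-compilation step (the sample patterns are made up):

    import re

    replacements = [('colour', 'color')]
    exceptions = ['<nowiki>.*?</nowiki>']

    # compile everything once, before the main loop
    for i in range(len(replacements)):
        old, new = replacements[i]
        replacements[i] = (re.compile(old, re.UNICODE), new)
    for i in range(len(exceptions)):
        exceptions[i] = re.compile(exceptions[i], re.UNICODE)   # not replacements[i]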
result.append(self.__class__(self.code(), linkname=catname))
result.append(self.__class__(self.code(), title = catname))
def categories(self): """A list of categories that the article is in. This will retrieve the page text to do its work, so it can raise the same exceptions that are raised by the get() method.
newtext = s2[:firstafter+1] + s + s2[firstafter+1:]
newtext = s2[:firstafter] + s + s2[firstafter:]
def replaceCategoryLinks(oldtext, new, site = None): """Replace the category links given in the wikitext given in oldtext by the new links given in new. 'new' should be a list of Category objects. """ if site is None: site = getSite() if site == Site('de', 'wikipedia'): raise Error('The PyWikipediaBot is no longer allowed to touch categories on the German Wikipedia. See de.wikipedia.org/wiki/Wikipedia_Diskussion:Personendaten#Position') s = categoryFormat(new, insite = site) s2 = removeCategoryLinks(oldtext, site = site) if s: if site.language() in site.family.category_attop: newtext = s + site.family.category_text_separator + s2 else: # calculate what was after the categories links on the page firstafter = 0 try: while s2[firstafter-1] == oldtext[firstafter-1]: firstafter -= 1 except IndexError: pass # Is there any text in the 'after' part that means we should keep it after? if "</noinclude>" in s2[firstafter:] and firstafter < 0: newtext = s2[:firstafter+1] + s + s2[firstafter+1:] elif site.language() in site.family.categories_last: newtext = s2 + site.family.category_text_separator + s else: interwiki = getLanguageLinks(s2) s2 = removeLanguageLinks(s2, site) + site.family.category_text_separator + s newtext = replaceLanguageLinks(s2, interwiki, site) else: return s2 return newtext
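In the hunk above, firstafter is a non-positive index marking where the unchanged tail of the page (for example a trailing </noinclude>) begins, so slicing at firstafter rather than firstafter+1 keeps that whole tail after the inserted category block. A small sketch of the corrected slice with hypothetical strings:

    s2 = 'article body</noinclude>'
    s = '[[Category:Example]]\n'
    firstafter = -len('</noinclude>')   # start of the text that must stay after the categories

    newtext = s2[:firstafter] + s + s2[firstafter:]
    print(newtext)   # article body[[Category:Example]] ... </noinclude>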
return Page(self,self.namespace(14)+':'+self.family.disambcatname[self.lang])
return catlib.Category(self,self.namespace(14)+':'+self.family.disambcatname[self.lang])
def disambcategory(self): try: return Page(self,self.namespace(14)+':'+self.family.disambcatname[self.lang]) except KeyError: raise NoPage
if wikipedia.isInterwikiLink(m.group('title')):
if m.group('title') == '' or wikipedia.isInterwikiLink(m.group('title')):
def treat(self, refpl, disambPl): """ Parameters: disambPl - The disambiguation page or redirect we don't want anything to link on refpl - A page linking to disambPl Returns False if the user pressed q to completely quit the program. Otherwise, returns True. """
print "%s already has %s"%(pl.aslocallink(),catpl.aslocallink())
print "%s already has %s"%(pl2.aslocallink(),catpl.aslocallink())
def add_category(): print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input('Wikipedia page with list of pages to change: ') if listpage: try: pl = wikipedia.PageLink(wikipedia.mylang, listpage) except NoPage: print 'The page ' + listpage + ' could not be loaded from the server.' sys.exit() pagenames = pl.links() else: refpage = wikipedia.input('Wikipedia page that is now linked to: ') pl = wikipedia.PageLink(wikipedia.mylang, refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input('Category to add (do not give namespace) : ') newcat = newcat.encode(wikipedia.code2encoding(wikipedia.mylang)) newcat = newcat[:1].capitalize() + newcat[1:] print newcat ns = wikipedia.family.category_namespaces(wikipedia.mylang) catpl = wikipedia.PageLink(wikipedia.mylang, ns[0].encode(wikipedia.code2encoding(wikipedia.mylang))+':'+newcat) print "Will add %s"%catpl.aslocallink() answer = '' for nm in pagenames: pl2 = wikipedia.PageLink(wikipedia.mylang, nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input("%s [y/n/a(ll)] : "%(pl2.asasciilink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input("This should be used if and only if you are sure that your links are correct !!! Are you sure ? [y/n] : ") if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: print "%s doesn't exit yet. Ignoring."%(pl2.aslocallink()) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(wikipedia.mylang,arg.args[0]) print "WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink()) else: print "Current categories: ",cats if catpl in cats: print "%s already has %s"%(pl.aslocallink(),catpl.aslocallink()) else: cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text, comment = catpl.aslocallink().encode(wikipedia.code2encoding(wikipedia.mylang)))
Rsupercat = re.compile('title=.*\"([^\"]*)\"')
Rsupercat = re.compile('title ="([^"]*)"')
def _make_catlist(self, recurse = False): """Make a list of all articles and categories that are in this category. If recurse is set to True, articles and categories of any subcategories are also retrieved.
supercats.append(title)
if iscattitle(title): supercats.append(title)
def _make_catlist(self, recurse = False): """Make a list of all articles and categories that are in this category. If recurse is set to True, articles and categories of any subcategories are also retrieved.
(pages, supercats) = self.catlist()
supercats = [] for title in self.catlist(recurse)[1]: ncat = _CatLink(self.code(), title) supercats.append(ncat)
def supercategories(self, recurse = False): """Create a list of all subcategories of the current category.
if m and not get_redirect: output(u"DBG> %s is redirect to %s" % (self.title(), m.group(1))) raise IsRedirectPage(m.group(1))
if m: if get_redirect: self._redirarg = m.group(1) else: output(u"DBG> %s is redirect to %s" % (self.title(), m.group(1))) raise IsRedirectPage(m.group(1))
def getEditPage(self, get_redirect=False, throttle = True, sysop = False): """ Get the contents of the Page via the edit page. Do not use this directly, use get() instead. Arguments: get_redirect - Get the contents, even if it is a redirect page This routine returns a unicode string containing the wiki text. """ isWatched = False editRestriction = None output(u'Getting page %s' % self.aslink()) path = self.site().edit_address(self.urlname()) # Make sure Brion doesn't get angry by waiting if the last time a page # was retrieved was not long enough ago. if throttle: get_throttle() # Try to retrieve the page until it was successfully loaded (just in case # the server is down or overloaded) # wait for retry_idle_time minutes (growing!) between retries. retry_idle_time = 1 while True: starttime = time.time() try: text = self.site().getUrl(path, sysop = sysop) except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: Could not load %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site().hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue get_throttle.setDelay(time.time() - starttime)\ # Look for the edit token R = re.compile(r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"") tokenloc = R.search(text) if tokenloc: self.site().putToken(tokenloc.group(1), sysop = sysop) elif not self.site().getToken(getalways = False): self.site().putToken('', sysop = sysop) # Look if the page is on our watchlist R = re.compile(r"\<input tabindex='[\d]+' type='checkbox' name='wpWatchthis' checked='checked'") matchWatching = R.search(text) if matchWatching: isWatched = True m = re.search('value="(\d+)" name=["\']wpEdittime["\']', text) if m: self._editTime = m.group(1) else: self._editTime = "0" m = re.search('value="(\d+)" name=["\']wpStarttime["\']', text) if m: self._startTime = m.group(1) else: self._startTime = "0" # Extract the actual text from the textedit field try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %i minutes..." % (self.site().hostname(), path, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(self.site(), self.title()) m = self.site().redirectRegex().match(text[i1:i2]) if self._editTime == "0": output(u"DBG> page may be locked?!") editRestriction = 'sysop' if m and not get_redirect: output(u"DBG> %s is redirect to %s" % (self.title(), m.group(1))) raise IsRedirectPage(m.group(1)) x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] return x, isWatched, editRestriction
if not trailingChars or label:
if not (trailingChars or label):
def cleanUpLinks(self, text): trailR = re.compile(self.site.linktrail()) # The regular expression which finds links. Results consist of four groups: # group title is the target page title, that is, everything before | or ]. # group section is the page section. It'll include the # to make life easier for us. # group label is the alternative link title, that's everything between | and ]. # group linktrail is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. self.linkR = re.compile(r'\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' + self.site.linktrail() + ')') curpos = 0 # This loop will run until we have finished the current page while True: m = self.linkR.search(text, pos = curpos) if not m: break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 titleWithSection = m.group('titleWithSection') label = m.group('label') trailingChars = m.group('linktrail')
lines = text.split('\n')
lines = text.split('\r\n')
def removeUselessSpaces(self, text): result = [] multipleSpacesR = re.compile(' +') spaceAtLineEndR = re.compile(' $') preR = re.compile('<pre', re.IGNORECASE) lines = text.split('\n') for line in lines: if len(line) > 0 and line[0] != ' ' and not preR.search(line): line = wikipedia.replaceExceptMathNowikiAndComments(line, multipleSpacesR, ' ') line = wikipedia.replaceExceptMathNowikiAndComments(line, spaceAtLineEndR, '') result.append(line) return '\n'.join(result)
return '\n'.join(result)
return '\r\n'.join(result)
def removeUselessSpaces(self, text): result = [] multipleSpacesR = re.compile(' +') spaceAtLineEndR = re.compile(' $') preR = re.compile('<pre', re.IGNORECASE) lines = text.split('\n') for line in lines: if len(line) > 0 and line[0] != ' ' and not preR.search(line): line = wikipedia.replaceExceptMathNowikiAndComments(line, multipleSpacesR, ' ') line = wikipedia.replaceExceptMathNowikiAndComments(line, spaceAtLineEndR, '') result.append(line) return '\n'.join(result)
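The two hunks above switch the split and join to the full '\r\n' sequence, presumably so the CRLF line endings used in the wiki text survive the round trip and the end-of-line regex (' $') still matches. A quick illustration of the difference:

    text = 'first line \r\nsecond line\r\n'
    print([repr(line) for line in text.split('\n')])     # every line keeps a stray '\r'
    print([repr(line) for line in text.split('\r\n')])   # clean lines, re-joinable with '\r\n'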
+ "(0.[\d\.]+?),"
+ "([\d\.]+?),"
def entries(self): ''' Generator which reads one line at a time from the SQL dump file, and parses it to create SQLentry objects. Stops when the end of file is reached. ''' # This regular expression will match one SQL database entry (i.e. a # page), and each group represents an attribute of that entry. # NOTE: We don't need re.DOTALL because newlines are escaped. pageR = re.compile("\((\d+)," # cur_id (page ID number) + "(\d+)," # cur_namespace (namespace number) + "'(.*?)'," # cur_title (page title w/o namespace) + "'(.*?)'," # cur_text (page contents) + "'(.*?)'," # cur_comment (last edit's summary text) + "(\d+)," # cur_user (user ID of last contributor) + "'(.*?)'," # cur_user_text (user name) + "'(\d{14})'," # cur_timestamp (time of last edit) + "'(.*?)'," # cur_restrictions (protected pages have 'sysop' here) + "(\d+)," # cur_counter (view counter, disabled on WP) + "([01])," # cur_is_redirect + "([01])," # cur_minor_edit + "([01])," # cur_is_new + "(0.[\d\.]+?)," # cur_random (for random page function) + "'(\d{14})'," # inverse_timestamp (obsolete) + "'(\d{14})'\)") # cur_touched (cache update timestamp) print 'Reading SQL dump' # Open the file, read it using the given encoding, and replace invalid # characters with question marks. import codecs f=codecs.open(self.filename, 'r', encoding = self.encoding, errors='replace') eof = False while not eof: # Read only one (very long) line because we would risk out of memory # errors if we read the entire file at once line = f.readline() if line == '': print 'End of file.' eof = True self.entries = [] for id, namespace, title, text, comment, userid, username, timestamp, restrictions, counter, redirect, minor, new, random, inversetimestamp, touched in pageR.findall(line): new_entry = SQLentry(id, namespace, title, text, comment, userid, username, timestamp, restrictions, counter, redirect, minor, new, random, inversetimestamp, touched) yield new_entry f.close()
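The regex hunk above loosens the cur_random group: the old pattern insisted the value start with '0' followed by a decimal part, which fails on rows where the random value is stored differently (scientific notation, say). A tiny comparison with a made-up value:

    import re

    old = re.compile(r"(0.[\d\.]+?),")
    new = re.compile(r"([\d\.]+?),")

    sample = "1,"
    print(old.match(sample))            # None - the row would fail to parse
    print(new.match(sample).group(1))   # '1'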
s.append(('Jaren: '+'[[%d]] -- '*5+"'''%d'''"+' -- [[%d]]'*5)%tuple(range(year-5,year+6)))
s.append(('Jaren: '+'[[%s]] -- '*5+"'''%s'''"+' -- [[%s]]'*5)%ymap(range(year-5,year+6)))
def header(year): s=[] cent=(int(year)-1)/100+1 s.append('<!-- robot -->') s.append('<table align=center><tr><td align=center>') s.append("[[Eeuwen]]: [[%de eeuw]] -- '''[[%de eeuw]]''' -- [[%de eeuw]]"%tuple(range(cent-1,cent+2))) s.append('</td></tr><tr><td align=center>') s.append(('Jaren: '+'[[%d]] -- '*5+"'''%d'''"+' -- [[%d]]'*5)%tuple(range(year-5,year+6))) s.append('</td></tr></table>') s.append('<!-- /robot -->') s.append("----") return '\r\n'.join(s)
pre='(\r?\n)+' post='(\r?\n)+' R9=re.compile(pre+'\\<!-- robot --\\>.*\\<!-- /robot --\\>'+post)
pre='(\r?\n)+ *' post=' *(\r?\n)+' R9=re.compile(pre+'<!-- robot -->(.|\n)+?<!-- /robot -->'+post,re.MULTILINE)
def header(year): s=[] cent=(int(year)-1)/100+1 s.append('<!-- robot -->') s.append('<table align=center><tr><td align=center>') s.append("[[Eeuwen]]: [[%de eeuw]] -- '''[[%de eeuw]]''' -- [[%de eeuw]]"%tuple(range(cent-1,cent+2))) s.append('</td></tr><tr><td align=center>') s.append(('Jaren: '+'[[%d]] -- '*5+"'''%d'''"+' -- [[%d]]'*5)%tuple(range(year-5,year+6))) s.append('</td></tr></table>') s.append('<!-- /robot -->') s.append("----") return '\r\n'.join(s)
orgtext=text
def do(year): page=str(year) if debug: page='Robottest' text=wikipedia.getPage(mylang,page) # Replace all of these by the standardized formulae text=R4.sub("\r\n",text) text=R5.sub("\r\n\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R11.sub("\r\n",text) if R6.search(text): m=R6.search(text).group(0) print "MATCH:", len(m),repr(m) text=R6.sub("\r\n",text) text=R7.sub("\r\n",text) text=R8.sub("\r\n",text) text=R9.sub("\r\n",text) # Must be last text=R3.sub("\r\n"+header(year)+"\r\n'''Gebeurtenissen''':\r\n",text) text=R1.sub("\r\n\r\n----\r\n'''Geboren''':\r\n",text) text=R2.sub("\r\n\r\n----\r\n'''Overleden''':\r\n",text) if debug: print text else: print "="*70 print text print "="*70 answer=raw_input('submit y/n ?') if answer=='y': status,reason,data=wikipedia.putPage('test','Robottest',text) print status,reason else: print "===Not changed==="
if R6.search(text): m=R6.search(text).group(0) print "MATCH:", len(m),repr(m)
def do(year): page=str(year) if debug: page='Robottest' text=wikipedia.getPage(mylang,page) # Replace all of these by the standardized formulae text=R4.sub("\r\n",text) text=R5.sub("\r\n\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R11.sub("\r\n",text) if R6.search(text): m=R6.search(text).group(0) print "MATCH:", len(m),repr(m) text=R6.sub("\r\n",text) text=R7.sub("\r\n",text) text=R8.sub("\r\n",text) text=R9.sub("\r\n",text) # Must be last text=R3.sub("\r\n"+header(year)+"\r\n'''Gebeurtenissen''':\r\n",text) text=R1.sub("\r\n\r\n----\r\n'''Geboren''':\r\n",text) text=R2.sub("\r\n\r\n----\r\n'''Overleden''':\r\n",text) if debug: print text else: print "="*70 print text print "="*70 answer=raw_input('submit y/n ?') if answer=='y': status,reason,data=wikipedia.putPage('test','Robottest',text) print status,reason else: print "===Not changed==="
print text
f=open('/tmp/wik.in','w') f.write(orgtext) f.close() f=open('/tmp/wik.out','w') f.write(text) f.close() f=os.popen('diff -u /tmp/wik.in /tmp/wik.out','r') print f.read()
def do(year): page=str(year) if debug: page='Robottest' text=wikipedia.getPage(mylang,page) # Replace all of these by the standardized formulae text=R4.sub("\r\n",text) text=R5.sub("\r\n\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R11.sub("\r\n",text) if R6.search(text): m=R6.search(text).group(0) print "MATCH:", len(m),repr(m) text=R6.sub("\r\n",text) text=R7.sub("\r\n",text) text=R8.sub("\r\n",text) text=R9.sub("\r\n",text) # Must be last text=R3.sub("\r\n"+header(year)+"\r\n'''Gebeurtenissen''':\r\n",text) text=R1.sub("\r\n\r\n----\r\n'''Geboren''':\r\n",text) text=R2.sub("\r\n\r\n----\r\n'''Overleden''':\r\n",text) if debug: print text else: print "="*70 print text print "="*70 answer=raw_input('submit y/n ?') if answer=='y': status,reason,data=wikipedia.putPage('test','Robottest',text) print status,reason else: print "===Not changed==="
status,reason,data=wikipedia.putPage('test','Robottest',text)
status,reason,data=wikipedia.putPage(mylang,page,text)
def do(year): page=str(year) if debug: page='Robottest' text=wikipedia.getPage(mylang,page) # Replace all of these by the standardized formulae text=R4.sub("\r\n",text) text=R5.sub("\r\n\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R11.sub("\r\n",text) if R6.search(text): m=R6.search(text).group(0) print "MATCH:", len(m),repr(m) text=R6.sub("\r\n",text) text=R7.sub("\r\n",text) text=R8.sub("\r\n",text) text=R9.sub("\r\n",text) # Must be last text=R3.sub("\r\n"+header(year)+"\r\n'''Gebeurtenissen''':\r\n",text) text=R1.sub("\r\n\r\n----\r\n'''Geboren''':\r\n",text) text=R2.sub("\r\n\r\n----\r\n'''Overleden''':\r\n",text) if debug: print text else: print "="*70 print text print "="*70 answer=raw_input('submit y/n ?') if answer=='y': status,reason,data=wikipedia.putPage('test','Robottest',text) print status,reason else: print "===Not changed==="
self.mysite = wikipedia.getSite() pl=wikipedia.PageLink(self.mysite,arg[5:])
mysite = wikipedia.getSite() pl=wikipedia.PageLink(mysite, arg[5:])
def main(): # the option that's always selected when the bot wonders what to do with # a link. If it's None, the user is prompted (default behaviour). always = None alternatives = [] getAlternatives = True solve_redirect = False # if the -file argument is used, page titles are dumped in this array. # otherwise it will only contain one page. page_list = [] # if -file is not used, this temporary array is used to read the page title. page_title = [] primary = False main_only = False for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg) if arg: if arg.startswith('-primary:'): primary = True getAlternatives = False alternatives.append(arg[9:]) elif arg == '-primary': primary = True elif arg.startswith('-always:'): always = arg[8:] elif arg.startswith('-file'): if len(arg) == 5: # todo: check for console encoding to allow special characters # in filenames, as done below with pagename file = wikipedia.input(u'Please enter the list\'s filename:') else: file = arg[6:] # open file and read page titles out of it f=open(file) for line in f.readlines(): if line != '\n': page_list.append(line) f.close() elif arg.startswith('-pos:'): if arg[5]!=':': self.mysite = wikipedia.getSite() pl=wikipedia.PageLink(self.mysite,arg[5:]) if pl.exists(): alternatives.append(pl.linkname()) else: print "Possibility does not actually exist:",pl answer = wikipedia.input(u'Use it anyway? [y|N]') if answer in ('Y', 'y'): alternatives.append(pl.linkname()) else: alternatives.append(arg[5:]) elif arg=='-just': getalternatives = False elif arg=='-redir': solve_redirect = True elif arg=='-main': main_only = True else: page_title.append(arg) # if the disambiguation page is given as a command line argument, # connect the title's parts with spaces if page_title != []: page_title = ' '.join(page_title) page_list.append(page_title) # if no disambiguation pages was given as an argument, and none was # read from a file, query the user if page_list == []: pagename = wikipedia.input(u'Which page to check:') page_list.append(pagename) bot = DisambiguationRobot(always, alternatives, getAlternatives, solve_redirect, page_list, primary, main_only) bot.run()
sys.stderr.write('AllPages: %d done; continuing from "%s";\n'%(m,url2link(start,code='nl',incode='ascii')))
def allpages(start = '%21%200'): """Iterate over all Wikipedia pages in the home language, starting at the given page.""" start = link2url(start, code = mylang) m=0 while 1: text = getPage(mylang, family.allpagesname(mylang, start), do_quote=0, do_edit=0) #print text if family.version(mylang)=="1.2": R = re.compile('/wiki/(.*?)" *class=[\'\"]printable') else: R = re.compile('title =\"(.*?)\"') n = 0 for hit in R.findall(text): if not ':' in hit: # Some dutch exceptions. if not hit in ['Hoofdpagina','In_het_nieuws']: n = n + 1 if family.version(mylang)=="1.2": yield PageLink(mylang, url2link(hit, code = mylang, incode = mylang)) else: yield PageLink(mylang, hit) start = hit + '%20%200' if n < 100: break m += n sys.stderr.write('AllPages: %d done; continuing from "%s";\n'%(m,url2link(start,code='nl',incode='ascii')))
return u'{| border="1"\n' + u'\n|----\n'.join(lines) + '\n|}'
return u'{| border="1"\n! date/time || username || resolution || size || edit summary\n|----\n| ' + u'\n|----\n'.join(lines) + '\n|}'
def getFileVersionHistoryTable(self): lines = [] for (datetime, username, resolution, size, comment) in self.getFileVersionHistory(): lines.append('%s || %s || %s || %s || <nowiki>%s</nowiki>' % (datetime, username, resolution, size, comment)) return u'{| border="1"\n' + u'\n|----\n'.join(lines) + '\n|}'
if first in site.family.langs or first in site.family.known_families:
if first in site.family.langs or (first in site.family.known_families and site.family.known_families[first] != site.family.name):
def isInterwikiLink(s, site = None): """ Try to check whether s is in the form "foo:bar" where foo is a known language code or family. In such a case we are dealing with an interwiki link. """ if not ':' in s: return False site = site or getSite() first, rest = s.split(':',1) # interwiki codes are case-insensitive first = first.lower() if first in site.family.langs or first in site.family.known_families: return True return False
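The hunk above stops treating a prefix that maps back to the wiki's own family (for example 'wikipedia:' on a Wikipedia site) as an interwiki link. A minimal sketch of the corrected test; the helper name is made up, and it assumes the family object exposes langs, known_families and name as in the original:

    def looks_like_interwiki(s, site):
        if ':' not in s:
            return False
        first = s.split(':', 1)[0].lower()   # interwiki codes are case-insensitive
        if first in site.family.langs:
            return True
        return (first in site.family.known_families
                and site.family.known_families[first] != site.family.name)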
Page(self, "%s:Sandbox" % self.family.namespace(self.lang, 4)).get(force = True, sysop = sysop)
Page(self, "%s:Sandbox" % self.family.namespace(self.lang, 4)).get(force = True, get_redirect = True, sysop = sysop)
def getToken(self, getalways = True, getagain = False, sysop = False): if getagain or (getalways and ((sysop and not self._sysoptoken) or (not sysop and not self._token))): output(u"Getting page to get a token.") try: Page(self, "%s:Sandbox" % self.family.namespace(self.lang, 4)).get(force = True, sysop = sysop) #Page(self, "Non-existing page").get(force = True, sysop = sysop) except UserBlocked: raise except Error: pass if sysop: if not self._sysoptoken: return False else: return self._sysoptoken else: if not self._token: return False else: return self._token
wikipedia.output("Skipping link %s to an ignored page"%page2)
wikipedia.output(u"Skipping link %s to an ignored page"%page2)
def workDone(self, counter): """This is called by a worker to tell us that the promised work was completed as far as possible. The only argument is an instance of a counter class, that has methods minus() and plus() to keep counts of the total work todo.""" # Loop over all the pages that should have been taken care of for pl in self.pending: # Mark the page as done self.done[pl] = pl.site()
output(u"WARNING: [[%s]] is a double-redirect.")
output(u"WARNING: [[%s]] is a double-redirect." % item.group(1))
def getReferences(self, follow_redirects=True, offset=0): """ Return a list of pages that link to the page. If follow_redirects is True, also returns pages that link to a redirect pointing to the page. If offset is non-zero, skips that many references before loading. """ site = self.site() path = site.references_address(self.urlname()) if offset: path = path + "&offset=%i" % offset output(u'Getting references to %s' % self.aslink()) delay = 1 while True: txt = site.getUrl(path) # trim irrelevant portions of page # NOTE: this code relies on the way MediaWiki 1.5 formats the # "Whatlinkshere" special page; if future versions change the # format, they may break this code. startmarker = u"<!-- start content -->" endmarker = u"<!-- end content -->" try: start = txt.index(startmarker) + len(startmarker) end = txt.index(endmarker) except ValueError: output( u"Invalid page received from server.... Retrying in %i minutes." % delay) time.sleep(delay * 60.) delay *= 2 if delay > 30: delay = 30 continue txt = txt[start:end] break try: start = txt.index(u"<ul>") end = txt.rindex(u"</ul>") except ValueError: # No incoming links found on page return [] txt = txt[start:end+5]
output(u"ERROR> link from %s to %s:%s has leading colon?!" % (self.linkname(), newsite, newname))
output(u"ERROR: link from %s to [[%s:%s]] has leading colon?!" % (self.aslink(), newsite, newname))
def interwiki(self): """A list of interwiki links in the page. This will retrieve the page text to do its work, so it can raise the same exceptions that are raised by the get() method.
output(u"ERROR> link from %s to %s:%s has leading space?!" % (self.linkname(), newsite, newname))
output(u"ERROR: link from %s to [[%s:%s]] has leading space?!" % (self.aslink(), newsite, newname))
def interwiki(self): """A list of interwiki links in the page. This will retrieve the page text to do its work, so it can raise the same exceptions that are raised by the get() method.
except UnicodeEncodeError: output(u"ERROR> link from %s to %s:%s is invalid encoding?!" % (self.linkname(), newsite, newname))
except UnicodeError: output(u"ERROR: link from %s to [[%s:%s]] is invalid encoding?!" % (self.aslink(), newsite, newname))
def interwiki(self): """A list of interwiki links in the page. This will retrieve the page text to do its work, so it can raise the same exceptions that are raised by the get() method.
output(u"ERROR> link from %s to %s:%s contains invalid character?!" % (self.linkname(), newsite, newname))
output(u"ERROR: link from %s to [[%s:%s]] contains invalid character?!" % (self.aslink(), newsite, newname))
def interwiki(self): """A list of interwiki links in the page. This will retrieve the page text to do its work, so it can raise the same exceptions that are raised by the get() method.
output(u"ERROR> link from %s to %s:%s contains invalid unicode reference?!" % (self.linkname(), newsite, newname))
output(u"ERROR: link from %s to [[%s:%s]] contains invalid unicode reference?!" % (self.aslink(), newsite, newname))
def interwiki(self): """A list of interwiki links in the page. This will retrieve the page text to do its work, so it can raise the same exceptions that are raised by the get() method.
pagenames = pagenames.encode(site.encoding())
pagenames = pagenames.encode(self.site.encoding())
def getData(self): if self.pages == []: return address = self.site.export_address() # In the next line, we assume that what we got for eo: is NOT in x-convention # but SHOULD be. This is worst-case; to avoid not getting what we need, if we # find nothing, we will retry the normal way with an unadapted form. pagenames = u'\r\n'.join([x.sectionFreeLinkname(doublex = False) for x in self.pages]) if type(pagenames) != type(u''): print 'Warning: wikipedia.WikipediaXMLHandler.getData() got non-unicode page names. Please report this.' print pagenames # convert Unicode string to the encoding used on that wiki pagenames = pagenames.encode(site.encoding()) data = urlencode(( ('action', 'submit'), ('pages', pagenames), ('curonly', 'True'), )) #print repr(data) # Slow ourselves down get_throttle(requestsize = len(self.pages)) # Now make the actual request to the server now = time.time() conn = httplib.HTTPConnection(self.site.hostname()) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "PythonWikipediaBot/1.0") if self.site.cookies(): conn.putheader('Cookie', self.site.cookies()) conn.endheaders() conn.send(data) response = conn.getresponse() data = response.read() conn.close() get_throttle.setDelay(time.time() - now) return data
for encoding in ('utf-8',)+site.encodings():
for encoding in site.encodings():
def url2unicode(percentname, site): # Does the input string contain non-ascii characters? In that case, # it is not really an url, and we do not have to unquote it.... for c in percentname: if ord(c)>128: x=percentname break else: # Before removing the % encoding, make sure it is an ASCII string. # unquote doesn't work on unicode strings. x=urllib.unquote(str(percentname)) #print "DBG> ",language,repr(percentname),repr(x) # Try utf-8 first. It almost cannot succeed by accident! for encoding in ('utf-8',)+site.encodings(): try: encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) x,l = decode_func(x) #print "DBG> ",encoding,repr(x) return x except UnicodeError: pass raise UnicodeError("Could not decode %s" % repr(percentname))
encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) x,l = decode_func(x)
x = x.encode(encoding)
def url2unicode(percentname, site): # Does the input string contain non-ascii characters? In that case, # it is not really an url, and we do not have to unquote it.... for c in percentname: if ord(c)>128: x=percentname break else: # Before removing the % encoding, make sure it is an ASCII string. # unquote doesn't work on unicode strings. x=urllib.unquote(str(percentname)) #print "DBG> ",language,repr(percentname),repr(x) # Try utf-8 first. It almost cannot succeed by accident! for encoding in ('utf-8',)+site.encodings(): try: encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) x,l = decode_func(x) #print "DBG> ",encoding,repr(x) return x except UnicodeError: pass raise UnicodeError("Could not decode %s" % repr(percentname))
except UnicodeError:
except:
def url2unicode(percentname, site): # Does the input string contain non-ascii characters? In that case, # it is not really an url, and we do not have to unquote it.... for c in percentname: if ord(c)>128: x=percentname break else: # Before removing the % encoding, make sure it is an ASCII string. # unquote doesn't work on unicode strings. x=urllib.unquote(str(percentname)) #print "DBG> ",language,repr(percentname),repr(x) # Try utf-8 first. It almost cannot succeed by accident! for encoding in ('utf-8',)+site.encodings(): try: encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) x,l = decode_func(x) #print "DBG> ",encoding,repr(x) return x except UnicodeError: pass raise UnicodeError("Could not decode %s" % repr(percentname))
'minnan':'zh-min-nan.wiktionary.org', 'nb':'no.wiktionary.org', 'zh-cn':'zh.wiktionary.org', 'zh-tw':'zh.wiktionary.org'
'minnan':'zh-min-nan.wikibooks.org', 'nb':'no.wikibooks.org', 'zh-cn':'zh.wikibooks.org', 'zh-tw':'zh.wikibooks.org'
def __init__(self): family.Family.__init__(self) self.name = 'wikibooks' # Known wikibooks languages, given as a dictionary mapping the language code # to the hostname of the site hosting that wiktibooks. For human consumption, # the full name of the language is given behind each line as a comment self.langs = { 'minnan':'zh-min-nan.wiktionary.org', 'nb':'no.wiktionary.org', 'zh-cn':'zh.wiktionary.org', 'zh-tw':'zh.wiktionary.org' } for lang in self.knownlanguages: self.langs[lang] = lang+'.wiktionary.org'
self.langs[lang] = lang+'.wiktionary.org'
self.langs[lang] = '%s.wikibooks.org' % lang
def __init__(self): family.Family.__init__(self) self.name = 'wikibooks' # Known wikibooks languages, given as a dictionary mapping the language code # to the hostname of the site hosting that wiktibooks. For human consumption, # the full name of the language is given behind each line as a comment self.langs = { 'minnan':'zh-min-nan.wiktionary.org', 'nb':'no.wiktionary.org', 'zh-cn':'zh.wiktionary.org', 'zh-tw':'zh.wiktionary.org' } for lang in self.knownlanguages: self.langs[lang] = lang+'.wiktionary.org'
if wikipedia.config.disambiguation_comment[ self.mysite.family.name][self.mylang]:
if wikipedia.config.disambiguation_comment.has_key(self.mysite.family.name) and wikipedia.config.disambiguation_comment[self.mysite.family.name].has_key(self.mylang):
def run(self): if self.main_only: if not ignore_title.has_key(self.mysite.family.name): ignore_title[self.mysite.family.name] = {} if not ignore_title[self.mysite.family.name].has_key(self.mylang): ignore_title[self.mysite.family.name][self.mylang] = [] ignore_title[self.mysite.family.name][self.mylang] += [ u'%s:' % namespace for namespace in self.mysite.namespaces()] for disambTitle in self.page_list: # first check whether user has customized the edit comment if wikipedia.config.disambiguation_comment[ self.mysite.family.name][self.mylang]: comment = wikipedia.translate(self.mysite, wikipedia.config.disambiguation_comment[ self.mysite.family.name] ) % disambTitle elif self.solve_redirect: # when run with -redir argument, there's another summary message comment = wikipedia.translate(self.mysite, msg_redir) \ % disambTitle else: comment = wikipedia.translate(self.mysite, msg) % disambTitle
self.subentries[self.wikilang] = subentry
self.subentries.setdefault(subentry.subentrylang, []).append(subentry)
def addSubEntry(self,subentry):
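The hunk above switches the per-language bookkeeping from a single slot to a list, so several subentries in the same language are collected instead of overwriting each other. The setdefault idiom in isolation:

    subentries = {}
    subentries.setdefault('nl', []).append('subentry 1')
    subentries.setdefault('nl', []).append('subentry 2')
    print(subentries)   # {'nl': ['subentry 1', 'subentry 2']}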
while 1:
while i< len(self.sortedsubentries):
def sortSubentries(self): print self.subentries if not self.subentries == {}: self.sortedsubentries = self.subentries.keys() self.sortedsubentries.sort(sortonname(langnames[self.wikilang])) print "should now be sorted: %s"%self.sortedsubentries i = 0 while 1: x = self.sortedsubentries[i] if x == self.wikilang: # search the subentry of the same language of the Wiktionary samelangsubentry = self.sortedsubentries[i] del self.sortedsubentries[i] self.sortedsubentries.reverse() self.sortedsubentries.append(samelangsubentry) self.sortedsubentries.reverse() # and put it before all the others break
for subentry in self.sortedsubentries: entry= entry + self.subentries[subentry].wikiwrap(self.wikilang) + '\n'
for index in self.sortedsubentries: for subentry in self.subentries[index]: entry= entry + subentry.wikiwrap(self.wikilang) + '\n----\n'
def wikiwrap(self): entry = '' self.sortSubentries() print "sorted: %s",self.sortedsubentries for subentry in self.sortedsubentries: entry= entry + self.subentries[subentry].wikiwrap(self.wikilang) + '\n'
self.subentries[subentrieskey].showcontents(indentation+2)
for subentry in self.subentries[subentrieskey]:
def showcontents(self): indentation = 0 print ' ' * indentation + 'wikilang = %s'% self.wikilang
subentry.showcontents(indentation+2)
def showcontents(self): indentation = 0 print ' ' * indentation + 'wikilang = %s'% self.wikilang
result = ''
result = u''
def removeEntity(name): import re, htmlentitydefs Rentity = re.compile(r'&([A-Za-z]+);') result = '' i = 0 while i < len(name): m = Rentity.match(name[i:]) if m: if htmlentitydefs.entitydefs.has_key(m.group(1)): result = result + htmlentitydefs.entitydefs[m.group(1)] i += m.end() else: result += name[i] i += 1 else: result += name[i] i += 1 return result
if htmlentitydefs.entitydefs.has_key(m.group(1)): result = result + htmlentitydefs.entitydefs[m.group(1)]
if htmlentitydefs.name2codepoint.has_key(m.group(1)): x = htmlentitydefs.name2codepoint[m.group(1)] result = result + unichr(x)
def removeEntity(name): import re, htmlentitydefs Rentity = re.compile(r'&([A-Za-z]+);') result = '' i = 0 while i < len(name): m = Rentity.match(name[i:]) if m: if htmlentitydefs.entitydefs.has_key(m.group(1)): result = result + htmlentitydefs.entitydefs[m.group(1)] i += m.end() else: result += name[i] i += 1 else: result += name[i] i += 1 return result
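The hunk above replaces htmlentitydefs.entitydefs, which yields Latin-1 byte strings (or another character reference for entities outside Latin-1), with name2codepoint plus unichr, so the result is always a real unicode character. A small Python 2 check:

    import htmlentitydefs

    print(repr(htmlentitydefs.entitydefs['eacute']))               # '\xe9'  (a byte string)
    print(repr(unichr(htmlentitydefs.name2codepoint['eacute'])))   # u'\xe9'
    print(repr(htmlentitydefs.entitydefs['hellip']))               # '&#8230;' for non-Latin-1 entities
    print(repr(unichr(htmlentitydefs.name2codepoint['hellip'])))   # u'\u2026'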
name = unicodeName(name, language, altlanguage)
def html2unicode(name, language, altlanguage=None): name = removeEntity(name) name = unicodeName(name, language, altlanguage) import re Runi = re.compile('&#(\d+);') result = u'' i=0 while i < len(name): m = Runi.match(name[i:]) if m: result += unichr(int(m.group(1))) i += m.end() else: try: result += name[i] i += 1 except UnicodeDecodeError: print repr(name) raise return result
choice = wikipedia.input(u'Do you want to work on pages linking to %s?' % refpl.linkname(), ['yes', 'no', 'change redirect'], ['y', 'N', 'c'], 'N')
choice = wikipedia.inputChoice(u'Do you want to work on pages linking to %s?' % refpl.linkname(), ['yes', 'no', 'change redirect'], ['y', 'N', 'c'], 'N')
def treat(self, refpl, disambPl): """ Parameters: disambPl - The disambiguation page or redirect we don't want anything to link on refpl - A page linking to disambPl Returns False if the user pressed q to completely quit the program. Otherwise, returns True. """
pre='((\r?\n)*---- *)?(\r?\n)?(\'\'\')?'
pre='((\r?\n)*---- *)?(\r?\n)*(\'\'\')?'
def header(year): s=[] cent=(int(year)-1)/100+1 s.append('<!-- robot -->') s.append('<table align=center><tr><td align=center>') s.append("[[Eeuwen]]: [[%s]] -- '''[[%s]]''' -- [[%s]]"%cmap(beforeandafter(cent))) s.append('</td></tr><tr><td align=center>') s.append(('Jaren: '+'[[%s]] -- '*5+"'''%s'''"+' -- [[%s]]'*5)%ymap(range(year-5,year+6))) s.append('</td></tr></table>') s.append('<!-- /robot -->') s.append("----") return '\r\n'.join(s)
R2=re.compile(pre+'(Overleden|Sterfdata|Gestorven)'+post,re.MULTILINE)
R2=re.compile(pre+'(Overleden|Sterf(te)?data|Gestorven)'+post,re.MULTILINE)
def header(year): s=[] cent=(int(year)-1)/100+1 s.append('<!-- robot -->') s.append('<table align=center><tr><td align=center>') s.append("[[Eeuwen]]: [[%s]] -- '''[[%s]]''' -- [[%s]]"%cmap(beforeandafter(cent))) s.append('</td></tr><tr><td align=center>') s.append(('Jaren: '+'[[%s]] -- '*5+"'''%s'''"+' -- [[%s]]'*5)%ymap(range(year-5,year+6))) s.append('</td></tr></table>') s.append('<!-- /robot -->') s.append("----") return '\r\n'.join(s)
pre='((\r?\n)+---- *)?(\r?\n)'
pre='((\r?\n)+---- *)?(\r?\n)+'
def header(year): s=[] cent=(int(year)-1)/100+1 s.append('<!-- robot -->') s.append('<table align=center><tr><td align=center>') s.append("[[Eeuwen]]: [[%s]] -- '''[[%s]]''' -- [[%s]]"%cmap(beforeandafter(cent))) s.append('</td></tr><tr><td align=center>') s.append(('Jaren: '+'[[%s]] -- '*5+"'''%s'''"+' -- [[%s]]'*5)%ymap(range(year-5,year+6))) s.append('</td></tr></table>') s.append('<!-- /robot -->') s.append("----") return '\r\n'.join(s)
R8=re.compile(pre+'\[\[[Ee]euwen\]\]:? *[-\\[\\]\\<\\>\'\| 0-9euwvChr\.]{14,70}'+post,re.MULTILINE)
R8=re.compile(pre+'\[\[[Ee]euwen\]\]:? *(\\<\\/?b\\>|eeuw|[-\\[\\]\\<\\>\'\| 0-9evChr\.]){14,70}'+post,re.MULTILINE)
def header(year): s=[] cent=(int(year)-1)/100+1 s.append('<!-- robot -->') s.append('<table align=center><tr><td align=center>') s.append("[[Eeuwen]]: [[%s]] -- '''[[%s]]''' -- [[%s]]"%cmap(beforeandafter(cent))) s.append('</td></tr><tr><td align=center>') s.append(('Jaren: '+'[[%s]] -- '*5+"'''%s'''"+' -- [[%s]]'*5)%ymap(range(year-5,year+6))) s.append('</td></tr></table>') s.append('<!-- /robot -->') s.append("----") return '\r\n'.join(s)
text=wikipedia.getPage(mylang,page)
try: text=wikipedia.getPage(mylang,page) except wikipedia.NoPage: return
def do(year): page=str(year) if debug: page='Robottest' text=wikipedia.getPage(mylang,page) orgtext=text # Replace all of these by the standardized formulae text=R4.sub("\r\n",text) text=R5.sub("\r\n\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R11.sub("\r\n",text) text=R12.sub("\r\n",text) #if R6.search(text): #m=R6.search(text).group(0) #print "MATCH:", len(m),repr(m) text=R6.sub("\r\n",text) text=R7.sub("\r\n",text) text=R8.sub("\r\n",text) text=R9.sub("\r\n",text) # Must be last text=R3.sub("\r\n"+header(year)+"\r\n'''Gebeurtenissen''':\r\n",text) text=R1.sub("\r\n\r\n----\r\n'''Geboren''':\r\n",text) text=R2.sub("\r\n\r\n----\r\n'''Overleden''':\r\n",text) if debug: print text else: print "="*70 f=open('/tmp/wik.in','w') f.write(orgtext) f.close() f=open('/tmp/wik.out','w') f.write(text) f.close() f=os.popen('diff -u /tmp/wik.in /tmp/wik.out','r') print f.read() print "="*70 answer=raw_input('submit y/n ?') if answer=='y': status,reason,data=wikipedia.putPage(mylang,page,text) print status,reason else: print "===Not changed==="
f=open('/tmp/wik.in','w') f.write(orgtext) f.close() f=open('/tmp/wik.out','w') f.write(text) f.close() f=os.popen('diff -u /tmp/wik.in /tmp/wik.out','r') print f.read()
if 0: f=open('/tmp/wik.in','w') f.write(orgtext) f.close() f=open('/tmp/wik.out','w') f.write(text) f.close() f=os.popen('diff -u /tmp/wik.in /tmp/wik.out','r') print f.read() else: print text
def do(year): page=str(year) if debug: page='Robottest' text=wikipedia.getPage(mylang,page) orgtext=text # Replace all of these by the standardized formulae text=R4.sub("\r\n",text) text=R5.sub("\r\n\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R10.sub("\r\n",text) text=R11.sub("\r\n",text) text=R12.sub("\r\n",text) #if R6.search(text): #m=R6.search(text).group(0) #print "MATCH:", len(m),repr(m) text=R6.sub("\r\n",text) text=R7.sub("\r\n",text) text=R8.sub("\r\n",text) text=R9.sub("\r\n",text) # Must be last text=R3.sub("\r\n"+header(year)+"\r\n'''Gebeurtenissen''':\r\n",text) text=R1.sub("\r\n\r\n----\r\n'''Geboren''':\r\n",text) text=R2.sub("\r\n\r\n----\r\n'''Overleden''':\r\n",text) if debug: print text else: print "="*70 f=open('/tmp/wik.in','w') f.write(orgtext) f.close() f=open('/tmp/wik.out','w') f.write(text) f.close() f=os.popen('diff -u /tmp/wik.in /tmp/wik.out','r') print f.read() print "="*70 answer=raw_input('submit y/n ?') if answer=='y': status,reason,data=wikipedia.putPage(mylang,page,text) print status,reason else: print "===Not changed==="