rem (string, length 0-322k) | add (string, length 0-2.05M) | context (string, length 8-228k)
---|---|---|
def setpage(self, new): """Sets page and pagelink"""
|
def setpage(self): """Sets page and page title""" site = wikipedia.getSite()
|
def setpage(self, new): """Sets page and pagelink""" pageTitle = self.options.page or wikipedia.input(u"Page to edit:") self.page = wikipedia.Page(self.site, pageTitle) if not self.options.edit_redirect and self.page.isRedirectPage(): self.page = wikipedia.Page(self.site, self.page.getRedirectTarget())
|
self.page = wikipedia.Page(self.site, pageTitle)
|
self.page = wikipedia.Page(site, pageTitle)
|
def setpage(self, new): """Sets page and pagelink""" pageTitle = self.options.page or wikipedia.input(u"Page to edit:") self.page = wikipedia.Page(self.site, pageTitle) if not self.options.edit_redirect and self.page.isRedirectPage(): self.page = wikipedia.Page(self.site, self.page.getRedirectTarget())
|
self.page = wikipedia.Page(self.site, self.page.getRedirectTarget())
|
self.page = wikipedia.Page(site, self.page.getRedirectTarget())
|
def setpage(self, new): """Sets page and pagelink""" pageTitle = self.options.page or wikipedia.input(u"Page to edit:") self.page = wikipedia.Page(self.site, pageTitle) if not self.options.edit_redirect and self.page.isRedirectPage(): self.page = wikipedia.Page(self.site, self.page.getRedirectTarget())
|
self.initialise_data()
|
def run(self): self.initialise_data() try: old = self.page.get(get_redirect = self.options.edit_redirect) except wikipedia.NoPage: old = "" textEditor = TextEditor(self.options) new = textEditor.edit(old) if new and old != new: new = self.repair(new) wikipedia.showDiff(old, new) comment = wikipedia.input(u"What did you change? ") + sig try: self.page.put(new, comment = comment, minorEdit = False, watchArticle=self.options.watch) except wikipedia.EditConflict: self.handle_edit_conflict(new) else: wikipedia.output(u"Nothing changed")
|
|
textEditor = TextEditor(self.options)
|
textEditor = TextEditor()
|
def run(self): self.initialise_data() try: old = self.page.get(get_redirect = self.options.edit_redirect) except wikipedia.NoPage: old = "" textEditor = TextEditor(self.options) new = textEditor.edit(old) if new and old != new: new = self.repair(new) wikipedia.showDiff(old, new) comment = wikipedia.input(u"What did you change? ") + sig try: self.page.put(new, comment = comment, minorEdit = False, watchArticle=self.options.watch) except wikipedia.EditConflict: self.handle_edit_conflict(new) else: wikipedia.output(u"Nothing changed")
|
name = url2unicode(name, language = code)
|
try: name = url2unicode(name, language = code) except UnicodeEncodeError: name = html2unicode(name, language = code, altlanguage = incode)
|
def link2url(name, code, incode = None): """Convert a interwiki link name of a page to the proper name to be used in a URL for that page. code should specify the language for the link""" if '%' in name: name = url2unicode(name, language = code) else: name = html2unicode(name, language = code, altlanguage = incode) #print "DBG>",repr(name) # Remove spaces from beginning and the end name = name.strip() # Standardize capitalization if name: name = name[0].upper()+name[1:] #print "DBG>",repr(name) try: result = str(name.encode(code2encoding(code))) except UnicodeError: print "Cannot convert %s into a URL for %s" % (repr(name), code) # Put entities in there. result = addEntity(name) #raise result = space2underline(result) return urllib.quote(result)
|
self.maxdelay = delay
|
def setDelay(self, delay = config.minthrottle, absolute = False): if absolute: self.mindelay = delay self.maxdelay = delay self.delay = delay # Don't count the time we already waited as part of our waiting time :-0 self.now = time.time()
|
|
allNotFound = True
|
def run(self): dt=15 while True: try: data = self.getData() except (socket.error, httplib.BadStatusLine): # Print the traceback of the caught exception print ''.join(traceback.format_exception(*sys.exc_info())) output(u'DBG> got network error in GetAll.run. Sleeping for %d seconds'%dt) time.sleep(dt) if dt <= 60: dt += 15 elif dt < 360: dt += 60 else: break if not data: return handler = xmlreader.MediaWikiXmlHandler() handler.setCallback(self.oneDone) handler.setHeaderCallback(self.headerDone) try: xml.sax.parseString(data, handler) except xml.sax._exceptions.SAXParseException: f=open('sax_parse_bug.dat','w') f.write(data) f.close() print >>sys.stderr, "Dumped invalid XML to sax_parse_bug.dat" raise except PageNotFound: return # All of the ones that have not been found apparently do not exist allNotFound = True for pl in self.pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): pl._getexception = NoPage else: allNotFound = False if allNotFound: f = codecs.open('pageNotFound.txt', 'a', 'utf-8') f.write('##################################################\n') f.write('##################################################\n') f.write('##################################################\n') f.write('%s\n' % self.pages) f.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n') f.write('%s\n' % data) f.write('##################################################\n') f.close()
|
|
else: allNotFound = False if allNotFound: f = codecs.open('pageNotFound.txt', 'a', 'utf-8') f.write('##################################################\n') f.write('##################################################\n') f.write('##################################################\n') f.write('%s\n' % self.pages) f.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n') f.write('%s\n' % data) f.write('##################################################\n') f.close()
|
def run(self): dt=15 while True: try: data = self.getData() except (socket.error, httplib.BadStatusLine): # Print the traceback of the caught exception print ''.join(traceback.format_exception(*sys.exc_info())) output(u'DBG> got network error in GetAll.run. Sleeping for %d seconds'%dt) time.sleep(dt) if dt <= 60: dt += 15 elif dt < 360: dt += 60 else: break if not data: return handler = xmlreader.MediaWikiXmlHandler() handler.setCallback(self.oneDone) handler.setHeaderCallback(self.headerDone) try: xml.sax.parseString(data, handler) except xml.sax._exceptions.SAXParseException: f=open('sax_parse_bug.dat','w') f.write(data) f.close() print >>sys.stderr, "Dumped invalid XML to sax_parse_bug.dat" raise except PageNotFound: return # All of the ones that have not been found apparently do not exist allNotFound = True for pl in self.pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): pl._getexception = NoPage else: allNotFound = False if allNotFound: f = codecs.open('pageNotFound.txt', 'a', 'utf-8') f.write('##################################################\n') f.write('##################################################\n') f.write('##################################################\n') f.write('%s\n' % self.pages) f.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n') f.write('%s\n' % data) f.write('##################################################\n') f.close()
|
|
cats = pl.categories(withSortKeys = True)
|
cats = pl.categories()
|
def include(pl,checklinks=True,realinclude=True,linkterm=None): cl = checklinks if realinclude: try: text = pl.get() except wikipedia.NoPage: pass except wikipedia.IsRedirectPage: cl = True pass else: cats = pl.categories() if not workingcat in cats: cats = pl.categories(withSortKeys = True) for c in cats: if rawtoclean(c) in parentcats: cats.remove(c) if linkterm: pl.put(wikipedia.replaceCategoryLinks(text, cats + [wikipedia.Page(mysite,"%s|%s"%(workingcat.title(),linkterm))])) else: pl.put(wikipedia.replaceCategoryLinks(text, cats + [workingcat])) if cl: if checkforward: try: pl.get() except wikipedia.IsRedirectPage: pl2 = wikipedia.Page(mysite,pl.getRedirectTarget()) if needcheck(pl2): tocheck.append(pl2) checked[pl2]=pl2 except wikipedia.Error: pass else: for page2 in pl.linkedPages(): if needcheck(page2): tocheck.append(page2) checked[page2] = page2 if checkbackward: for refPage in pl.getReferences(): if needcheck(refPage): tocheck.append(refPage) checked[refPage] = refPage
|
if rawtoclean(c) in parentcats: cats.remove(c) if linkterm: pl.put(wikipedia.replaceCategoryLinks(text, cats + [wikipedia.Page(mysite,"%s|%s"%(workingcat.title(),linkterm))]))
|
if c in parentcats: catlib.change_category(pl,c,workingcat) break
|
def include(pl,checklinks=True,realinclude=True,linkterm=None): cl = checklinks if realinclude: try: text = pl.get() except wikipedia.NoPage: pass except wikipedia.IsRedirectPage: cl = True pass else: cats = pl.categories() if not workingcat in cats: cats = pl.categories(withSortKeys = True) for c in cats: if rawtoclean(c) in parentcats: cats.remove(c) if linkterm: pl.put(wikipedia.replaceCategoryLinks(text, cats + [wikipedia.Page(mysite,"%s|%s"%(workingcat.title(),linkterm))])) else: pl.put(wikipedia.replaceCategoryLinks(text, cats + [workingcat])) if cl: if checkforward: try: pl.get() except wikipedia.IsRedirectPage: pl2 = wikipedia.Page(mysite,pl.getRedirectTarget()) if needcheck(pl2): tocheck.append(pl2) checked[pl2]=pl2 except wikipedia.Error: pass else: for page2 in pl.linkedPages(): if needcheck(page2): tocheck.append(page2) checked[page2] = page2 if checkbackward: for refPage in pl.getReferences(): if needcheck(refPage): tocheck.append(refPage) checked[refPage] = refPage
|
print print("==%s==")%pl.title()
|
wikipedia.output(u'') wikipedia.output(u"==%s=="%pl.title())
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check")
|
wikipedia.output(u"t: Give the beginning of the text of the page") wikipedia.output(u"z: Add under another title (as [[Category|Title]])") wikipedia.output(u"x: Add the page, but do not check links to and from it") wikipedia.output(u"c: Do not add the page, but do check links") wikipedia.output(u"a: Add another page") wikipedia.output(u"l: Give a list of the pages to check")
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
print("Redirect page. Will be included normally.")
|
wikipedia.output(u"Redirect page. Will be included normally.")
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
print("Page does not exist; not added.")
|
wikipedia.output(u"Page does not exist; not added.")
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title()
|
wikipedia.output(u"Number of pages still to check: %s"%len(tocheck)) wikipedia.output(u"Pages to be checked:") wikipedia.output(u" - ".join(page.title() for page in tocheck)) wikipedia.output(u"==%s=="%pl.title())
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
print("==%s==")%pl.title()
|
wikipedia.output(u"==%s=="%pl.title())
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
wikipedia.output(pl.get(get_redirect=True)[0:ctoshow])
|
wikipedia.output(u''+pl.get(get_redirect=True)[0:ctoshow])
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
print "Page does not exist."
|
wikipedia.output(u"Page does not exist.")
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
print("Not understood.")
|
wikipedia.output(u"Not understood.")
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
workingcat = catlib.Category(mysite,workingcatname)
|
workingcat = catlib.Category(mysite,mysite.category_namespace()+':'+workingcatname)
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
print answer
|
wikipedia.output(u''+answer)
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
if not checkbackward:
|
if not checkbroken:
|
def asktoadd(pl): ctoshow = 500 print print("==%s==")%pl.title() while 1: answer = raw_input("y(es)/n(o)/i(gnore)/(o)ther options? ") if answer=='y': include(pl) break if answer=='c': include(pl,realinclude=False) break if answer=='z': if pl.exists(): if not pl.isRedirectPage(): linkterm = wikipedia.input(u"In what manner should it be alphabetized?") include(pl,linkterm=linkterm) break include(pl) break elif answer=='n': exclude(pl) break elif answer=='i': exclude(pl,real_exclude=False) break elif answer=='o': print("t: Give the beginning of the text of the page") print("z: Add under another title (as [[Category|Title]])") print("x: Add the page, but do not check links to and from it") print("c: Do not add the page, but do check links") print("a: Add another page") print("l: Give a list of the pages to check") elif answer=='a': pagetitle = raw_input("Specify page to add:") page=wikipedia.Page(wikipedia.getSite(),pagetitle) if not page in checked.keys(): include(page) elif answer=='x': if pl.exists(): if pl.isRedirectPage(): print("Redirect page. Will be included normally.") include(pl,realinclude=False) else: include(pl,checklinks=False) else: print("Page does not exist; not added.") exclude(pl,real_exclude=False) break elif answer=='l': print("Number of pages still to check: %s")%len(tocheck) print("Pages to be checked:") print tocheck print("==%s==")%pl.title() elif answer=='t': print("==%s==")%pl.title() try: wikipedia.output(pl.get(get_redirect=True)[0:ctoshow]) except wikipedia.NoPage: print "Page does not exist." ctoshow += 500 else: print("Not understood.")
|
self.seriouslangs = biglangs4
|
self.seriouslangs = self.biglangs4
|
def __init__(self): family.Family.__init__(self) self.name = 'wiktionary' self.langs = { 'minnan':'zh-min-nan.wiktionary.org', 'nb':'no.wiktionary.org', 'zh-cn':'zh.wiktionary.org', 'zh-tw':'zh.wiktionary.org' } for lang in self.knownlanguages: self.langs[lang] = lang+'.wiktionary.org' # Most namespaces are inherited from family.Family. self.namespaces[4] = { '_default': u'Wiktionary', } self.namespaces[5] = { '_default': u'Wiktionary talk', 'de': u'Wiktionary Diskussion', 'pt': u'Wiktionary Discussão', 'es': u'Wiktionary Discusión', }
|
if replaceit or new_page_title == link_text:
|
if replaceit and trailing_chars: newlink = "[[%s]]%s" % (new_page_title, trailing_chars) elif new_page_title == link_text or replaceit:
|
def treat(refpl, thispl): try: reftxt=refpl.get() except wikipedia.IsRedirectPage: wikipedia.output(u'%s is a redirect to %s' % (refpl.linkname(), thispl.linkname())) choice = wikipedia.input(u'Do you want to work on pages linking to %s? [y|N]' % refpl.linkname()) if choice == 'y': for ref_redir in getReferences(refpl): refpl_redir=wikipedia.PageLink(wikipedia.mylang, ref_redir) treat(refpl_redir, refpl) choice2 = wikipedia.input(u'Do you want to make redirect %s point to %s? [y|N]' % (refpl.linkname(), target)) if choice2 == 'y': redir_text = '#REDIRECT [[%s]]' % target refpl.put(redir_text) else: n = 0 curpos = 0 while 1: m=linkR.search(reftxt, pos = curpos) if not m: if n == 0: wikipedia.output(u"Not found in %s:%s" % (refpl.code(), refpl.linkname())) elif not debug: refpl.put(reftxt) return True # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 # Try to standardize the page. if wikipedia.isInterwikiLink(m.group(1)): linkpl = None else: linkpl=wikipedia.PageLink(thispl.code(), m.group(1), incode = refpl.code()) # Check whether the link found is to thispl. if linkpl != thispl: continue
|
self.inPlace = inPlace
|
def __init__(self, oldCatTitle, newCatTitle, batchMode = False, editSummary = '', inPlace = False, moveCatPage = True): self.editSummary = editSummary self.inPlace = inPlace self.oldCat = catlib.Category(wikipedia.getSite(), 'Category:' + oldCatTitle) self.newCatTitle = newCatTitle self.inPlace = inPlace self.moveCatPage = moveCatPage # set edit summary message
|
|
catlib.change_category(subcategory, self.oldCat, newCat, inPlace=inPlace)
|
catlib.change_category(subcategory, self.oldCat, newCat, inPlace=self.inPlace)
|
def run(self): newCat = catlib.Category(wikipedia.getSite(), 'Category:' + self.newCatTitle) gen = pagegenerators.CategorizedPageGenerator(self.oldCat, recurse = False) preloadingGen = pagegenerators.PreloadingGenerator(gen) for article in preloadingGen: catlib.change_category(article, self.oldCat, newCat, inPlace=self.inPlace) # TODO: create subcategory generator subcategories = self.oldCat.subcategories(recurse = 0) if len(subcategories) == 0: wikipedia.output(u'There are no subcategories in category ' + self.oldCat.title()) else: for subcategory in subcategories: catlib.change_category(subcategory, self.oldCat, newCat, inPlace=inPlace) if self.oldCat.exists() and self.moveCatPage: # try to copy page contents to new cat page if self.oldCat.copyAndKeep(self.newCatTitle, wikipedia.translate(wikipedia.getSite(), cfd_templates)): if self.oldCat.isEmpty(): reason = wikipedia.translate(wikipedia.getSite(), deletion_reason_move) % self.newCatTitle if batchMode == True: self.oldCat.delete(reason, False) else: self.oldCat.delete(reason, True) else: wikipedia.output('Couldn\'t copy contents of %s because %s already exists.' % (self.oldCatTitle, self.newCatTitle))
|
redirR = re.compile('#REDIRECT[^\[]*\[\[([^\]]+)\]\]', re.IGNORECASE)
|
redirR = wikipedia.redirectRe(wikipedia.mylang)
|
def get_redirects_from_dump(sqlfilename): dict = {} # open sql dump and read page titles out of it dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) # case-insensitive regular expression which takes a redirect's text # (that is: '#REDIRECT [[bla]]') and extracts its target (that is: bla). redirR = re.compile('#REDIRECT[^\[]*\[\[([^\]]+)\]\]', re.IGNORECASE) for entry in dump.entries(): if entry.redirect == '1': m = redirR.search(entry.text) if m == None: # NOTE: due to a MediaWiki bug, many articles are falsely marked with the # redirect flag, so this warning will eventually show up several times. # Ask a MediaWiki developer to fix the SQL database. print 'WARNING: can\'t extract the target of redirect %s, ignoring' % (entry.full_title()) else: target = m.group(1) target = target.replace(' ', '_') dict[entry.full_title()] = target return dict
|
if entry.redirect == '1':
|
if entry.redirect:
|
def get_redirects_from_dump(sqlfilename): dict = {} # open sql dump and read page titles out of it dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) # case-insensitive regular expression which takes a redirect's text # (that is: '#REDIRECT [[bla]]') and extracts its target (that is: bla). redirR = re.compile('#REDIRECT[^\[]*\[\[([^\]]+)\]\]', re.IGNORECASE) for entry in dump.entries(): if entry.redirect == '1': m = redirR.search(entry.text) if m == None: # NOTE: due to a MediaWiki bug, many articles are falsely marked with the # redirect flag, so this warning will eventually show up several times. # Ask a MediaWiki developer to fix the SQL database. print 'WARNING: can\'t extract the target of redirect %s, ignoring' % (entry.full_title()) else: target = m.group(1) target = target.replace(' ', '_') dict[entry.full_title()] = target return dict
|
print 'NOTE: This function is not working yet. Press Enter to continue.' raw_input()
|
def retrieve_broken_redirects(source): if source == None: # retrieve information from the live wiki's maintenance page host = wikipedia.family.hostname(wikipedia.mylang) # broken redirect maintenance page's URL url = wikipedia.family.maintenance_address(wikipedia.mylang, 'brokenredirects', default_limit = False) print 'Retrieving maintenance page...' maintenance_txt, charset = wikipedia.getUrl(host,url) # regular expression which finds redirects which point to a non-existing page inside the HTML Rredir = re.compile('\<li\>\<a href=\"\/w\/wiki.phtml\?title=(.*?)&redirect=no\"') redir_names = Rredir.findall(maintenance_txt) print 'Retrieved %d redirects from maintenance page.\n' % len(redir_names) for redir_name in redir_names: yield redir_name else: print 'NOTE: This function is not working yet. Press Enter to continue.' raw_input() print 'Step 1: Getting a list of all redirects' redirs = get_redirects_from_dump(sqlfilename) print 'Step 2: Getting a list of all page titles' dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) pagetitles = [] for entry in dump.entries(): print entry.full_title() pagetitles.append(entry.full_title()) print 'Step 3: Comparing. This might take a while (or two).' brokenredirs = [] for (key, value) in redirs.popitem(): print key if not value in pagetitles: brokenredirs.append(key) print brokenredirs
|
|
pagetitles = []
|
pagetitles = {}
|
def retrieve_broken_redirects(source): if source == None: # retrieve information from the live wiki's maintenance page host = wikipedia.family.hostname(wikipedia.mylang) # broken redirect maintenance page's URL url = wikipedia.family.maintenance_address(wikipedia.mylang, 'brokenredirects', default_limit = False) print 'Retrieving maintenance page...' maintenance_txt, charset = wikipedia.getUrl(host,url) # regular expression which finds redirects which point to a non-existing page inside the HTML Rredir = re.compile('\<li\>\<a href=\"\/w\/wiki.phtml\?title=(.*?)&redirect=no\"') redir_names = Rredir.findall(maintenance_txt) print 'Retrieved %d redirects from maintenance page.\n' % len(redir_names) for redir_name in redir_names: yield redir_name else: print 'NOTE: This function is not working yet. Press Enter to continue.' raw_input() print 'Step 1: Getting a list of all redirects' redirs = get_redirects_from_dump(sqlfilename) print 'Step 2: Getting a list of all page titles' dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) pagetitles = [] for entry in dump.entries(): print entry.full_title() pagetitles.append(entry.full_title()) print 'Step 3: Comparing. This might take a while (or two).' brokenredirs = [] for (key, value) in redirs.popitem(): print key if not value in pagetitles: brokenredirs.append(key) print brokenredirs
|
print entry.full_title() pagetitles.append(entry.full_title()) print 'Step 3: Comparing. This might take a while (or two).'
|
pagetitles[entry.full_title()] = None print 'Step 3: Comparing.'
|
def retrieve_broken_redirects(source): if source == None: # retrieve information from the live wiki's maintenance page host = wikipedia.family.hostname(wikipedia.mylang) # broken redirect maintenance page's URL url = wikipedia.family.maintenance_address(wikipedia.mylang, 'brokenredirects', default_limit = False) print 'Retrieving maintenance page...' maintenance_txt, charset = wikipedia.getUrl(host,url) # regular expression which finds redirects which point to a non-existing page inside the HTML Rredir = re.compile('\<li\>\<a href=\"\/w\/wiki.phtml\?title=(.*?)&redirect=no\"') redir_names = Rredir.findall(maintenance_txt) print 'Retrieved %d redirects from maintenance page.\n' % len(redir_names) for redir_name in redir_names: yield redir_name else: print 'NOTE: This function is not working yet. Press Enter to continue.' raw_input() print 'Step 1: Getting a list of all redirects' redirs = get_redirects_from_dump(sqlfilename) print 'Step 2: Getting a list of all page titles' dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) pagetitles = [] for entry in dump.entries(): print entry.full_title() pagetitles.append(entry.full_title()) print 'Step 3: Comparing. This might take a while (or two).' brokenredirs = [] for (key, value) in redirs.popitem(): print key if not value in pagetitles: brokenredirs.append(key) print brokenredirs
|
for (key, value) in redirs.popitem(): print key if not value in pagetitles: brokenredirs.append(key) print brokenredirs
|
for (key, value) in redirs.iteritems(): if not pagetitles.has_key(value): yield key
|
def retrieve_broken_redirects(source): if source == None: # retrieve information from the live wiki's maintenance page host = wikipedia.family.hostname(wikipedia.mylang) # broken redirect maintenance page's URL url = wikipedia.family.maintenance_address(wikipedia.mylang, 'brokenredirects', default_limit = False) print 'Retrieving maintenance page...' maintenance_txt, charset = wikipedia.getUrl(host,url) # regular expression which finds redirects which point to a non-existing page inside the HTML Rredir = re.compile('\<li\>\<a href=\"\/w\/wiki.phtml\?title=(.*?)&redirect=no\"') redir_names = Rredir.findall(maintenance_txt) print 'Retrieved %d redirects from maintenance page.\n' % len(redir_names) for redir_name in redir_names: yield redir_name else: print 'NOTE: This function is not working yet. Press Enter to continue.' raw_input() print 'Step 1: Getting a list of all redirects' redirs = get_redirects_from_dump(sqlfilename) print 'Step 2: Getting a list of all page titles' dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) pagetitles = [] for entry in dump.entries(): print entry.full_title() pagetitles.append(entry.full_title()) print 'Step 3: Comparing. This might take a while (or two).' brokenredirs = [] for (key, value) in redirs.popitem(): print key if not value in pagetitles: brokenredirs.append(key) print brokenredirs
|
except (wikipedia.IsNotRedirectPage, wikipedia.NoPage): wikipedia.output('%s doesn\'t exist or is not a redirect.')
|
except wikipedia.IsNotRedirectPage: wikipedia.output(u'%s is not a redirect.' % redir_page.linkname()) except wikipedia.NoPage: wikipedia.output(u'%s doesn\'t exist.' % redir_page.linkname())
|
def delete_broken_redirects(source): # get reason for deletion text reason = reason_broken[wikipedia.chooselang(wikipedia.mylang, reason_broken)] for redir_name in retrieve_broken_redirects(source): redir_page = wikipedia.PageLink(wikipedia.mylang, redir_name) try: target_page = redir_page.getRedirectTo() except (wikipedia.IsNotRedirectPage, wikipedia.NoPage): wikipedia.output('%s doesn\'t exist or is not a redirect.') else: try: target_name = str(redir_page.getRedirectTo()) target_page = wikipedia.PageLink(wikipedia.mylang, target_name) target_page.get() except wikipedia.NoPage: #wikipedia.output('Deleting %s...' % redir_page.linkname()) wikipedia.deletePage(redir_page, reason, prompt = False) except wikipedia.IsRedirectPage(): wikipedia.output('Redirect target is also a redirect! Won\'t delete anything.') else: wikipedia.output('Redirect target does exist! Won\'t delete anything.') # idle for 1 minute print '' wikipedia.put_throttle()
|
except wikipedia.IsRedirectPage(): wikipedia.output('Redirect target is also a redirect! Won\'t delete anything.')
|
except wikipedia.IsRedirectPage: wikipedia.output(u'Redirect target is also a redirect! Won\'t delete anything.')
|
def delete_broken_redirects(source): # get reason for deletion text reason = reason_broken[wikipedia.chooselang(wikipedia.mylang, reason_broken)] for redir_name in retrieve_broken_redirects(source): redir_page = wikipedia.PageLink(wikipedia.mylang, redir_name) try: target_page = redir_page.getRedirectTo() except (wikipedia.IsNotRedirectPage, wikipedia.NoPage): wikipedia.output('%s doesn\'t exist or is not a redirect.') else: try: target_name = str(redir_page.getRedirectTo()) target_page = wikipedia.PageLink(wikipedia.mylang, target_name) target_page.get() except wikipedia.NoPage: #wikipedia.output('Deleting %s...' % redir_page.linkname()) wikipedia.deletePage(redir_page, reason, prompt = False) except wikipedia.IsRedirectPage(): wikipedia.output('Redirect target is also a redirect! Won\'t delete anything.') else: wikipedia.output('Redirect target does exist! Won\'t delete anything.') # idle for 1 minute print '' wikipedia.put_throttle()
|
wikipedia.output('Redirect target does exist! Won\'t delete anything.')
|
wikipedia.output(u'Redirect target does exist! Won\'t delete anything.')
|
def delete_broken_redirects(source): # get reason for deletion text reason = reason_broken[wikipedia.chooselang(wikipedia.mylang, reason_broken)] for redir_name in retrieve_broken_redirects(source): redir_page = wikipedia.PageLink(wikipedia.mylang, redir_name) try: target_page = redir_page.getRedirectTo() except (wikipedia.IsNotRedirectPage, wikipedia.NoPage): wikipedia.output('%s doesn\'t exist or is not a redirect.') else: try: target_name = str(redir_page.getRedirectTo()) target_page = wikipedia.PageLink(wikipedia.mylang, target_name) target_page.get() except wikipedia.NoPage: #wikipedia.output('Deleting %s...' % redir_page.linkname()) wikipedia.deletePage(redir_page, reason, prompt = False) except wikipedia.IsRedirectPage(): wikipedia.output('Redirect target is also a redirect! Won\'t delete anything.') else: wikipedia.output('Redirect target does exist! Won\'t delete anything.') # idle for 1 minute print '' wikipedia.put_throttle()
|
second_redir = wikipedia.PageLink(wikipedia.mylang, target) second_target = str(second_redir.getRedirectTo()) except (wikipedia.IsNotRedirectPage, wikipedia.NoPage): print 'The specified page is not a double redirect.\n' continue txt = "#REDIRECT [[%s]]" % second_target redir.put(txt) print ''
|
except wikipedia.IsNotRedirectPage: wikipedia.output(u'%s is not a redirect.' % redir.linkname()) except wikipedia.NoPage: wikipedia.output(u'%s doesn\'t exist.' % redir.linkname()) except wikipedia.LockedPage: wikipedia.output(u'%s is locked, skipping.' % redir.linkname()) else: try: second_redir = wikipedia.PageLink(wikipedia.mylang, target) second_target = str(second_redir.getRedirectTo()) except wikipedia.IsNotRedirectPage: wikipedia.output(u'%s is not a redirect.' % second_redir.linkname()) except wikipedia.NoPage: wikipedia.output(u'%s doesn\'t exist.' % second_redir.linkname()) else: txt = "#REDIRECT [[%s]]" % second_target redir.put(txt)
|
def fix_double_redirects(source): for redir_name in retrieve_double_redirects(source): redir = wikipedia.PageLink(wikipedia.mylang, redir_name) try: target = str(redir.getRedirectTo()) second_redir = wikipedia.PageLink(wikipedia.mylang, target) second_target = str(second_redir.getRedirectTo()) except (wikipedia.IsNotRedirectPage, wikipedia.NoPage): print 'The specified page is not a double redirect.\n' continue txt = "#REDIRECT [[%s]]" % second_target redir.put(txt) print ''
|
sqlfilename = wikipedia.input('Please enter the SQL dump\'s filename: ')
|
sqlfilename = wikipedia.input(u'Please enter the SQL dump\'s filename: ')
|
def fix_double_redirects(source): for redir_name in retrieve_double_redirects(source): redir = wikipedia.PageLink(wikipedia.mylang, redir_name) try: target = str(redir.getRedirectTo()) second_redir = wikipedia.PageLink(wikipedia.mylang, target) second_target = str(second_redir.getRedirectTo()) except (wikipedia.IsNotRedirectPage, wikipedia.NoPage): print 'The specified page is not a double redirect.\n' continue txt = "#REDIRECT [[%s]]" % second_target redir.put(txt) print ''
|
password = getpass.getpass(u'password: ')
|
password = getpass.getpass('password: ')
|
def makepath(path): """ creates missing directories for the given path and returns a normalized absolute version of the path. - if the given path already exists in the filesystem the filesystem is not modified. - otherwise makepath creates directories along the given path using the dirname() of the path. You may append a '/' to the path if you want it to be a directory path. from [email protected] 2002/03/18 """ from os import makedirs from os.path import normpath,dirname,exists,abspath dpath = normpath(dirname(path)) if not exists(dpath): makedirs(dpath) return normpath(abspath(path))
|
colors += [color[lastline[0]] for c in lastline] + [None]
|
lastcolors = [None for c in lastline] lastcolors[0] = color[lastline[0]] colors += lastcolors + [None]
|
def showDiff(oldtext, newtext): """ Prints a string showing the differences between oldtext and newtext. The differences are highlighted (only on Unix systems) to show which changes were made. """ # For information on difflib, see http://pydoc.org/2.3/difflib.html color = { '+': 10, # green '-': 12 # red } diff = '' colors = [] # This will store the last line beginning with + or -. lastline = None # For testing purposes only: show original, uncolored diff # for line in difflib.ndiff(oldtext.splitlines(), newtext.splitlines()): # print line for line in difflib.ndiff(oldtext.splitlines(), newtext.splitlines()): if line.startswith('?'): # initialize color vector with None, which means default color lastcolors = [None for c in lastline] # colorize the + or - sign lastcolors[0] = color[lastline[0]] # colorize changed parts in red or green for i in range(len(line)): if line[i] != ' ': lastcolors[i] = color[lastline[0]] diff += lastline + '\n' # append one None (default color) for the newline character colors += lastcolors + [None] elif lastline: diff += lastline + '\n' # colorize the entire line in red or green colors += [color[lastline[0]] for c in lastline] + [None] lastline = None if line[0] in ('+', '-'): lastline = line output(diff, colors = colors)
|
sysop = (not self.editRestriction)
|
sysop = not not self.editRestriction
|
def put(self, newtext, comment=None, watchArticle = None, minorEdit = True): """Replace the new page with the contents of the first argument. The second argument is a string that is to be used as the summary for the modification
|
return x
|
return x, isWatched
|
def getPage(site, name, get_edit_page = True, read_only = False, do_quote = True, get_redirect=False, throttle = True): """ Get the contents of page 'name' from the 'site' wiki Do not use this directly; for 99% of the possible ideas you can use the Page object instead. Arguments: site - the wiki site name - the page name get_edit_page - If true, gets the edit page, otherwise gets the normal page. read_only - If true, doesn't raise LockedPage exceptions. do_quote - ??? (TODO: what is this for?) get_redirect - Get the contents, even if it is a redirect page This routine returns a unicode string containing the wiki text if get_edit_page is True; otherwise it returns a unicode string containing the entire page's HTML code. """ host = site.hostname() name = re.sub(' ', '_', name) output(url2unicode(u'Getting page %s' % site.linkto(name), site = site)) # A heuristic to encode the URL into %XX for characters that are not # allowed in a URL. if not '%' in name and do_quote: # It should not have been done yet if name != urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if get_edit_page: address = site.edit_address(name) else: address = site.get_address(name) # Make sure Brion doesn't get angry by waiting if the last time a page # was retrieved was not long enough ago. if throttle: get_throttle() # Try to retrieve the page until it was successfully loaded (just in case # the server is down or overloaded) # wait for retry_idle_time minutes (growing!) between retries. retry_idle_time = 1 while True: starttime = time.time() text, charset = getUrl(host, address, site) get_throttle.setDelay(time.time() - starttime)\ # Extract the actual text from the textedit field if charset is None: print "WARNING: No character set found" else: # Store character set for later reference site.checkCharset(charset) if get_edit_page: # Looking for the token R = re.compile(r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"") tokenloc = R.search(text) if tokenloc: site.puttoken(tokenloc.group(1)) elif not site.getToken(getalways = False): site.puttoken('') if not read_only: # check if we're logged in p=re.compile('userlogin') if p.search(text) != None: output(u'Warning: You\'re probably not logged in on %s:' % repr(site)) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[repr(site), link2url(name, site = site)] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[repr(site), link2url(name, site = site)] = m.group(1) else: edittime[repr(site), link2url(name, site = site)] = "0" try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %d minutes..." % (host, address, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(site, name) m = redirectRe(site).match(text[i1:i2]) if m and not get_redirect: output(u"DBG> %s is redirect to %s" % (url2unicode(name, site = site), unicode(m.group(1), site.encoding()))) raise IsRedirectPage(m.group(1)) if edittime[repr(site), link2url(name, site = site)] == "0" and not read_only: print "DBG> page may be locked?!" raise LockedPage() x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] else: x = text # If not editing # Convert to a unicode string. If there's invalid unicode data inside # the page, replace it with question marks. x = unicode(x, charset, errors = 'replace') return x
|
wikipedia.output(u"* %s" % cat.aslink())
|
wikipedia.output(u"* %s" % pl.aslink())
|
def add_category(sort_by_last_name = False): ''' A robot to mass-add a category to a list of pages. ''' print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input(u'Wikipedia page with list of pages to change:') if listpage: try: pl = wikipedia.PageLink(wikipedia.getSite(), listpage) except NoPage: wikipedia.output(u'The page ' + listpage + ' could not be loaded from the server.') sys.exit() pagenames = pl.links() else: refpage = wikipedia.input(u'Wikipedia page that is now linked to:') pl = wikipedia.PageLink(wikipedia.getSite(), refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input(u'Category to add (do not give namespace):') newcat = newcat[:1].capitalize() + newcat[1:] # get edit summary message wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg_add) % newcat) cat_namespace = wikipedia.getSite().category_namespaces()[0] answer = '' for nm in pagenames: pl2 = wikipedia.PageLink(wikipedia.getSite(), nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input(u'%s [y/n/a(ll)]:' % (pl2.aslink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input(u'This should be used if and only if you are sure that your links are correct! Are you sure? [y/n]:') if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: wikipedia.output(u"%s doesn't exist yet. Ignoring."%(pl2.aslocallink())) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(wikipedia.getSite(),arg.args[0]) wikipedia.output(u"WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink())) else: wikipedia.output(u"Current categories:") for curpl in cats: wikipedia.output(u"* %s" % cat.aslink()) catpl = wikipedia.PageLink(wikipedia.getSite(), cat_namespace + ':' + newcat) if sort_by_last_name: catpl = sorted_by_last_name(catpl, pl2) if catpl in cats: wikipedia.output(u"%s already has %s"%(pl2.aslocallink(), catpl.aslocallink())) else: wikipedia.output(u'Adding %s' % catpl.aslocallink()) cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text)
|
print 'WARNING: can\'t extract the target of redirect %s, ignoring' % (entry.full_title())
|
wikipedia.output(u'WARNING: can\'t extract the target of redirect %s, ignoring' % (entry.full_title()))
|
def get_redirects_from_dump(sqlfilename): ''' Loads a local sql dump file, looks at all pages which have the redirect flag set, and finds out where they're pointing at. Returns a dictionary where the redirect names are the keys and the redirect targets are the values. NOTE: if the redirect isn't in the main namespace, the returned key will be prefixed by the default namespace identifiers. See full_title() in dump.py. ''' dict = {} # open sql dump and read page titles out of it dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) redirR = wikipedia.redirectRe(wikipedia.getSite()) for entry in dump.entries(): if entry.redirect: m = redirR.search(entry.text) if m == None: # NOTE: due to a MediaWiki bug, many articles are falsely marked with the # redirect flag, so this warning will eventually show up several times. # Ask a MediaWiki developer to fix the SQL database. print 'WARNING: can\'t extract the target of redirect %s, ignoring' % (entry.full_title()) else: target = m.group(1) # There might be redirects to another wiki. Ignore these. for code in wikipedia.getSite().family.langs.keys(): if target.startswith(code + ':'): # TODO: doesn't seem to work wikipedia.output(u'NOTE: Ignoring %s which is a redirect to %s:' % (entry.full_title(), code)) target = None break # if the redirect does not link to another wiki if target: target = target.replace(' ', '_') # remove leading and trailing whitespace target = target.strip() # capitalize the first letter if not wikipedia.getSite().nocapitalize: target = target[0].upper() + target[1:] if target.find('#') != -1: target = target[:target.index('#')] if target.find('|') != -1: wikipedia.output(u'HINT: %s is a redirect with a pipelink.' % entry.full_title()) target = target[:target.index('|')] dict[entry.full_title()] = target return dict
|
redir.put(txt)
|
status, reason, data = redir.put(txt) print status, reason
|
def fix_double_redirects(source): mysite = wikipedia.getSite() for redir_name in retrieve_double_redirects(source): print '' redir = wikipedia.PageLink(mysite, redir_name) try: target = redir.getRedirectTo() except wikipedia.IsNotRedirectPage: wikipedia.output(u'%s is not a redirect.' % redir.linkname()) except wikipedia.NoPage: wikipedia.output(u'%s doesn\'t exist.' % redir.linkname()) except wikipedia.LockedPage: wikipedia.output(u'%s is locked, skipping.' % redir.linkname()) else: try: second_redir = wikipedia.PageLink(mysite, target) second_target = second_redir.getRedirectTo(read_only = True) except wikipedia.IsNotRedirectPage: wikipedia.output(u'%s is not a redirect.' % second_redir.linkname()) except wikipedia.NoPage: wikipedia.output(u'%s doesn\'t exist.' % second_redir.linkname()) else: txt = "#REDIRECT [[%s]]" % second_target redir.put(txt)
|
wikipedia.output(u"Killing remaining %i threads, please wait..." % (threading.activeCount() - 2))
|
wikipedia.output(u'Remaining %i threads will be killed.' % (threading.activeCount() - 2)) wikipedia.output(u'Saving history...') bot.history.save()
|
def main(): start = u'!' pageTitle = [] for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'weblinkchecker') if arg: if arg.startswith('-start:'): start = arg[7:] else: pageTitle.append(arg) if pageTitle == []: gen = pagegenerators.AllpagesPageGenerator(start) else: pageTitle = ' '.join(pageTitle) page = wikipedia.Page(wikipedia.getSite(), pageTitle) gen = iter([page]) gen = pagegenerators.PreloadingGenerator(gen, pageNumber = 240) gen = pagegenerators.RedirectFilterPageGenerator(gen) bot = WeblinkCheckerRobot(gen) try: bot.run() finally: waitTime = 0 # Don't wait longer than 30 seconds for threads to finish. while threading.activeCount() > 2 and waitTime < 30: wikipedia.output(u"Waiting for remaining %i threads to finish, please wait..." % (threading.activeCount() - 2)) # don't count the main thread and report thread # wait 1 second time.sleep(1) waitTime += 1 if threading.activeCount() > 2: wikipedia.output(u"Killing remaining %i threads, please wait..." % (threading.activeCount() - 2)) # Threads will die automatically because they are daemonic. But the # killing might lag, so we wait some time. Also, we'll wait until # the report thread is shut down. if bot.history.reportThread: bot.history.reportThread.shutdown() while bot.history.reportThread.isAlive(): time.sleep(0.1) bot.history.save()
|
while bot.history.reportThread.isAlive(): time.sleep(0.1) bot.history.save()
|
while bot.history.reportThread.isAlive(): time.sleep(0.1)
|
def main(): start = u'!' pageTitle = [] for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'weblinkchecker') if arg: if arg.startswith('-start:'): start = arg[7:] else: pageTitle.append(arg) if pageTitle == []: gen = pagegenerators.AllpagesPageGenerator(start) else: pageTitle = ' '.join(pageTitle) page = wikipedia.Page(wikipedia.getSite(), pageTitle) gen = iter([page]) gen = pagegenerators.PreloadingGenerator(gen, pageNumber = 240) gen = pagegenerators.RedirectFilterPageGenerator(gen) bot = WeblinkCheckerRobot(gen) try: bot.run() finally: waitTime = 0 # Don't wait longer than 30 seconds for threads to finish. while threading.activeCount() > 2 and waitTime < 30: wikipedia.output(u"Waiting for remaining %i threads to finish, please wait..." % (threading.activeCount() - 2)) # don't count the main thread and report thread # wait 1 second time.sleep(1) waitTime += 1 if threading.activeCount() > 2: wikipedia.output(u"Killing remaining %i threads, please wait..." % (threading.activeCount() - 2)) # Threads will die automatically because they are daemonic. But the # killing might lag, so we wait some time. Also, we'll wait until # the report thread is shut down. if bot.history.reportThread: bot.history.reportThread.shutdown() while bot.history.reportThread.isAlive(): time.sleep(0.1) bot.history.save()
|
if old[site] != new[site]:
|
if site not in new: removing.append(site) elif old[site] != new[site]:
|
def compareLanguages(old, new): removing = [] adding = [] modifying = [] mysite = wikipedia.getSite() for site in old.keys(): if old[site] != new[site]: modifying.append(site) for site2 in new.keys(): if site2 not in old: adding.append(site2) s = "" if adding: s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[0]) + ",".join([x.lang for x in adding]) if removing: s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[1]) + ",".join([x.lang for x in removing]) if modifying: s = s + " %s:" % (wikipedia.translate(mysite.lang, msg)[2]) + ",".join([x.lang for x in modifying]) return s,removing
|
linkR = re.compile(r'http[s]?://[^\]\s<>}"]*?[^\]\s\)\.:;,<>}"](?=\'\')|http[s]?://[^\]\s<>}"]*[^\]\s\)\.:;,<>}"]')
|
linkR = re.compile(r'http[s]?://[^\]\s<>}"]*?[^\]\s\)\.:;,<>}\|"](?=\'\')|http[s]?://[^\]\s<>}"]*[^\]\s\)\.:;,<>}"\|]')
|
def checkLinksIn(self, page): try: text = page.get() except wikipedia.NoPage: wikipedia.output(u'%s does not exist.' % page.title()) return # RFC 2396 says that URLs may only contain certain characters. # For this regex we also accept non-allowed characters, so that the bot # will later show these links as broken ('Non-ASCII Characters in URL'). # Note: While allowing parenthesis inside URLs, MediaWiki will regard # right parenthesis at the end of the URL as not part of that URL. # The same applies to dot, comma, colon and some other characters. # So characters inside the URL can be anything except whitespace, # closing squared brackets, quotation marks, greater than and less # than, and the last character also can't be parenthesis or another # character disallowed by MediaWiki. # MediaWiki allows closing curly braces inside links, but such braces # often come from templates where URLs are parameters, so as a # workaround we won't allow them inside links here. # The first half of this regular expression is required because '' is # not allowed inside links. linkR = re.compile(r'http[s]?://[^\]\s<>}"]*?[^\]\s\)\.:;,<>}"](?=\'\')|http[s]?://[^\]\s<>}"]*[^\]\s\)\.:;,<>}"]') # Remove HTML comments in URLs as well as URLs in HTML comments. # Also remove text inside nowiki links text = re.sub('(?s)<nowiki>.*?</nowiki>|<!--.*?-->', '', text) urls = linkR.findall(text) for url in urls: ignoreUrl = False for ignoreR in ignorelist: if ignoreR.match(url): ignoreUrl = True if not ignoreUrl: # Limit the number of threads started at the same time. Each # thread will check one page, then die. while threading.activeCount() >= config.max_external_links: # wait 100 ms time.sleep(0.1) thread = LinkCheckThread(page, url, self.history) # thread dies when program terminates thread.setDaemon(True) thread.start()
|
source = None
|
xmlfilename = None textfilename = None
|
def main(): quietMode = False # use -quiet to get less output # if the -file argument is used, page titles are stored in this array. # otherwise it will only contain one page. articles = [] # if -file is not used, this temporary array is used to read the page title. page_title = [] debug = False source = None for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'table2wiki') if arg: if arg.startswith('-file:'): f=open(arg[6:], 'r') R=re.compile(r'.*\[\[([^\]]*)\]\].*') m = False for line in f.readlines(): m=R.match(line) if m: articles.append(m.group(1)) else: print "ERROR: Did not understand %s line:\n%s" % ( arg[6:], repr(line)) f.close() elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] source = 'xmldump' elif arg.startswith('-skip:'): articles = articles[articles.index(arg[6:]):] elif arg.startswith('-auto'): config.table2wikiAskOnlyWarnings = True config.table2wikiSkipWarnings = True print "Automatic mode!\n" elif arg.startswith('-quiet'): quietMode = True elif arg.startswith('-debug'): debug = True else: page_title.append(arg) if source == 'xmldump': gen = pagegenerators.PreloadingGenerator(TableXmlDumpPageGenerator(xmlfilename)) # if the page is given as a command line argument, # connect the title's parts with spaces elif page_title != []: page_title = ' '.join(page_title) page = wikipedia.Page(wikipedia.getSite(), page_title) gen = pagegenerators.PreloadingGenerator(iter([page])) else: # show help wikipedia.output(__doc__, 'utf-8') sys.exit(0) bot = Table2WikiRobot(gen, debug, quietMode) bot.run()
|
f=open(arg[6:], 'r') R=re.compile(r'.*\[\[([^\]]*)\]\].*') m = False for line in f.readlines(): m=R.match(line) if m: articles.append(m.group(1)) else: print "ERROR: Did not understand %s line:\n%s" % ( arg[6:], repr(line)) f.close()
|
if len(arg) == 5: textfilename = wikipedia.input(u'Please enter the textfile\'s name:') else: textfilename = arg[6:]
|
def main(): quietMode = False # use -quiet to get less output # if the -file argument is used, page titles are stored in this array. # otherwise it will only contain one page. articles = [] # if -file is not used, this temporary array is used to read the page title. page_title = [] debug = False source = None for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'table2wiki') if arg: if arg.startswith('-file:'): f=open(arg[6:], 'r') R=re.compile(r'.*\[\[([^\]]*)\]\].*') m = False for line in f.readlines(): m=R.match(line) if m: articles.append(m.group(1)) else: print "ERROR: Did not understand %s line:\n%s" % ( arg[6:], repr(line)) f.close() elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] source = 'xmldump' elif arg.startswith('-skip:'): articles = articles[articles.index(arg[6:]):] elif arg.startswith('-auto'): config.table2wikiAskOnlyWarnings = True config.table2wikiSkipWarnings = True print "Automatic mode!\n" elif arg.startswith('-quiet'): quietMode = True elif arg.startswith('-debug'): debug = True else: page_title.append(arg) if source == 'xmldump': gen = pagegenerators.PreloadingGenerator(TableXmlDumpPageGenerator(xmlfilename)) # if the page is given as a command line argument, # connect the title's parts with spaces elif page_title != []: page_title = ' '.join(page_title) page = wikipedia.Page(wikipedia.getSite(), page_title) gen = pagegenerators.PreloadingGenerator(iter([page])) else: # show help wikipedia.output(__doc__, 'utf-8') sys.exit(0) bot = Table2WikiRobot(gen, debug, quietMode) bot.run()
|
source = 'xmldump'
|
def main(): quietMode = False # use -quiet to get less output # if the -file argument is used, page titles are stored in this array. # otherwise it will only contain one page. articles = [] # if -file is not used, this temporary array is used to read the page title. page_title = [] debug = False source = None for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'table2wiki') if arg: if arg.startswith('-file:'): f=open(arg[6:], 'r') R=re.compile(r'.*\[\[([^\]]*)\]\].*') m = False for line in f.readlines(): m=R.match(line) if m: articles.append(m.group(1)) else: print "ERROR: Did not understand %s line:\n%s" % ( arg[6:], repr(line)) f.close() elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] source = 'xmldump' elif arg.startswith('-skip:'): articles = articles[articles.index(arg[6:]):] elif arg.startswith('-auto'): config.table2wikiAskOnlyWarnings = True config.table2wikiSkipWarnings = True print "Automatic mode!\n" elif arg.startswith('-quiet'): quietMode = True elif arg.startswith('-debug'): debug = True else: page_title.append(arg) if source == 'xmldump': gen = pagegenerators.PreloadingGenerator(TableXmlDumpPageGenerator(xmlfilename)) # if the page is given as a command line argument, # connect the title's parts with spaces elif page_title != []: page_title = ' '.join(page_title) page = wikipedia.Page(wikipedia.getSite(), page_title) gen = pagegenerators.PreloadingGenerator(iter([page])) else: # show help wikipedia.output(__doc__, 'utf-8') sys.exit(0) bot = Table2WikiRobot(gen, debug, quietMode) bot.run()
|
|
if source == 'xmldump': gen = pagegenerators.PreloadingGenerator(TableXmlDumpPageGenerator(xmlfilename))
|
if xmlfilename: gen = TableXmlDumpPageGenerator(xmlfilename) elif textfilename: gen = pagegenerators.TextfilePageGenerator(textfilename)
|
def main(): quietMode = False # use -quiet to get less output # if the -file argument is used, page titles are stored in this array. # otherwise it will only contain one page. articles = [] # if -file is not used, this temporary array is used to read the page title. page_title = [] debug = False source = None for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'table2wiki') if arg: if arg.startswith('-file:'): f=open(arg[6:], 'r') R=re.compile(r'.*\[\[([^\]]*)\]\].*') m = False for line in f.readlines(): m=R.match(line) if m: articles.append(m.group(1)) else: print "ERROR: Did not understand %s line:\n%s" % ( arg[6:], repr(line)) f.close() elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] source = 'xmldump' elif arg.startswith('-skip:'): articles = articles[articles.index(arg[6:]):] elif arg.startswith('-auto'): config.table2wikiAskOnlyWarnings = True config.table2wikiSkipWarnings = True print "Automatic mode!\n" elif arg.startswith('-quiet'): quietMode = True elif arg.startswith('-debug'): debug = True else: page_title.append(arg) if source == 'xmldump': gen = pagegenerators.PreloadingGenerator(TableXmlDumpPageGenerator(xmlfilename)) # if the page is given as a command line argument, # connect the title's parts with spaces elif page_title != []: page_title = ' '.join(page_title) page = wikipedia.Page(wikipedia.getSite(), page_title) gen = pagegenerators.PreloadingGenerator(iter([page])) else: # show help wikipedia.output(__doc__, 'utf-8') sys.exit(0) bot = Table2WikiRobot(gen, debug, quietMode) bot.run()
|
gen = pagegenerators.PreloadingGenerator(iter([page]))
|
gen = iter([page])
|
def main(): quietMode = False # use -quiet to get less output # if the -file argument is used, page titles are stored in this array. # otherwise it will only contain one page. articles = [] # if -file is not used, this temporary array is used to read the page title. page_title = [] debug = False source = None for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'table2wiki') if arg: if arg.startswith('-file:'): f=open(arg[6:], 'r') R=re.compile(r'.*\[\[([^\]]*)\]\].*') m = False for line in f.readlines(): m=R.match(line) if m: articles.append(m.group(1)) else: print "ERROR: Did not understand %s line:\n%s" % ( arg[6:], repr(line)) f.close() elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] source = 'xmldump' elif arg.startswith('-skip:'): articles = articles[articles.index(arg[6:]):] elif arg.startswith('-auto'): config.table2wikiAskOnlyWarnings = True config.table2wikiSkipWarnings = True print "Automatic mode!\n" elif arg.startswith('-quiet'): quietMode = True elif arg.startswith('-debug'): debug = True else: page_title.append(arg) if source == 'xmldump': gen = pagegenerators.PreloadingGenerator(TableXmlDumpPageGenerator(xmlfilename)) # if the page is given as a command line argument, # connect the title's parts with spaces elif page_title != []: page_title = ' '.join(page_title) page = wikipedia.Page(wikipedia.getSite(), page_title) gen = pagegenerators.PreloadingGenerator(iter([page])) else: # show help wikipedia.output(__doc__, 'utf-8') sys.exit(0) bot = Table2WikiRobot(gen, debug, quietMode) bot.run()
|
bot = Table2WikiRobot(gen, debug, quietMode)
|
preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = Table2WikiRobot(preloadingGen, debug, quietMode)
|
def main(): quietMode = False # use -quiet to get less output # if the -file argument is used, page titles are stored in this array. # otherwise it will only contain one page. articles = [] # if -file is not used, this temporary array is used to read the page title. page_title = [] debug = False source = None for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'table2wiki') if arg: if arg.startswith('-file:'): f=open(arg[6:], 'r') R=re.compile(r'.*\[\[([^\]]*)\]\].*') m = False for line in f.readlines(): m=R.match(line) if m: articles.append(m.group(1)) else: print "ERROR: Did not understand %s line:\n%s" % ( arg[6:], repr(line)) f.close() elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename: ') else: xmlfilename = arg[5:] source = 'xmldump' elif arg.startswith('-skip:'): articles = articles[articles.index(arg[6:]):] elif arg.startswith('-auto'): config.table2wikiAskOnlyWarnings = True config.table2wikiSkipWarnings = True print "Automatic mode!\n" elif arg.startswith('-quiet'): quietMode = True elif arg.startswith('-debug'): debug = True else: page_title.append(arg) if source == 'xmldump': gen = pagegenerators.PreloadingGenerator(TableXmlDumpPageGenerator(xmlfilename)) # if the page is given as a command line argument, # connect the title's parts with spaces elif page_title != []: page_title = ' '.join(page_title) page = wikipedia.Page(wikipedia.getSite(), page_title) gen = pagegenerators.PreloadingGenerator(iter([page])) else: # show help wikipedia.output(__doc__, 'utf-8') sys.exit(0) bot = Table2WikiRobot(gen, debug, quietMode) bot.run()
|
s = StringIO.StringIO(data) rImage = re.compile('.*<a href="/wiki/.*?:(.*?)".*?[\n]*?.*?class="internal"') rThumb = re.compile('.*<a href="/wiki/.*?:(.*?)".*?class="image"')
|
rImage = re.compile('<a href=[\r\n]*?"/wiki/.*?:(.*?)".*?[\r\n]*?.*?class=[\r\n]*?"image"', re.MULTILINE)
|
def extractImages(data): """ takes a string with the complete HTML-file and returns the article which is contained in <div id='article'> and the pagestats which contain information on last change """ images = [] s = StringIO.StringIO(data) rImage = re.compile('.*<a href="/wiki/.*?:(.*?)".*?[\n]*?.*?class="internal"') rThumb = re.compile('.*<a href="/wiki/.*?:(.*?)".*?class="image"') last = "" for line in s: img = rImage.match(line) try: path = md5.new(html2txt(img.group(1))).hexdigest() if path != last: images.append( {'image':img.group(1), 'path': str(path[0])+"/"+str(path[0:2])+"/"}) last = path except: pass img = rThumb.match(line) try: path = md5.new(html2txt(img.group(1))).hexdigest() if path != last: images.append( {'image':img.group(1), 'path': str(path[0])+"/"+str(path[0:2])+"/"}) last = path except: pass images.sort() return images
|
for line in s: img = rImage.match(line) try: path = md5.new(html2txt(img.group(1))).hexdigest() if path != last: images.append( {'image':img.group(1), 'path': str(path[0])+"/"+str(path[0:2])+"/"}) last = path except: pass img = rThumb.match(line) try: path = md5.new(html2txt(img.group(1))).hexdigest() if path != last: images.append( {'image':img.group(1), 'path': str(path[0])+"/"+str(path[0:2])+"/"}) last = path except: pass
|
img = rImage.findall(data) print "Bilder: ", img for image in img: path = md5.new(html2txt(image)).hexdigest() images.append( {'image': image, 'path' : str(path[0])+"/"+str(path[0:2])+"/"})
|
def extractImages(data): """ takes a string with the complete HTML-file and returns the article which is contained in <div id='article'> and the pagestats which contain information on last change """ images = [] s = StringIO.StringIO(data) rImage = re.compile('.*<a href="/wiki/.*?:(.*?)".*?[\n]*?.*?class="internal"') rThumb = re.compile('.*<a href="/wiki/.*?:(.*?)".*?class="image"') last = "" for line in s: img = rImage.match(line) try: path = md5.new(html2txt(img.group(1))).hexdigest() if path != last: images.append( {'image':img.group(1), 'path': str(path[0])+"/"+str(path[0:2])+"/"}) last = path except: pass img = rThumb.match(line) try: path = md5.new(html2txt(img.group(1))).hexdigest() if path != last: images.append( {'image':img.group(1), 'path': str(path[0])+"/"+str(path[0:2])+"/"}) last = path except: pass images.sort() return images
|
try: main() conn.close() except: wikipedia.stopme() raise else: wikipedia.stopme()
|
main()
|
def main(): mysite = wikipedia.getSite() sa = [] output_directory = "" save_images = False overwrite_images = False overwrite_articles = False for arg in sys.argv[1:]: if arg.startswith("-lang:"): lang = arg[6:] elif arg.startswith("-file:"): f=open(arg[6:], 'r') R=re.compile(r'.*\[\[([^\]]*)\]\].*') m = False for line in f.readlines(): m=R.match(line) if m: sa.append(string.replace(m.group(1), " ", "_")) else: print "ERROR: Did not understand %s line:\n%s" % ( arg[6:], repr(line)) f.close() elif arg.startswith("-o:"): output_directory = arg[3:] elif arg.startswith("-images"): save_images = True elif arg.startswith("-overwrite:"): if arg[11] == "I": overwrite_images = True elif arg[11] == "A": overwrite_articles = True elif arg[11] == "B": overwrite_images = True overwrite_articles = True else: sa.append(arg.replace(" ", "_")) headers = {"Content-type": "application/x-www-form-urlencoded", "User-agent": "RobHooftWikiRobot/1.0"} print "opening connection to ",mysite.hostname(), conn = httplib.HTTPConnection(mysite.hostname()) print " done" R = re.compile('.*/wiki/(.*)') data = "" for article in sa: filename = article.replace("/", "_") if os.path.isfile(output_directory + filename + ".txt") and overwrite_articles == False: print "skipping " + article continue data = "" ua = article while len(data) < 2: url = '/wiki/'+ ua conn.request("GET", url, "", headers) response = conn.getresponse() data = response.read() if len(data) < 2: print ua + " failed. reading", result = R.match(response.getheader("Location", )) ua = result.group(1) print ua data = extractArticle(data) f = open (output_directory + filename + ".txt", 'w') f.write (data['article'] + '\n' + data['footer']) f.close() print "saved " + article if save_images: images = extractImages(data['article']) for i in images: if overwrite_images == False and os.path.isfile(output_directory + i['image']): print "skipping existing " + i['image'] continue print 'downloading ' + i['image'], uo = wikipedia.MyURLopener() file = uo.open( "http://upload.wikimedia.org/wikipedia/" +mysite.lang + '/' + i['path'] + i['image']) content = file.read() if (len(content) < 500): uo.close() print "downloading from commons", uo = wikipedia.MyURLopener() file = uo.open( "http://commons.wikimedia.org/upload/" + i['path'] + i['image']) #print "http://commons.wikimedia.org/upload/", i['path'] , i['image'], file content = file.read() f = open(output_directory + i['image'], "wb") f.write(content) f.close() print "\t\t", (len(content)/1024), "KB done"
|
return urllib.quote(self.title())
|
encodedTitle = self.title().encode(self.site().encoding()) return urllib.quote(encodedTitle)
|
def urlname(self): """The name of the page this Page refers to, in a form suitable for the URL of the page.""" return urllib.quote(self.title())
|
edittime[self.site, title] = timestamp
|
edittime[self.site, pl.urlname()] = timestamp
|
def oneDone(self, title, timestamp, text): pl = Page(self.site, title) for pl2 in self.pages: if Page(self.site, pl2.sectionFreeTitle()) == pl: if not hasattr(pl2,'_contents') and not hasattr(pl2,'_getexception'): break else: print repr(title) print repr(pl) print repr(self.pages) print "BUG: page not found in list"
|
if watchArticle and watchArticle != '0':
|
if watchArticle:
|
def putPage(site, name, text, comment = None, watchArticle = False, minorEdit = True, newPage = False, token = None, gettoken = False): """Upload 'text' on page 'name' to the 'site' wiki. Use of this routine can normally be avoided; use Page.put instead. """ safetuple = () # safetuple keeps the old value, but only if we did not get a token yet could if site.version() >= "1.4": if gettoken or not token: token = site.getToken(getagain = gettoken) else: safetuple = (site,name,text,comment,watchArticle,minorEdit,newPage) # Check whether we are not too quickly after the previous putPage, and # wait a bit until the interval is acceptable put_throttle() # Which web-site host are we submitting to? host = site.hostname() # Get the address of the page on that host. address = site.put_address(space2underline(name)) # If no comment is given for the change, use the default if comment is None: comment=action # Use the proper encoding for the comment comment = comment.encode(site.encoding()) try: # Encode the text into the right encoding for the wiki if type(text) != type(u''): print 'Warning: wikipedia.putPage() got non-unicode page content. Please report this.' print text text = text.encode(site.encoding()) predata = [ ('wpSave', '1'), ('wpSummary', comment), ('wpTextbox1', text)] # Except if the page is new, we need to supply the time of the # previous version to the wiki to prevent edit collisions if newPage: predata.append(('wpEdittime', '')) else: predata.append(('wpEdittime', edittime[site, name])) # Pass the minorEdit and watchArticle arguments to the Wiki. if minorEdit and minorEdit != '0': predata.append(('wpMinoredit', '1')) if watchArticle and watchArticle != '0': predata.append(('wpWatchthis', '1')) # Give the token, but only if one is supplied. if token: predata.append(('wpEditToken', token)) # Encode all of this into a HTTP request data = urlencode(tuple(predata)) except KeyError: print edittime raise if newPage: output(url2unicode("Creating page [[%s:%s]]" % (site.lang, name), site = site)) else: output(url2unicode("Changing page [[%s:%s]]" % (site.lang, name), site = site)) # Submit the prepared information conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "PythonWikipediaBot/1.0") if site.cookies(): conn.putheader('Cookie',site.cookies()) conn.endheaders() conn.send(data) # Prepare the return values response = conn.getresponse() data = response.read().decode(myencoding()) conn.close() if data != u'': editconflict = mediawiki_messages.get('editconflict').replace('$1', '') if '<title>%s' % editconflict in data: raise EditConflict() elif safetuple and "<" in data: # We might have been using an outdated token print "Changing page has failed. Retrying." putPage(safetuple[0], safetuple[1], safetuple[2], comment=safetuple[3], watchArticle=safetuple[4], minorEdit=safetuple[5], newPage=safetuple[6], token=None,gettoken=True) else: output(data) return response.status, response.reason, data
|
targetTitle = self.targetSite.image_namespace() + image.title().split(':', 1)[1]
|
targetTitle = '%s:%s' % (self.targetSite.image_namespace(), image.title().split(':', 1)[1])
|
def run(self): for page in self.generator: if self.interwiki: imagelist = [] for linkedPage in page.interwiki(): imagelist += linkedPage.imagelinks(followRedirects = True) elif page.isImage(): imagelist = [page] else: imagelist = page.imagelinks(followRedirects = True)
|
for i in range(len(refs)):
|
for i in range(len(refs)-1, -1, -1):
|
def getReferences(self): refs = self.disambPl.getReferences(follow_redirects = False) wikipedia.output(u"Found %d references." % len(refs)) # Remove ignorables if ignore_title.has_key(self.disambPl.site().lang): for ig in ignore_title[self.disambPl.site().lang]: for i in range(len(refs)): if re.match(ig, refs[i].linkname()): wikipedia.output('Ignoring page %s' % refs[i].linkname()) del refs[i] wikipedia.output(u"Will work on %d pages." % len(refs)) return refs
|
if choice[0] == 'r':
|
if len(choice)>0 and choice[0] == 'r':
|
def treat(refpl, thispl): try: reftxt=refpl.get() except wikipedia.IsRedirectPage: wikipedia.output('%s is a redirect to %s' % (refpl.linkname(), thispl.linkname())) choice = wikipedia.input('Do you want to work on pages linking to %s? [y|N]' % refpl.linkname()) if choice == 'y': for ref_redir in getReferences(refpl): refpl_redir=wikipedia.PageLink(wikipedia.mylang, ref_redir) treat(refpl_redir, refpl) pass else: n = 0 curpos = 0 while 1: m=linkR.search(reftxt, pos = curpos) if not m: if n == 0: wikipedia.output("Not found in %s:%s" % (refpl.code(), refpl.linkname())) elif not debug: refpl.put(reftxt) return True # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 # Try to standardize the page. if wikipedia.isInterwikiLink(m.group(1)): linkpl = None else: linkpl=wikipedia.PageLink(thispl.code(), m.group(1), incode = refpl.code()) # Check whether the link found is to thispl. if linkpl != thispl: continue n += 1 context = 30 while 1: print '\n' wikipedia.output("== %s ==" % refpl.linkname()) wikipedia.output(reftxt[max(0,m.start()-context):m.end()+context]) if always == None: choice=wikipedia.input("Option (#,r#,s=skip link,n=next page,u=unlink,q=quit,\n" " m=more context,l=list,a=add new):") else: choice=always if choice=='n': if primary: # If run with the -primary argument, skip this occurence next time. filename = 'disambiguations/' + thispl.urlname() + '.txt' try: # Open file for appending. If none exists yet, create a new one. # The file is stored in the disambiguation/ subdir. Create if necessary. f = open(makepath(filename), 'a') f.write(refpl.urlname() + '\n') f.close() except IOError: pass return True elif choice=='s': choice=-1 break elif choice=='u': choice=-2 break elif choice=='a': ns=wikipedia.input('New alternative:') alternatives.append(ns) elif choice=='q': return False elif choice=='m': context*=2 elif choice=='l': print '\n' for i in range(len(alternatives)): wikipedia.output("%3d - %s" % (i, alternatives[i])) else: if choice[0] == 'r': replaceit = 1 choice = choice[1:] else: replaceit = 0 try: choice=int(choice) except ValueError: pass else: break if choice==-1: # Next link on this page continue page_title = m.group(1) link_text = m.group(2) if not link_text: link_text = page_title trailing_chars = m.group(3) if trailing_chars: link_text += trailing_chars if choice==-2: # unlink reftxt = reftxt[:m.start()] + link_text + reftxt[m.end():] else: # Normal replacement new_page_title = alternatives[choice] reppl = wikipedia.PageLink(thispl.code(), new_page_title, incode = refpl.code()) new_page_title = reppl.linkname() # There is a function that uncapitalizes the link target's first letter # if the link description starts with a small letter. This is useful on # nl: but annoying on de:. # At the moment the de: exclusion is only a workaround because I don't # know if other languages don't want this feature either. # We might want to introduce a list of languages that don't want to use # this feature. 
if wikipedia.mylang != 'de' and link_text[0] in 'abcdefghijklmnopqrstuvwxyz': new_page_title = new_page_title[0].lower() + new_page_title[1:] if replaceit or new_page_title == link_text: newlink = "[[%s]]" % new_page_title # check if we can create a link with trailing characters instead of a pipelink elif len(new_page_title) <= len(link_text) and link_text[:len(new_page_title)] == new_page_title and re.sub(trailR, '', link_text[len(new_page_title):]) == '': newlink = "[[%s]]%s" % (new_page_title, link_text[len(new_page_title):]) else: newlink = "[[%s|%s]]" % (new_page_title, link_text) reftxt = reftxt[:m.start()] + newlink + reftxt[m.end():] wikipedia.output(reftxt[max(0,m.start()-30):m.end()+30]) if not debug: refpl.put(reftxt) return True
|
yield PageLink(mylang, hit)
|
if family.version(mylang)=="1.2": yield PageLink(mylang, url2link(hit, code = mylang, incode = mylang)) else: yield PageLink(mylang, hit)
|
def allpages(start = '%21%200'): """Iterate over all Wikipedia pages in the home language, starting at the given page.""" start = link2url(start, code = mylang) m=0 while 1: text = getPage(mylang, family.allpagesname(mylang, start), do_quote=0, do_edit=0) #print text if family.version(mylang)=="1.2": R = re.compile('/wiki/(.*?)" *class=[\'\"]printable') else: R = re.compile('title =\"(.*?)\"') n = 0 for hit in R.findall(text): if not ':' in hit: # Some dutch exceptions. if not hit in ['Hoofdpagina','In_het_nieuws']: n = n + 1 yield PageLink(mylang, hit) start = hit + '%20%200' if n < 100: break m += n sys.stderr.write('AllPages: %d done; continuing from "%s";\n'%(m,url2link(start,code='nl',incode='ascii')))
|
if newPage:
|
if newPage=='1':
|
def putPage(code, name, text, comment = None, watchArticle = '0', minorEdit = '1', newPage = '0'): """Upload 'text' on page 'name' to the 'code' language wikipedia. Use of this routine can normally be avoided; use PageLink.put instead. """ import httplib put_throttle() host = family.hostname(code) address = family.put_address(code, space2underline(name)) if comment is None: comment=action if not loggedin or code != mylang: comment = username + ' - ' + comment try: text = forCode(text, code) if newPage: data = urlencode(( ('wpMinoredit', minorEdit), ('wpSave', '1'), ('wpWatchthis', watchArticle), ('wpEdittime', ''), ('wpSummary', comment), ('wpTextbox1', text))) else: data = urlencode(( ('wpMinoredit', minorEdit), ('wpSave', '1'), ('wpWatchthis', watchArticle), ('wpEdittime', edittime[code, link2url(name, code)]), ('wpSummary', comment), ('wpTextbox1', text))) except KeyError: print edittime raise if debug: print text print address print data return None, None, None conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "RobHooftWikiRobot/1.0") if cookies and code == mylang: conn.putheader('Cookie',cookies) conn.endheaders() conn.send(data) response = conn.getresponse() data = response.read() conn.close() return response.status, response.reason, data
|
if globalvar.shownew: wikipedia.output(u"%s: %s gives new interwiki %s"% (self.originPage.aslink(), page.aslink(True), linkedPage.aslink(True)))
|
lpsite=linkedPage.site() for prevPage in self.foundIn.keys(): if prevPage != linkedPage and prevPage.site() == lpsite: self.problem(u"%s: %s gives duplicate interwiki on same site %s" % (self.originPage.aslink(), page.aslink(True), linkedPage.aslink(True))) if globalvar.autonomous: self.todo = [] return break else: if globalvar.shownew: wikipedia.output(u"%s: %s gives new interwiki %s"% (self.originPage.aslink(), page.aslink(True), linkedPage.aslink(True)))
|
def workDone(self, counter): """ This is called by a worker to tell us that the promised work was completed as far as possible. The only argument is an instance of a counter class, that has methods minus() and plus() to keep counts of the total work todo. """ # Loop over all the pages that should have been taken care of for page in self.pending: # Mark the page as done self.done.append(page)
|
errorCount = 0
|
errorCount = self.problemfound
|
def assemble(self): # No errors have been seen so far errorCount = 0 mysite = wikipedia.getSite() # Build up a dictionary of all pages found, with the site as key. # Each value will be a list of pages. new = {} for page in self.done: site = page.site() if site == mysite and page.exists() and not page.isRedirectPage(): if page != self.originPage: self.problem("Found link to %s" % page.aslink(True) ) self.whereReport(page) errorCount += 1 elif page.exists() and not page.isRedirectPage(): if site in new: new[site].append(page) else: new[site] = [page] # See if new{} contains any problematic values result = {} for site, pages in new.items(): if len(pages) > 1: errorCount += 1 self.problem("Found more than one link for %s" % site) # If there are any errors, we need to go through all # items manually. if errorCount > 0 or globalvar.select:
|
assert charsets[code]==charset,"charset for %s changed from %s to %s"%(code,charsets(code),charset)
|
assert charsets[code].lower()==charset.lower(),"charset for %s changed from %s to %s"%(code,charsets[code],charset)
|
def getPage(code, name, do_edit=1, do_quote=1): """Get the contents of page 'name' from the 'code' language wikipedia""" host = langs[code] if host[-4:]=='.com': # Old algorithm name = re.sub('_', ' ', name) n=[] for x in name.split(): n.append(x[0].capitalize()+x[1:]) name='_'.join(n) #print name else: name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name!=urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if host[-4:] == '.org': # New software address = '/w/wiki.phtml?title='+name if do_edit: address += '&action=edit' elif host[-4:]=='.com': # Old software if not do_edit: raise "can not skip edit on old-software wikipedia" address = '/wiki.cgi?action=edit&id='+name if debug: print host,address text,charset = getUrl(host,address) if do_edit: if debug: print "Raw:",len(text),type(text),text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code]==charset,"charset for %s changed from %s to %s"%(code,charsets(code),charset) charsets[code]=charset if code2encoding(code).lower()!=charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code,space2underline(name)]=m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code,name]=m.group(1) else: edittime[code,name]=0 try: i1 = re.search('<textarea[^>]*>',text).end() except AttributeError: #print "No text area.",host,address #print repr(text) raise LockedPage(text) i2 = re.search('</textarea>',text).start() if i2-i1 < 2: # new software raise NoPage() if debug: print text[i1:i2] if text[i1:i2] == 'Describe the new page here.\n': # old software raise NoPage() Rredirect=re.compile(r'\#redirect:? *\[\[(.*?)\]\]',re.I) m=Rredirect.match(text[i1:i2]) if m: raise IsRedirectPage(m.group(1)) assert edittime[code,name]!=0 or host[-4:]=='.com', "No edittime on non-empty page?! %s:%s\n%s"%(code,name,text) x=text[i1:i2] x=unescape(x) else: x=text # If not editing if charset=='utf-8': # Make it to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup('utf-8') try: x,l=decode_func(x) except UnicodeError: print code,name print repr(x) raise # Convert the unicode characters to &# references, and make it ascii. x=str(UnicodeToAsciiHtml(x)) return x
|
del self.historyDict[url]
|
try: del self.historyDict[url] except KeyError: pass
|
def setLinkAlive(self, url): """ If the link was previously found dead, removes it from the .dat file and returns True, else returns False. """ if self.historyDict.has_key(url): self.semaphore.acquire() del self.historyDict[url] self.semaphore.release() return True else: return False
|
return [catlib.Category(self.site(), title) for title in categoryTitles]
|
return [catlib.Category(self.site(), ':'.join(title.split(':')[1:])) for title in categoryTitles]
|
def categories(self, withSortKeys = False): """ A list of categories that the article is in. This will retrieve the page text to do its work, so it can raise the same exceptions that are raised by the get() method. The return value is a list of Category objects, one for each of the category links in the page text.
|
elif len(new_page_title) <= len(link_text) and link_text[:len(new_page_title)] == new_page_title:
|
elif len(new_page_title) <= len(link_text) and link_text[:len(new_page_title)] == new_page_title and re.sub(trailR, '', link_text[len(new_page_title):]) == '':
|
def treat(refpl, thispl): try: reftxt=refpl.get() except wikipedia.IsRedirectPage: pass else: n = 0 curpos = 0 while 1: m=linkR.search(reftxt, pos = curpos) if not m: if n == 0: print "Not found in %s"%refpl elif not debug: refpl.put(reftxt) return True # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 # Try to standardize the page. if wikipedia.isInterwikiLink(m.group(1)): linkpl = None else: linkpl=wikipedia.PageLink(thispl.code(), m.group(1), incode = refpl.code()) # Check whether the link found is to thispl. if linkpl != thispl: continue n += 1 context = 30 while 1: print '\n' print "== %s =="%(refpl) print wikipedia.UnicodeToAsciiHtml(reftxt[max(0,m.start()-context):m.end()+context]) if always == None: choice=raw_input("Option (#,r#,s=skip link,n=next page,u=unlink,q=quit,\n" " m=more context,l=list,a=add new):") else: choice=always if choice=='n': return True elif choice=='s': choice=-1 break elif choice=='u': choice=-2 break elif choice=='a': ns=raw_input('New alternative:') alternatives.append(ns) elif choice=='q': return False elif choice=='m': context*=2 elif choice=='l': print '\n' for i in range(len(alternatives)): print "%3d" % i,repr(alternatives[i]) else: if choice[0] == 'r': replaceit = 1 choice = choice[1:] else: replaceit = 0 try: choice=int(choice) except ValueError: pass else: break if choice==-1: # Next link on this page continue page_title = m.group(1) link_text = m.group(2) if not link_text: link_text = page_title trailing_chars = m.group(3) if trailing_chars: link_text += trailing_chars if choice==-2: # unlink reftxt = reftxt[:m.start()] + link_text + reftxt[m.end():] else: # Normal replacement new_page_title = alternatives[choice] reppl = wikipedia.PageLink(thispl.code(), new_page_title, incode = refpl.code()) new_page_title = reppl.linkname() # There is a function that uncapitalizes the link target's first letter # if the link description starts with a small letter. This is useful on # nl: but annoying on de:. # At the moment the de: exclusion is only a workaround because I don't # know if other languages don't want this feature either. # We might want to introduce a list of languages that don't want to use # this feature. if wikipedia.mylang != 'de' and link_text[0] in 'abcdefghijklmnopqrstuvwxyz': new_page_title = new_page_title[0].lower() + new_page_title[1:] if replaceit or new_page_title == link_text: reptxt = new_page_title # check if we can create a link with trailing characters instead of a pipelink elif len(new_page_title) <= len(link_text) and link_text[:len(new_page_title)] == new_page_title: newlink = "[[%s]]%s" % (new_page_title, link_text[len(new_page_title):]) else: newlink = "[[%s|%s]]" % (new_page_title, link_text) reftxt = reftxt[:m.start()] + newlink + reftxt[m.end():] print wikipedia.UnicodeToAsciiHtml(reftxt[max(0,m.start()-30):m.end()+30]) if not debug: refpl.put(reftxt) return True
|
if line[i] != ' ':
|
if line[i] != ' ' and i<len(line)-1:
|
def printLastLine(lastline, lastcolor): # highlight the minus red or the plus green if lastline != None: lastline = '\x1b[' + lastcolor + ';1m' + lastline[0] + '\x1b[0m' + lastline[1:] output(lastline)
|
if site.version() >= "1.8": itemR = re.compile('<tr class="def" id=".*?">' + '\s*<td>' + '\s*<a id=".+?" name=".+?"></a>' + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key>.+?)</span></a><br />' + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' + '\s*</td><td>' + '\s*(?P<current>.+?)' + '\s*</td>' + '\s*</tr>' + '|' + '<tr class="orig" id=".*?">' + '\s*<td rowspan="2">' + '\s*<a id=".+?" name=".+?"></a>' + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key2>.+?)</span></a><br />' + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' + '\s*</td><td>' + '\s*.+?' + '\s*</td>' + '\s*</tr><tr class="new" id=".*?">' + '\s*<td>' + '\s*(?P<current2>.+?)' + '\s*</td>' + '\s*</tr>', re.DOTALL) elif site.version() >= "1.5": itemR = re.compile("<tr class='def' id='.*?'>\n"
|
if site.version() >= "1.5": itemR = re.compile("<tr class=('|\")def('|\") id=('|\").*?('|\")>\n"
|
def refresh_messages(site = None): site = site or wikipedia.getSite() # get 'all messages' special page's path path = site.allmessages_address() print 'Retrieving MediaWiki messages for %s' % repr(site) wikipedia.put_throttle() # It actually is a get, but a heavy one. allmessages = site.getUrl(path) print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. if site.version() >= "1.8": itemR = re.compile('<tr class="def" id=".*?">' # first possibility: original MediaWiki message used + '\s*<td>' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*(?P<current>.+?)' # current message + '\s*</td>' + '\s*</tr>' + '|' + '<tr class="orig" id=".*?">' # second possibility: custom message used + '\s*<td rowspan="2">' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key2>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*.+?' # original message + '\s*</td>' + '\s*</tr><tr class="new" id=".*?">' + '\s*<td>' + '\s*(?P<current2>.+?)' # current message + '\s*</td>' + '\s*</tr>', re.DOTALL) elif site.version() >= "1.5": # MediaWiki 1.5 had single quotation marks in some places. itemR = re.compile("<tr class='def' id='.*?'>\n" # first possibility: original MediaWiki message used + "\s*<td>\n" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*(?P<current>.+?)\n" # current message + "\s*</td>" + "\s*</tr>" + "|" + "<tr class='orig' id='.*?'>\n" # second possibility: custom message used + "\s*<td rowspan='2'>" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key2>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*.+?\n" # original message + "\s*</td>" + "\s*</tr><tr class='new' id='.*?'>" + "\s*<td>\n" + "\s*(?P<current2>.+?)\n" # current message + "\s*</td>" + "\s*</tr>", re.DOTALL) else: itemR = re.compile("<tr bgcolor=\"#[0-9a-f]{6}\"><td>\n" #+ "\s*(?:<script[^<>]+>[^<>]+<[^<>]+script>)?[^+<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>.+?<\/a>\n" + "\s*</td><td>\n" + "\s*.+?\n" + "\s*</td><td>\n" + "\s*(?P<current>.+?)\n" + "\s*<\/td><\/tr>", re.DOTALL) # we will save the found key:value pairs here dictionary = {} for match in itemR.finditer(allmessages): # Key strings only contain ASCII characters, so we can use them as dictionary keys key = match.group('key') or match.group('key2') current = match.group('current') or match.group('current2') dictionary[key] = current # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. Create if necessary. if dictionary == {}: wikipedia.debugDump( 'MediaWiki_Msg', site, u'Error URL: '+unicode(path), allmessages ) sys.exit() else: f = open(makepath('mediawiki-messages/mediawiki-messages-%s-%s.dat' % (site.family.name, site.lang)), 'w') pickle.dump(dictionary, f) f.close() #print dictionary['addgroup'] #print dictionary['sitestatstext']
|
+ '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key>.+?)</span><\/a><br \/>'
|
+ '\s*<a href=".+?" title=".+?"><span id=(\'|").*?(\'|")>(?P<key>.+?)</span><\/a><br \/>'
|
def refresh_messages(site = None): site = site or wikipedia.getSite() # get 'all messages' special page's path path = site.allmessages_address() print 'Retrieving MediaWiki messages for %s' % repr(site) wikipedia.put_throttle() # It actually is a get, but a heavy one. allmessages = site.getUrl(path) print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. if site.version() >= "1.8": itemR = re.compile('<tr class="def" id=".*?">' # first possibility: original MediaWiki message used + '\s*<td>' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*(?P<current>.+?)' # current message + '\s*</td>' + '\s*</tr>' + '|' + '<tr class="orig" id=".*?">' # second possibility: custom message used + '\s*<td rowspan="2">' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key2>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*.+?' # original message + '\s*</td>' + '\s*</tr><tr class="new" id=".*?">' + '\s*<td>' + '\s*(?P<current2>.+?)' # current message + '\s*</td>' + '\s*</tr>', re.DOTALL) elif site.version() >= "1.5": # MediaWiki 1.5 had single quotation marks in some places. itemR = re.compile("<tr class='def' id='.*?'>\n" # first possibility: original MediaWiki message used + "\s*<td>\n" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*(?P<current>.+?)\n" # current message + "\s*</td>" + "\s*</tr>" + "|" + "<tr class='orig' id='.*?'>\n" # second possibility: custom message used + "\s*<td rowspan='2'>" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key2>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*.+?\n" # original message + "\s*</td>" + "\s*</tr><tr class='new' id='.*?'>" + "\s*<td>\n" + "\s*(?P<current2>.+?)\n" # current message + "\s*</td>" + "\s*</tr>", re.DOTALL) else: itemR = re.compile("<tr bgcolor=\"#[0-9a-f]{6}\"><td>\n" #+ "\s*(?:<script[^<>]+>[^<>]+<[^<>]+script>)?[^+<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>.+?<\/a>\n" + "\s*</td><td>\n" + "\s*.+?\n" + "\s*</td><td>\n" + "\s*(?P<current>.+?)\n" + "\s*<\/td><\/tr>", re.DOTALL) # we will save the found key:value pairs here dictionary = {} for match in itemR.finditer(allmessages): # Key strings only contain ASCII characters, so we can use them as dictionary keys key = match.group('key') or match.group('key2') current = match.group('current') or match.group('current2') dictionary[key] = current # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. Create if necessary. if dictionary == {}: wikipedia.debugDump( 'MediaWiki_Msg', site, u'Error URL: '+unicode(path), allmessages ) sys.exit() else: f = open(makepath('mediawiki-messages/mediawiki-messages-%s-%s.dat' % (site.family.name, site.lang)), 'w') pickle.dump(dictionary, f) f.close() #print dictionary['addgroup'] #print dictionary['sitestatstext']
|
+ "<tr class='orig' id='.*?'>\n" + "\s*<td rowspan='2'>"
|
+ "<tr class=('|\")orig('|\") id=('|\").*?('|\")>\n" + "\s*<td rowspan=('|\")2('|\")>"
|
def refresh_messages(site = None): site = site or wikipedia.getSite() # get 'all messages' special page's path path = site.allmessages_address() print 'Retrieving MediaWiki messages for %s' % repr(site) wikipedia.put_throttle() # It actually is a get, but a heavy one. allmessages = site.getUrl(path) print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. if site.version() >= "1.8": itemR = re.compile('<tr class="def" id=".*?">' # first possibility: original MediaWiki message used + '\s*<td>' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*(?P<current>.+?)' # current message + '\s*</td>' + '\s*</tr>' + '|' + '<tr class="orig" id=".*?">' # second possibility: custom message used + '\s*<td rowspan="2">' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key2>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*.+?' # original message + '\s*</td>' + '\s*</tr><tr class="new" id=".*?">' + '\s*<td>' + '\s*(?P<current2>.+?)' # current message + '\s*</td>' + '\s*</tr>', re.DOTALL) elif site.version() >= "1.5": # MediaWiki 1.5 had single quotation marks in some places. itemR = re.compile("<tr class='def' id='.*?'>\n" # first possibility: original MediaWiki message used + "\s*<td>\n" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*(?P<current>.+?)\n" # current message + "\s*</td>" + "\s*</tr>" + "|" + "<tr class='orig' id='.*?'>\n" # second possibility: custom message used + "\s*<td rowspan='2'>" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key2>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*.+?\n" # original message + "\s*</td>" + "\s*</tr><tr class='new' id='.*?'>" + "\s*<td>\n" + "\s*(?P<current2>.+?)\n" # current message + "\s*</td>" + "\s*</tr>", re.DOTALL) else: itemR = re.compile("<tr bgcolor=\"#[0-9a-f]{6}\"><td>\n" #+ "\s*(?:<script[^<>]+>[^<>]+<[^<>]+script>)?[^+<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>.+?<\/a>\n" + "\s*</td><td>\n" + "\s*.+?\n" + "\s*</td><td>\n" + "\s*(?P<current>.+?)\n" + "\s*<\/td><\/tr>", re.DOTALL) # we will save the found key:value pairs here dictionary = {} for match in itemR.finditer(allmessages): # Key strings only contain ASCII characters, so we can use them as dictionary keys key = match.group('key') or match.group('key2') current = match.group('current') or match.group('current2') dictionary[key] = current # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. Create if necessary. if dictionary == {}: wikipedia.debugDump( 'MediaWiki_Msg', site, u'Error URL: '+unicode(path), allmessages ) sys.exit() else: f = open(makepath('mediawiki-messages/mediawiki-messages-%s-%s.dat' % (site.family.name, site.lang)), 'w') pickle.dump(dictionary, f) f.close() #print dictionary['addgroup'] #print dictionary['sitestatstext']
|
+ '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key2>.+?)</span><\/a><br \/>'
|
+ '\s*<a href=".+?" title=".+?"><span id=(\'|").*?(\'|")>(?P<key2>.+?)</span><\/a><br \/>'
|
def refresh_messages(site = None): site = site or wikipedia.getSite() # get 'all messages' special page's path path = site.allmessages_address() print 'Retrieving MediaWiki messages for %s' % repr(site) wikipedia.put_throttle() # It actually is a get, but a heavy one. allmessages = site.getUrl(path) print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. if site.version() >= "1.8": itemR = re.compile('<tr class="def" id=".*?">' # first possibility: original MediaWiki message used + '\s*<td>' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*(?P<current>.+?)' # current message + '\s*</td>' + '\s*</tr>' + '|' + '<tr class="orig" id=".*?">' # second possibility: custom message used + '\s*<td rowspan="2">' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key2>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*.+?' # original message + '\s*</td>' + '\s*</tr><tr class="new" id=".*?">' + '\s*<td>' + '\s*(?P<current2>.+?)' # current message + '\s*</td>' + '\s*</tr>', re.DOTALL) elif site.version() >= "1.5": # MediaWiki 1.5 had single quotation marks in some places. itemR = re.compile("<tr class='def' id='.*?'>\n" # first possibility: original MediaWiki message used + "\s*<td>\n" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*(?P<current>.+?)\n" # current message + "\s*</td>" + "\s*</tr>" + "|" + "<tr class='orig' id='.*?'>\n" # second possibility: custom message used + "\s*<td rowspan='2'>" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key2>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*.+?\n" # original message + "\s*</td>" + "\s*</tr><tr class='new' id='.*?'>" + "\s*<td>\n" + "\s*(?P<current2>.+?)\n" # current message + "\s*</td>" + "\s*</tr>", re.DOTALL) else: itemR = re.compile("<tr bgcolor=\"#[0-9a-f]{6}\"><td>\n" #+ "\s*(?:<script[^<>]+>[^<>]+<[^<>]+script>)?[^+<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>.+?<\/a>\n" + "\s*</td><td>\n" + "\s*.+?\n" + "\s*</td><td>\n" + "\s*(?P<current>.+?)\n" + "\s*<\/td><\/tr>", re.DOTALL) # we will save the found key:value pairs here dictionary = {} for match in itemR.finditer(allmessages): # Key strings only contain ASCII characters, so we can use them as dictionary keys key = match.group('key') or match.group('key2') current = match.group('current') or match.group('current2') dictionary[key] = current # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. Create if necessary. if dictionary == {}: wikipedia.debugDump( 'MediaWiki_Msg', site, u'Error URL: '+unicode(path), allmessages ) sys.exit() else: f = open(makepath('mediawiki-messages/mediawiki-messages-%s-%s.dat' % (site.family.name, site.lang)), 'w') pickle.dump(dictionary, f) f.close() #print dictionary['addgroup'] #print dictionary['sitestatstext']
|
+ "\s*</tr><tr class='new' id='.*?'>"
|
+ "\s*</tr><tr class=('|\")new('|\") id=('|\").*?('|\")>"
|
def refresh_messages(site = None): site = site or wikipedia.getSite() # get 'all messages' special page's path path = site.allmessages_address() print 'Retrieving MediaWiki messages for %s' % repr(site) wikipedia.put_throttle() # It actually is a get, but a heavy one. allmessages = site.getUrl(path) print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. if site.version() >= "1.8": itemR = re.compile('<tr class="def" id=".*?">' # first possibility: original MediaWiki message used + '\s*<td>' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*(?P<current>.+?)' # current message + '\s*</td>' + '\s*</tr>' + '|' + '<tr class="orig" id=".*?">' # second possibility: custom message used + '\s*<td rowspan="2">' + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\".*?\">(?P<key2>.+?)</span></a><br />' # message link + '\s*<a href=".+?"( class="new")? title=".+?">.+?</a>' # talk link + '\s*</td><td>' + '\s*.+?' # original message + '\s*</td>' + '\s*</tr><tr class="new" id=".*?">' + '\s*<td>' + '\s*(?P<current2>.+?)' # current message + '\s*</td>' + '\s*</tr>', re.DOTALL) elif site.version() >= "1.5": # MediaWiki 1.5 had single quotation marks in some places. itemR = re.compile("<tr class='def' id='.*?'>\n" # first possibility: original MediaWiki message used + "\s*<td>\n" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*(?P<current>.+?)\n" # current message + "\s*</td>" + "\s*</tr>" + "|" + "<tr class='orig' id='.*?'>\n" # second possibility: custom message used + "\s*<td rowspan='2'>" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key2>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*.+?\n" # original message + "\s*</td>" + "\s*</tr><tr class='new' id='.*?'>" + "\s*<td>\n" + "\s*(?P<current2>.+?)\n" # current message + "\s*</td>" + "\s*</tr>", re.DOTALL) else: itemR = re.compile("<tr bgcolor=\"#[0-9a-f]{6}\"><td>\n" #+ "\s*(?:<script[^<>]+>[^<>]+<[^<>]+script>)?[^+<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>.+?<\/a>\n" + "\s*</td><td>\n" + "\s*.+?\n" + "\s*</td><td>\n" + "\s*(?P<current>.+?)\n" + "\s*<\/td><\/tr>", re.DOTALL) # we will save the found key:value pairs here dictionary = {} for match in itemR.finditer(allmessages): # Key strings only contain ASCII characters, so we can use them as dictionary keys key = match.group('key') or match.group('key2') current = match.group('current') or match.group('current2') dictionary[key] = current # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. Create if necessary. if dictionary == {}: wikipedia.debugDump( 'MediaWiki_Msg', site, u'Error URL: '+unicode(path), allmessages ) sys.exit() else: f = open(makepath('mediawiki-messages/mediawiki-messages-%s-%s.dat' % (site.family.name, site.lang)), 'w') pickle.dump(dictionary, f) f.close() #print dictionary['addgroup'] #print dictionary['sitestatstext']
|
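The two rows above loosen the Special:Allmessages screen-scraping regex so it accepts both the single and the double quotation marks that different MediaWiki releases emit around attribute values. A minimal sketch of that quote-agnostic alternation, using invented sample HTML and a backreference so the closing quote has to match the opening one:

import re

# Two invented Special:Allmessages-style rows, one with single-quoted and
# one with double-quoted span ids.
rows = [
    "<a href=\"/wiki/MediaWiki:Aboutsite\" title=\"...\"><span id='msg_aboutsite'>aboutsite</span></a><br />",
    '<a href="/wiki/MediaWiki:Aboutpage" title="..."><span id="msg_aboutpage">aboutpage</span></a><br />',
]

# ('|") matches either quote character; \1 requires the closing quote to be
# the same one that opened the attribute.
key_pattern = re.compile(r'<span id=(\'|")[^\'"]*\1>(?P<key>[^<]+)</span>')

for row in rows:
    match = key_pattern.search(row)
    if match:
        print(match.group('key'))
# prints: aboutsite, aboutpage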
elif arg == '-home': refresh_messages()
|
def main(): debug = False refresh_all = False for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'mediawiki_messages') if arg: if arg == '-debug': debug = True elif arg == '-all': refresh_all = True if refresh_all: refresh_all_messages() else: refresh_messages(wikipedia.getSite()) if debug: print "DBG> successfulupload contains %s" % get('successfulupload') print "DBG> deletedtext contains %s" % get('deletedtext')
|
|
},
|
}
|
def __init__(self): family.Family.__init__(self) self.name = 'wikibooks' # Known wikibooks languages, given as a dictionary mapping the language code # to the hostname of the site hosting that wiktibooks. For human consumption, # the full name of the language is given behind each line as a comment self.langs = { 'minnan':'zh-min-nan.wiktionary.org', 'nb':'no.wiktionary.org', 'zh-cn':'zh.wiktionary.org', 'zh-tw':'zh.wiktionary.org' } for lang in self.knownlanguages: self.langs[lang] = lang+'.wiktionary.org'
|
raise self._getexception
|
if self._getexception == IsRedirectPage and get_redirect: pass else: raise self._getexception
|
def get(self, force = False, get_redirect=False, throttle = True, sysop = False): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code:
|
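The row above changes Page.get() so that a cached IsRedirectPage error is ignored when the caller explicitly asked to follow redirects, and re-raised otherwise. A stripped-down sketch of that guard, with stand-in classes instead of the real wikipedia module:

class IsRedirectPage(Exception):
    pass

class FakePage(object):
    """Stand-in for wikipedia.Page, keeping only the cached-exception logic."""
    def __init__(self, cached_exception, contents):
        self._getexception = cached_exception
        self._contents = contents

    def get(self, get_redirect=False):
        if self._getexception:
            if self._getexception is IsRedirectPage and get_redirect:
                # Caller opted in to redirects, so the cached error is ignored.
                pass
            else:
                raise self._getexception
        return self._contents

page = FakePage(IsRedirectPage, '#REDIRECT [[Target]]')
print(page.get(get_redirect=True))   # returns the raw redirect text
# page.get() without get_redirect=True re-raises IsRedirectPage instead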
ImageRegex = re.compile(r'\[\[ *[Ii]mage:' + old + ' *(?P<parameters>\|[^}]+|) *\]\]')
|
ImageRegex = re.compile(r'\[\[ *[Ii]mage:' + old + ' *(?P<parameters>\|[^\n]+|) *\]\]')
|
def run(self): """ Starts the robot's action. """ # regular expression to find the original template. # {{vfd}} does the same thing as {{Vfd}}, so both will be found. # The old syntax, {{msg:vfd}}, will also be found. # The group 'parameters' will either match the parameters, or an # empty string if there are none.
|
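The row above narrows the character class in the image-link regex from [^}]+ to [^\n]+, so the parameter match can no longer run past the end of the line while hunting for the closing brackets. A small demonstration on invented wikitext:

import re

text = "[[Image:Example.jpg|thumb|caption]]\nAnother [[link]] on the next line"

greedy = re.compile(r'\[\[ *[Ii]mage:Example\.jpg *(?P<parameters>\|[^}]+|) *\]\]')
line_bound = re.compile(r'\[\[ *[Ii]mage:Example\.jpg *(?P<parameters>\|[^\n]+|) *\]\]')

print(repr(greedy.search(text).group('parameters')))
# '|thumb|caption]]\nAnother [[link'  -- backtracks to the last ]] and overshoots
print(repr(line_bound.search(text).group('parameters')))
# '|thumb|caption'                    -- stops at the end of the image link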
def hashfreeLinkname(self): hn=self.hashname() if hn: return self.linkname()[:-len(hn)+1] else: return self.linkname()
|
def hashname(self): """The name of the subpage this PageLink refers to. Subpages are denominated by a # in the linkname(). If no subpage is referenced, None is returned.""" ln = self.linkname() ln = re.sub('&#', '&hash;', ln) if not '#' in ln: return None else: hn = ln[ln.find('#') + 1:] hn = re.sub('&hash;', '&#', hn) #print "hn=", hn return hn
|
|
class WikimediaXmlHandler(xml.sax.handler.ContentHandler): def setCallback(self, callback): self.callback = callback def startElement(self, name, attrs): self.destination = None if name == 'page': self.text=u'' self.title=u'' self.timestamp=u'' elif name == 'text': self.destination = 'text' elif name == 'title': self.destination = 'title' elif name == 'timestamp': self.destination = 'timestamp' def endElement(self, name): if name == 'revision': print "DBG> ",repr(self.title), self.timestamp, len(self.text) text = unescape(self.text) while text[-1] in '\n ': text = text[:-1] text = u'\r\n'.join(text.split('\n')) timestamp = (self.timestamp[0:4]+ self.timestamp[5:7]+ self.timestamp[8:10]+ self.timestamp[11:13]+ self.timestamp[14:16]+ self.timestamp[17:19]) self.callback(self.title.strip(), timestamp, text) def characters(self, data): if self.destination == 'text': self.text += data elif self.destination == 'title': self.title += data elif self.destination == 'timestamp': self.timestamp += data class GetAll: debug = 1 addr = '/wiki/%s:Export' def __init__(self, code, pages): self.code = code self.pages = pages def run(self): data = self.getData() handler = WikimediaXmlHandler() handler.setCallback(self.oneDone) xml.sax.parseString(data, handler) for pl in self.pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): pl._getexception = NoPage def oneDone(self, title, timestamp, text): print "DBG>", repr(title), timestamp, len(text) pl = PageLink(self.code, title) for pl2 in self.pages: if pl2 == pl: break else: raise "bug, page not found in list" if self.debug: xtext = pl2.get() if text != xtext: print " import difflib for line in difflib.ndiff(xtext.split('\r\n'), text.split('\r\n')): if line[0] in ['+', '-']: print repr(line)[2:-1] if edittime[self.code, link2url(title, self.code)] != timestamp: print " print "-",edittime[self.code, link2url(title, self.code)] print "+",timestamp else: m=Rredirect.match(xtext) if m: pl2._getexception = IsRedirectPage, m.group(1) else: hn = pl2.hashname() if hn: m = re.search("== *%s *==" % hn, xtext) if not m: pl2._getexception = SubpageError("Hashname does not exist: %s" % self) else: pl2._contents = xtext edittime[self.code, link2url(title, self.code)] = timestamp def getData(self): import httplib addr = self.addr%special[self.code] pagenames = '\r\n'.join([x.hashfreeLinkname() for x in self.pages]) data = urlencode(( ('action', 'submit'), ('pages', pagenames), ('curonly', 'True'), )) headers = {"Content-type": "application/x-www-form-urlencoded", "User-agent": "RobHooftWikiRobot/1.0"} get_throttle() conn = httplib.HTTPConnection(langs[self.code]) conn.request("POST", addr, data, headers) response = conn.getresponse() data = response.read() conn.close() return data
|
def getRedirectTo(self):
|
|
Rredirect = re.compile(r'\
|
def getPage(code, name, do_edit = 1, do_quote = 1): """Get the contents of page 'name' from the 'code' language wikipedia Do not use this directly; use the PageLink object instead.""" host = langs[code] if code in oldsoftware: # Old algorithm name = re.sub('_', ' ', name) n = [] for x in name.split(): n.append(x[0].capitalize() + x[1:]) name = '_'.join(n) #print name else: name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name != urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if code not in oldsoftware: address = '/w/wiki.phtml?title='+name+"&redirect=no" if do_edit: address += '&action=edit&printable=yes' else: if not do_edit: raise Error("can not skip edit on old-software wikipedia") address = '/wiki.cgi?action=edit&id='+name if debug: print host, address # Make sure Brion doesn't get angry by slowing ourselves down. get_throttle() text, charset = getUrl(host,address) # Keep login status for external use if code == mylang: global loggedin if "Userlogin" in text: loggedin = False else: loggedin = True # Extract the actual text from the textedit field if do_edit: if debug: print "Raw:", len(text), type(text), text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code].lower() == charset.lower(), "charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code] = charset if code2encoding(code).lower() != charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: edittime[code, link2url(name, code)] = 0 try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: #print "No text area.",host,address #print repr(text) raise LockedPage(text) i2 = re.search('</textarea>', text).start() if i2-i1 < 2: # new software raise NoPage(code, name) if debug: print text[i1:i2] if text[i1:i2] == 'Describe the new page here.\n': # old software raise NoPage(code, name) Rredirect = re.compile(r'\#redirect:? *\[\[(.*?)\]\]', re.I) m=Rredirect.match(text[i1:i2]) if m: raise IsRedirectPage(m.group(1)) if edittime[code, name] == 0 and code not in oldsoftware: print "DBG> page may be locked?!" pass #raise LockedPage() x = text[i1:i2] x = unescape(x) else: x = text # If not editing # Convert to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(charset) try: x,l = decode_func(x) except UnicodeError: print code,name print repr(x) raise return x
|
|
replacements.append(commandline_replacements[0], commandline_replacements[1])
|
replacements.append((commandline_replacements[0], commandline_replacements[1]))
|
def main(): # How we want to retrieve information on which pages need to be changed. # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # Array which will collect commandline parameters. # First element is original text, second element is replacement text. commandline_replacements = [] # A list of 2-tuples of original text and replacement text. replacements = [] # Don't edit pages which contain certain texts. exceptions = [] # Should the elements of 'replacements' and 'exceptions' be interpreted # as regular expressions? regex = False # Predefined fixes from dictionary 'fixes' (see above). fix = None # the dump's path, either absolute or relative, which will be used when source # is 'xmldump'. xmlfilename = None # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # a list of pages which will be used when source is 'userinput'. pagenames = [] # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None # Load default summary message. wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg)) # Read commandline parameters. for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'replace') if arg: if arg == '-regex': regex = True elif arg.startswith('-file'): if len(arg) == 5: textfilename = wikipedia.input(u'Please enter the filename:') else: textfilename = arg[6:] source = 'textfile' elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] source = 'category' elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlfilename = arg[5:] source = 'xmldump' elif arg.startswith('-page'): if len(arg) == 5: pagenames.append(wikipedia.input(u'Which page do you want to chage?')) else: pagenames.append(arg[6:]) source = 'userinput' elif arg.startswith('-start'): if len(arg) == 6: startpage = wikipedia.input(u'Which page do you want to chage?') else: startpage = arg[7:] source = 'allpages' elif arg.startswith('-except:'): exceptions.append(arg[8:]) elif arg.startswith('-fix:'): fix = arg[5:] elif arg == '-always': acceptall = True elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) else: commandline_replacements.append(arg) if source == None or len(commandline_replacements) not in [0, 2]: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if (len(commandline_replacements) == 2 and fix == None): replacements.append(commandline_replacements[0], commandline_replacements[1]) wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg ) % ' (-' + commandline_replacements[0] + ' +' + commandline_replacements[1] + ')') elif fix == None: old = wikipedia.input(u'Please enter the text that should be replaced:') new = wikipedia.input(u'Please enter the new text:') change = '(-' + old + ' +' + new replacements.append(old, new) while True: old = wikipedia.input(u'Please enter another text that should be replaced, or press Enter to start:') if old == '': change = change + ')' break new = wikipedia.input(u'Please enter the new text:') change = change + ' & -' + old + ' +' + new 
replacements.append(old, new) default_summary_message = wikipedia.translate(wikipedia.getSite(), msg) % change wikipedia.output(u'The summary message will default to: %s' % default_summary_message) summary_message = wikipedia.input(u'Press Enter to use this default message, or enter a description of the changes your bot will make:') if summary_message == '': summary_message = default_summary_message wikipedia.setAction(summary_message) else: # Perform one of the predefined actions. try: fix = fixes[fix] except KeyError: wikipedia.output(u'Available predefined fixes are: %s' % fixes.keys()) wikipedia.stopme() sys.exit() if fix.has_key('regex'): regex = fix['regex'] if fix.has_key('msg'): wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), fix['msg'])) if fix.has_key('exceptions'): exceptions = fix['exceptions'] replacements = fix['replacements'] gen = ReplacePageGenerator(source, replacements, exceptions, regex, textfilename, xmlfilename, categoryname, pagenames, startpage) if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = ReplaceRobot(preloadingGen, replacements, exceptions, regex, acceptall) bot.run()
|
replacements.append(old, new)
|
replacements.append((old, new))
|
def main(): # How we want to retrieve information on which pages need to be changed. # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # Array which will collect commandline parameters. # First element is original text, second element is replacement text. commandline_replacements = [] # A list of 2-tuples of original text and replacement text. replacements = [] # Don't edit pages which contain certain texts. exceptions = [] # Should the elements of 'replacements' and 'exceptions' be interpreted # as regular expressions? regex = False # Predefined fixes from dictionary 'fixes' (see above). fix = None # the dump's path, either absolute or relative, which will be used when source # is 'xmldump'. xmlfilename = None # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # a list of pages which will be used when source is 'userinput'. pagenames = [] # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None # Load default summary message. wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg)) # Read commandline parameters. for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'replace') if arg: if arg == '-regex': regex = True elif arg.startswith('-file'): if len(arg) == 5: textfilename = wikipedia.input(u'Please enter the filename:') else: textfilename = arg[6:] source = 'textfile' elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] source = 'category' elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlfilename = arg[5:] source = 'xmldump' elif arg.startswith('-page'): if len(arg) == 5: pagenames.append(wikipedia.input(u'Which page do you want to chage?')) else: pagenames.append(arg[6:]) source = 'userinput' elif arg.startswith('-start'): if len(arg) == 6: startpage = wikipedia.input(u'Which page do you want to chage?') else: startpage = arg[7:] source = 'allpages' elif arg.startswith('-except:'): exceptions.append(arg[8:]) elif arg.startswith('-fix:'): fix = arg[5:] elif arg == '-always': acceptall = True elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) else: commandline_replacements.append(arg) if source == None or len(commandline_replacements) not in [0, 2]: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if (len(commandline_replacements) == 2 and fix == None): replacements.append(commandline_replacements[0], commandline_replacements[1]) wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), msg ) % ' (-' + commandline_replacements[0] + ' +' + commandline_replacements[1] + ')') elif fix == None: old = wikipedia.input(u'Please enter the text that should be replaced:') new = wikipedia.input(u'Please enter the new text:') change = '(-' + old + ' +' + new replacements.append(old, new) while True: old = wikipedia.input(u'Please enter another text that should be replaced, or press Enter to start:') if old == '': change = change + ')' break new = wikipedia.input(u'Please enter the new text:') change = change + ' & -' + old + ' +' + new 
replacements.append(old, new) default_summary_message = wikipedia.translate(wikipedia.getSite(), msg) % change wikipedia.output(u'The summary message will default to: %s' % default_summary_message) summary_message = wikipedia.input(u'Press Enter to use this default message, or enter a description of the changes your bot will make:') if summary_message == '': summary_message = default_summary_message wikipedia.setAction(summary_message) else: # Perform one of the predefined actions. try: fix = fixes[fix] except KeyError: wikipedia.output(u'Available predefined fixes are: %s' % fixes.keys()) wikipedia.stopme() sys.exit() if fix.has_key('regex'): regex = fix['regex'] if fix.has_key('msg'): wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), fix['msg'])) if fix.has_key('exceptions'): exceptions = fix['exceptions'] replacements = fix['replacements'] gen = ReplacePageGenerator(source, replacements, exceptions, regex, textfilename, xmlfilename, categoryname, pagenames, startpage) if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = ReplaceRobot(preloadingGen, replacements, exceptions, regex, acceptall) bot.run()
|
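The two rows above repair the same slip in replace.py twice: list.append() takes exactly one argument, so the old/new pair has to be packed into a tuple before it is appended. A tiny illustration:

replacements = []

try:
    replacements.append('old text', 'new text')   # broken form from the rows above
except TypeError as error:
    print(error)   # e.g. "append() takes exactly one argument (2 given)"

replacements.append(('old text', 'new text'))     # corrected form: one tuple
for old, new in replacements:
    print('replace %r with %r' % (old, new))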
text = text.encode(myencoding())
|
text = text.encode(code2encoding(code))
|
def forCode(text, code): """Prepare the unicode string 'text' for inclusion into a page for language 'code'. All of the characters in the text should be encodable, otherwise this will fail! This condition is normally met, except if you would copy text verbatim from an UTF-8 language into a iso-8859-1 language, and none of the robots in the package should do such things""" if type(text) == type(u''): #if code == 'ascii': # return UnicodeToAsciiHtml(text) #encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(code2encoding(code)) #text,l = encode_func(text) text = text.encode(myencoding()) return text
|
x=urllib.unquote(percentname)
|
for c in percentname: if ord(c)>128: x=percentname break else: x=urllib.unquote(str(percentname))
|
def url2unicode(percentname, language): x=urllib.unquote(percentname) #print "DBG> ",language,repr(percentname),repr(x) # Try utf-8 first. It almost cannot succeed by accident! for encoding in ('utf-8',)+code2encodings(language): try: encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) x,l = decode_func(x) #print "DBG> ",encoding,repr(x) return x except UnicodeError: pass raise UnicodeError("Could not decode %s" % repr(percentname))
|
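The row above stops url2unicode from percent-decoding input that already contains non-ASCII text, and (together with the context) tries utf-8 before the language's legacy encodings. A rough Python 3 analogue of that decode-with-fallback idea; urllib.parse replaces the old urllib interface here, and the encoding list is just an example rather than the real per-wiki table:

from urllib.parse import unquote_to_bytes

def url2unicode(percentname, encodings=('utf-8', 'iso-8859-1')):
    # Mirrors the ord(c) > 128 check above: text that already contains
    # non-ASCII characters was never URL-encoded, so pass it through.
    if any(ord(c) > 127 for c in percentname):
        return percentname
    raw = unquote_to_bytes(percentname)
    for encoding in encodings:
        try:
            return raw.decode(encoding)
        except UnicodeDecodeError:
            continue
    raise UnicodeError('Could not decode %r' % percentname)

print(url2unicode('Z%C3%BCrich'))   # 'Zürich'  (utf-8 succeeds first)
print(url2unicode('Caf%E9'))        # 'Café'    (falls back to iso-8859-1)
print(url2unicode('München'))       # already unicode text, returned unchanged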
sourceImagePage.put(original_description + '\n\n' + nowCommonsTemplate[sourceSite.lang][0] % targetFilename, comment = nowCommonsMessage[sourceSite.lang][1])
|
sourceImagePage.put(original_description + '\n\n' + nowCommonsTemplate[sourceSite.lang] % targetFilename, comment = nowCommonsMessage[sourceSite.lang])
|
def transferImage(self, sourceImagePage, debug=False): """Gets a wikilink to an image, downloads it and its description, and uploads it to another wikipedia. Returns the filename which was used to upload the image This function is used by imagetransfer.py and by copy_table.py """ sourceSite = sourceImagePage.site() if debug: print "--------------------------------------------------" if debug: print "Found image: %s"% imageTitle # need to strip off "Afbeelding:", "Image:" etc. # we only need the substring following the first colon filename = sourceImagePage.title().split(":", 1)[1] # Spaces might occur, but internally they are represented by underscores. # Change the name now, because otherwise we get the wrong MD5 hash. filename = filename.replace(' ', '_') # Also, the first letter should be capitalized # TODO: Don't capitalize on non-capitalizing wikis filename = filename[0].upper()+filename[1:] if debug: print "Image filename is: %s " % filename encodedFilename = filename.encode(sourceSite.encoding()) md5sum = md5.new(encodedFilename).hexdigest() if debug: print "MD5 hash is: %s" % md5sum # TODO: This probably doesn't work on all wiki families url = 'http://%s/upload/%s/%s/%s' % (sourceSite.hostname(), md5sum[0], md5sum[:2], filename) if debug: print "URL should be: %s" % url # localize the text that should be printed on the image description page try: original_description = sourceImagePage.get() description = wikipedia.translate(self.targetSite, copy_message) % (sourceSite, original_description) # TODO: Only the page's version history is shown, but the file's # version history would be more helpful description += '\n\n' + sourceImagePage.getVersionHistoryTable() # add interwiki link if sourceImagePage.site().family == self.targetSite.family: description += "\r\n\r\n" + sourceImagePage.aslink(forceInterwiki = True) except wikipedia.NoPage: description='' print "Image does not exist or description page is empty." except wikipedia.IsRedirectPage: description='' print "Image description page is redirect." else: bot = upload.UploadRobot(url = url, description = description, targetSite = self.targetSite, urlEncoding = sourceSite.encoding()) # try to upload targetFilename = bot.run() if targetFilename and self.targetSite.family.name == 'commons' and self.targetSite.lang == 'commons': # upload to Commons was successful reason = wikipedia.translate(sourceSite, nowCommonsMessage) # try to delete the original image if we have a sysop account if config.sysopnames.has_key(sourceSite.family.name) and config.sysopnames[sourceSite.family.name].has_key(sourceSite.lang): if sourceImagePage.delete(reason): return if nowCommonsTemplate.has_key(sourceSite.lang) and config.usernames.has_key(sourceSite.family.name) and config.usernames[sourceSite.family.name].has_key(sourceSite.lang): # add the nowCommons template. wikipedia.output(u'Adding nowCommons template to %s' % sourceImagePage.title()) sourceImagePage.put(original_description + '\n\n' + nowCommonsTemplate[sourceSite.lang][0] % targetFilename, comment = nowCommonsMessage[sourceSite.lang][1])
|
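The row above drops a stray [0] index: nowCommonsTemplate maps a language code directly to a format string, so indexing into it returned only the first character of the template and broke the %-substitution. A small reproduction with a simplified stand-in table:

now_commons_template = {
    'de': u'{{NowCommons|%s}}',
    'nl': u'{{NuCommons|%s}}',
}

lang, target_filename = 'de', 'Example.jpg'

print(now_commons_template[lang] % target_filename)
# '{{NowCommons|Example.jpg}}'

try:
    now_commons_template[lang][0] % target_filename   # only '{' survives the [0]
except TypeError as error:
    print(error)   # not all arguments converted during string formatting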
path = self.get_address('Non-existing page')
|
path = self.get_address('Non-existing_page')
|
def loggedin(self, check = False): if not hasattr(self,'_loggedin'): self._fill() if check: path = self.get_address('Non-existing page') txt = getUrl(self, path) self._loggedin = 'Userlogin' not in txt return self._loggedin
|
ok, message = linkChecker.check()
|
try: ok, message = linkChecker.check() except: wikipedia.output('Exception while processing URL %s in page %s' % (self.url, self.title)) raise
|
def run(self): linkChecker = LinkChecker(self.url) ok, message = linkChecker.check() if ok: if self.history.setLinkAlive(self.url): wikipedia.output('*Link to %s in [[%s]] is back alive.' % (self.url, self.title)) else: wikipedia.output('*[[%s]] links to %s - %s.' % (self.title, self.url, message)) self.history.setLinkDead(self.url, message, self.title)
|
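The row above wraps the link check so that an unexpected exception is reported together with the URL and the page that triggered it before the traceback propagates. A generic sketch of that log-and-reraise pattern; check_url and the sample data are invented:

def check_url(url):
    """Toy link checker: only http(s) URLs are 'supported'."""
    if not url.startswith('http'):
        raise ValueError('unsupported scheme: %s' % url)
    return True, 'ok'

def run(url, title):
    try:
        ok, message = check_url(url)
    except Exception:
        # Say which URL/page broke the run, then re-raise so the
        # traceback is not swallowed.
        print('Exception while processing URL %s in page [[%s]]' % (url, title))
        raise
    if ok:
        print('*Link to %s in [[%s]] is alive - %s.' % (url, title, message))

run('http://example.org', 'Example page')
# run('ftp://example.org', 'Example page') prints the context line, then raises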
cats = pl.categories(catsWithSortKeys = True)
|
cats = pl.categories(withSortKeys = True)
|
def include(pl,checklinks=True,realinclude=True,linkterm=None): cl = checklinks if realinclude: try: text = pl.get() except wikipedia.NoPage: pass except wikipedia.IsRedirectPage: cl = True pass else: cats = pl.categories() if not workingcat in cats: cats = pl.categories(catsWithSortKeys = True) for c in cats: if rawtoclean(c) in parentcats: cats.remove(c) if linkterm: pl.put(wikipedia.replaceCategoryLinks(text, cats + [wikipedia.Page(mysite,"%s|%s"%(workingcat.title(),linkterm))])) else: pl.put(wikipedia.replaceCategoryLinks(text, cats + [workingcat])) if cl: if checkforward: try: pl.get() except wikipedia.IsRedirectPage: pl2 = wikipedia.Page(mysite,pl.getRedirectTarget()) if needcheck(pl2): tocheck.append(pl2) checked[pl2]=pl2 except wikipedia.Error: pass else: for page2 in pl.linkedPages(): if needcheck(page2): tocheck.append(page2) checked[page2] = page2 if checkbackward: for refPage in pl.getReferences(): if needcheck(refPage): tocheck.append(refPage) checked[refPage] = refPage
|
output(u'Page %s is semi-protected. Getting edit page to find out if we are allowed to edit.' % self)
|
output(u'Page %s is semi-protected. Getting edit page to find out if we are allowed to edit.' % self.aslink())
|
def put(self, newtext, comment=None, watchArticle = None, minorEdit = True): """Replace the new page with the contents of the first argument. The second argument is a string that is to be used as the summary for the modification
|
x=urllib.unquote(percentname)
|
x=urllib.unquote(str(percentname))
|
def url2unicode(percentname, language): x=urllib.unquote(percentname) for encoding in code2encodings(language): try: encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) x,l = decode_func(x) return x except UnicodeError: pass raise UnicodeError("Could not decode %s" % repr(percentname))
|
x,l = encode_func(x)
|
y,l = encode_func(x)
|
def unicode2html(x, encoding='latin1'): # We have a unicode string. We can attempt to encode it into the desired # format, and if that doesn't work, we encode the unicode into html # # entities. try: encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) x,l = encode_func(x) except UnicodeError: x = UnicodeToAsciiHtml(x) return str(x)
|
return str(x)
|
return x
|
def unicode2html(x, encoding='latin1'): # We have a unicode string. We can attempt to encode it into the desired # format, and if that doesn't work, we encode the unicode into html # # entities. try: encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) x,l = encode_func(x) except UnicodeError: x = UnicodeToAsciiHtml(x) return str(x)
|
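The two rows above fix unicode2html so the trial encode no longer overwrites the unicode value with a byte string, and so the function returns the unicode text itself instead of forcing it through str(). A Python 3-flavoured sketch of the underlying "encode if possible, otherwise emit HTML entities" idea, with UnicodeToAsciiHtml approximated by a simple numeric-character-reference fallback:

def unicode_to_ascii_html(text):
    # Replace every non-ASCII character with a numeric character reference.
    return ''.join(c if ord(c) < 128 else '&#%d;' % ord(c) for c in text)

def unicode2html(text, encoding='latin-1'):
    try:
        # Trial encode only: the result is deliberately discarded so the
        # original unicode string is what gets returned.
        text.encode(encoding)
    except UnicodeEncodeError:
        text = unicode_to_ascii_html(text)
    return text

print(unicode2html(u'Zürich'))   # 'Zürich' (fits into latin-1, returned unchanged)
print(unicode2html(u'Łódź'))     # '&#321;&#243;d&#378;' (entity fallback)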
wikipedia.output(u'You did not give me anything to do, quitting.')
|
wikipedia.showHelp('image')
|
def main(): oldImage = None newImage = None summary = '' always = False # read command line parameters for arg in wikipedia.handleArgs(): if arg == '-always': always = True elif arg.startswith('-summary'): if len(arg) == len('-summary'): summary = wikipedia.input(u'Choose an edit summary: ') else: summary = arg[len('-summary:'):] else: if oldImage: newImage = arg else: oldImage = arg if not oldImage: wikipedia.output(u'You did not give me anything to do, quitting.') else: mysite = wikipedia.getSite() ns = mysite.image_namespace() oldImagePage = wikipedia.Page(mysite, ns + ':' + oldImage) gen = pagegenerators.FileLinksGenerator(oldImagePage) preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = ImageRobot(preloadingGen, oldImage, newImage, summary, always) bot.run()
|
'is': u'Kerfissíður',
|
'is': u'Kerfissíða',
|
def __init__(self):
|
pl = wikipedia.PageLink(wikipedia.mylang, listpage)
|
try: pl = wikipedia.PageLink(wikipedia.mylang, listpage) except NoPage: print 'The page ' + listpage + ' could not be loaded from the server.' sys.exit()
|
def add_category(): print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input('Wikipedia page with list of pages to change: ') if listpage: pl = wikipedia.PageLink(wikipedia.mylang, listpage) pagenames = pl.links() else: refpage = wikipedia.input('Wikipedia page that is now linked to: ') pl = wikipedia.PageLink(wikipedia.mylang, refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input('Category to add (do not give namespace) : ', encode = True) newcat = newcat newcat = newcat.encode(wikipedia.code2encoding(wikipedia.mylang)) newcat = newcat[:1].capitalize() + newcat[1:] print newcat ns = wikipedia.family.category_namespaces(wikipedia.mylang) catpl = wikipedia.PageLink(wikipedia.mylang, ns[0].encode(wikipedia.code2encoding(wikipedia.mylang))+':'+newcat) print "Will add %s"%catpl.aslocallink() answer = '' for nm in pagenames: pl2 = wikipedia.PageLink(wikipedia.mylang, nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input("%s [y/n/a(ll)] : "%(pl2.asasciilink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input("This should be used if and only if you are sure that your links are correct !!! Are you sure ? [y/n] : ") if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: print "%s doesn't exit yet. Ignoring."%(pl2.aslocallink()) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(wikipedia.mylang,arg.args[0]) print "WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink()) else: print "Current categories: ",cats if catpl in cats: print "%s already has %s"%(pl.aslocallink(),catpl.aslocallink()) else: cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text, comment = catpl.aslocallink().encode(wikipedia.code2encoding(wikipedia.mylang)))
|
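The row above guards the list-page lookup in add_category so a missing page yields a readable message and a clean exit rather than an unhandled NoPage traceback. A generic sketch of that catch-and-exit counterpart to the log-and-reraise pattern shown earlier; NoPage and load_page are stand-ins here:

import sys

class NoPage(Exception):
    pass

def load_page(title):
    # Pretend the wiki reports that the page does not exist.
    raise NoPage(title)

listpage = 'List of example pages'
try:
    page = load_page(listpage)
except NoPage:
    print('The page ' + listpage + ' could not be loaded from the server.')
    sys.exit(1)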