rem (stringlengths 0–322k) | add (stringlengths 0–2.05M) | context (stringlengths 8–228k)
---|---|---|
answer=raw_input('submit y/n ?')
|
if ask:
    answer=raw_input('submit y/n ?')
else:
    answer='y'
|
def do(year):
    page=str(year)
    if debug:
        page='Robottest'
    text=wikipedia.getPage(mylang,page)
    orgtext=text
    # Replace all of these by the standardized formulae
    text=R4.sub("\r\n",text)
    text=R5.sub("\r\n\r\n",text)
    text=R10.sub("\r\n",text)
    text=R10.sub("\r\n",text)
    text=R10.sub("\r\n",text)
    text=R11.sub("\r\n",text)
    text=R12.sub("\r\n",text)
    #if R6.search(text):
    #m=R6.search(text).group(0)
    #print "MATCH:", len(m),repr(m)
    text=R6.sub("\r\n",text)
    text=R7.sub("\r\n",text)
    text=R8.sub("\r\n",text)
    text=R9.sub("\r\n",text)
    # Must be last
    text=R3.sub("\r\n"+header(year)+"\r\n'''Gebeurtenissen''':\r\n",text)
    text=R1.sub("\r\n\r\n----\r\n'''Geboren''':\r\n",text)
    text=R2.sub("\r\n\r\n----\r\n'''Overleden''':\r\n",text)
    if debug:
        print text
    else:
        print "="*70
        f=open('/tmp/wik.in','w')
        f.write(orgtext)
        f.close()
        f=open('/tmp/wik.out','w')
        f.write(text)
        f.close()
        f=os.popen('diff -u /tmp/wik.in /tmp/wik.out','r')
        print f.read()
        print "="*70
        answer=raw_input('submit y/n ?')
        if answer=='y':
            status,reason,data=wikipedia.putPage(mylang,page,text)
            print status,reason
        else:
            print "===Not changed==="
|
self.site._fill()
|
self.site._loadCookies()
|
def login(self):#, anonymous):
    """Initialises site and username data"""#, or anonymous"""
    if False:#anonymous:
        self.site = wikipedia.getSite(user=None)
    else:
        self.username = self.options.username or wikipedia.input(u"Username:")
        self.site = wikipedia.getSite(user=self.username)
    self.site._fill() # load cookies
    if not self.site._loggedin:
        password = getpass.getpass("Password: ")
        cookie = login.login(self.site, self.username, password)
        if not cookie:
            sys.exit("Login failed")
        login.storecookiedata(cookie, self.site, self.username)
        wikipedia.output(u"Login succesful")
|
cookie = login.login(self.site, self.username, password)
|
cookie = login.LoginManager(self.username, password, self.site)
|
def login(self):#, anonymous):
    """Initialises site and username data"""#, or anonymous"""
    if False:#anonymous:
        self.site = wikipedia.getSite(user=None)
    else:
        self.username = self.options.username or wikipedia.input(u"Username:")
        self.site = wikipedia.getSite(user=self.username)
    self.site._fill() # load cookies
    if not self.site._loggedin:
        password = getpass.getpass("Password: ")
        cookie = login.login(self.site, self.username, password)
        if not cookie:
            sys.exit("Login failed")
        login.storecookiedata(cookie, self.site, self.username)
        wikipedia.output(u"Login succesful")
|
login.storecookiedata(cookie, self.site, self.username)
|
def login(self):#, anonymous):
    """Initialises site and username data"""#, or anonymous"""
    if False:#anonymous:
        self.site = wikipedia.getSite(user=None)
    else:
        self.username = self.options.username or wikipedia.input(u"Username:")
        self.site = wikipedia.getSite(user=self.username)
    self.site._fill() # load cookies
    if not self.site._loggedin:
        password = getpass.getpass("Password: ")
        cookie = login.login(self.site, self.username, password)
        if not cookie:
            sys.exit("Login failed")
        login.storecookiedata(cookie, self.site, self.username)
        wikipedia.output(u"Login succesful")
|
|
('pages', pagenames),
|
('pages', UnicodeToAsciiHtml(pagenames)),
|
def getData(self):
    import httplib
    try:
        addr = self.addr%special[self.code]
    except KeyError:
        print "BUG: Can not find name of Special in %s:" % self.code
        raise
    pagenames = u'\r\n'.join([x.hashfreeLinkname() for x in self.pages])
    data = urlencode((
        ('action', 'submit'),
        ('pages', pagenames),
        ('curonly', 'True'),
    ))
    headers = {"Content-type": "application/x-www-form-urlencoded", "User-agent": "RobHooftWikiRobot/1.0"}
    # Slow ourselves down
    get_throttle(requestsize = len(self.pages))
    # Now make the actual request to the server
    conn = httplib.HTTPConnection(langs[self.code])
    conn.request("POST", addr, data, headers)
    response = conn.getresponse()
    data = response.read()
    conn.close()
    return data
|
ccToolkit = CosmeticChangesToolkit(page.site(), debug = True)
changedText = ccToolkit.change(page.get())
if changedText != page.get():
    if not self.acceptall:
        choice = wikipedia.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N')
        if choice in ['a', 'A']:
            self.acceptall = True
    if self.acceptall or choice in ['y', 'Y']:
        page.put(changedText)
|
try:
    ccToolkit = CosmeticChangesToolkit(page.site(), debug = True)
    changedText = ccToolkit.change(page.get())
    if changedText != page.get():
        if not self.acceptall:
            choice = wikipedia.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N')
            if choice in ['a', 'A']:
                self.acceptall = True
        if self.acceptall or choice in ['y', 'Y']:
            page.put(changedText)
except wikipedia.NoPage:
    print "Page %s does not exist?!" % page.aslink()
except wikipedia.IsRedirectPage:
    print "Page %s is a redirect; skipping." % page.aslink()
except wikipedia.LockedPage:
    print "Page %s is locked?!" % page.aslink()
|
def run(self):
    for page in self.generator:
        ccToolkit = CosmeticChangesToolkit(page.site(), debug = True)
        changedText = ccToolkit.change(page.get())
        if changedText != page.get():
            if not self.acceptall:
                choice = wikipedia.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N')
                if choice in ['a', 'A']:
                    self.acceptall = True
            if self.acceptall or choice in ['y', 'Y']:
                page.put(changedText)
|
for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'cosmetic_changes') if arg: if arg.startswith('-start:'): gen = pagegenerators.AllpagesPageGenerator(arg[7:]) elif arg.startswith('-ref:'): referredPage = wikipedia.Page(wikipedia.getSite(), arg[5:]) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links:'): linkingPage = wikipedia.Page(wikipedia.getSite(), arg[7:]) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-file:'): gen = pagegenerators.TextfilePageGenerator(arg[6:]) elif arg.startswith('-cat:'): cat = catlib.Category(wikipedia.getSite(), arg[5:]) gen = pagegenerators.CategorizedPageGenerator(cat) else: pageTitle.append(arg)
|
for arg in wikipedia.handleArgs(): if arg.startswith('-start:'): gen = pagegenerators.AllpagesPageGenerator(arg[7:]) elif arg.startswith('-ref:'): referredPage = wikipedia.Page(wikipedia.getSite(), arg[5:]) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links:'): linkingPage = wikipedia.Page(wikipedia.getSite(), arg[7:]) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-file:'): gen = pagegenerators.TextfilePageGenerator(arg[6:]) elif arg.startswith('-cat:'): cat = catlib.Category(wikipedia.getSite(), arg[5:]) gen = pagegenerators.CategorizedPageGenerator(cat) else: pageTitle.append(arg)
|
def main(): #page generator gen = None pageTitle = [] for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'cosmetic_changes') if arg: if arg.startswith('-start:'): gen = pagegenerators.AllpagesPageGenerator(arg[7:]) elif arg.startswith('-ref:'): referredPage = wikipedia.Page(wikipedia.getSite(), arg[5:]) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links:'): linkingPage = wikipedia.Page(wikipedia.getSite(), arg[7:]) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-file:'): gen = pagegenerators.TextfilePageGenerator(arg[6:]) elif arg.startswith('-cat:'): cat = catlib.Category(wikipedia.getSite(), arg[5:]) gen = pagegenerators.CategorizedPageGenerator(cat) else: pageTitle.append(arg) if pageTitle: page = wikipedia.Page(wikipedia.getSite(), ' '.join(pageTitle)) gen = iter([page]) if not gen: wikipedia.showHelp('cosmetic_changes') else: preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = CosmeticChangesBot(preloadingGen) bot.run()
|
wikipedia.showHelp('cosmetic_changes')
|
wikipedia.showHelp()
|
def main(): #page generator gen = None pageTitle = [] for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, 'cosmetic_changes') if arg: if arg.startswith('-start:'): gen = pagegenerators.AllpagesPageGenerator(arg[7:]) elif arg.startswith('-ref:'): referredPage = wikipedia.Page(wikipedia.getSite(), arg[5:]) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links:'): linkingPage = wikipedia.Page(wikipedia.getSite(), arg[7:]) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-file:'): gen = pagegenerators.TextfilePageGenerator(arg[6:]) elif arg.startswith('-cat:'): cat = catlib.Category(wikipedia.getSite(), arg[5:]) gen = pagegenerators.CategorizedPageGenerator(cat) else: pageTitle.append(arg) if pageTitle: page = wikipedia.Page(wikipedia.getSite(), ' '.join(pageTitle)) gen = iter([page]) if not gen: wikipedia.showHelp('cosmetic_changes') else: preloadingGen = pagegenerators.PreloadingGenerator(gen) bot = CosmeticChangesBot(preloadingGen) bot.run()
|
wikipedia.output(u'ERROR: Username for %s:%s is undefined.\nIf you have an account for that site, please add such a line to user-config.py:\n\nusernames[\'%s\'][\'%s\'] = \'myUsername\'')
|
wikipedia.output(u'ERROR: Username for %s:%s is undefined.\nIf you have an account for that site, please add such a line to user-config.py:\n\nusernames[\'%s\'][\'%s\'] = \'myUsername\'' % (self.site.family.name, self.site.lang, self.site.family.name, self.site.lang))
|
def __init__(self, username = None, password = None, site = None):
    self.site = site or wikipedia.getSite()
    try:
        self.username = username or config.usernames[self.site.family.name][self.site.lang]
    except:
        wikipedia.output(u'ERROR: Username for %s:%s is undefined.\nIf you have an account for that site, please add such a line to user-config.py:\n\nusernames[\'%s\'][\'%s\'] = \'myUsername\'')
        sys.exit(1)
    self.password = password
|
print 'testing'
|
def wikiwrap(self):
    print 'testing'
    for subentry in self.subentries:
        print 'testing'
        entry='test'
|
|
print 'testing'
entry='test'
|
entry=subentry.wikiwrap(self.wikilang) + '\n\n'
|
def wikiwrap(self):
    print 'testing'
    for subentry in self.subentries:
        print 'testing'
        entry='test'
|
subentry=langheader(wikilang) + '\n'
|
subentry=langheader[wikilang] + '\n'
|
def wikiwrap(self,wikilang):
    for meaning in self.meanings:
        subentry=langheader(wikilang) + '\n' # langheader is a dictionary that has the proper way to create a header indicating the language of a subentry for this Wiktionary
        term=meaning.term
        subentry+=posheader(wikilang,term.pos) # posheader is a dictionary that has the proper way to create headers indicating part of speech
        subentry+='\n'
        subentry+=term.getTerm + ' ' + term.getGender + ' ' + meaning.definition
        subentry+='\n\n'
    return subentry
|
subentry+=posheader(wikilang,term.pos)
|
subentry+=posheader[wikilang]
|
def wikiwrap(self,wikilang):
    for meaning in self.meanings:
        subentry=langheader(wikilang) + '\n' # langheader is a dictionary that has the proper way to create a header indicating the language of a subentry for this Wiktionary
        term=meaning.term
        subentry+=posheader(wikilang,term.pos) # posheader is a dictionary that has the proper way to create headers indicating part of speech
        subentry+='\n'
        subentry+=term.getTerm + ' ' + term.getGender + ' ' + meaning.definition
        subentry+='\n\n'
    return subentry
|
subentry+=term.getTerm + ' ' + term.getGender + ' ' + meaning.definition
|
subentry= subentry + term.getTerm() + ' ' + term.getGender() + ' ' + meaning.definition()
|
def wikiwrap(self,wikilang):
    for meaning in self.meanings:
        subentry=langheader(wikilang) + '\n' # langheader is a dictionary that has the proper way to create a header indicating the language of a subentry for this Wiktionary
        term=meaning.term
        subentry+=posheader(wikilang,term.pos) # posheader is a dictionary that has the proper way to create headers indicating part of speech
        subentry+='\n'
        subentry+=term.getTerm + ' ' + term.getGender + ' ' + meaning.definition
        subentry+='\n\n'
    return subentry
|
return(gender)
|
return(self.gender)
|
def getGender(self): return(gender)
|
apage = WiktionaryEntry('nl',u'iets')
|
apage = WiktionaryEntry('nl',u'iemand')
|
def wikiwrap(self,wikilang): return()
|
frtrans = Noun('fr',u"quelque'chose")
|
frtrans = Noun('fr',u"quelqu'un")
|
def wikiwrap(self,wikilang): return()
|
entrans = Noun('en',u'something')
|
entrans = Noun('en',u'somebody')
|
def wikiwrap(self,wikilang): return()
|
ameaning = Meaning(u'een ding',aword)
|
ameaning = Meaning(u'een persoon',aword)
|
def wikiwrap(self,wikilang): return()
|
t= apage.wikiwrap
|
t=apage.wikiwrap()
|
def wikiwrap(self,wikilang): return()
|
if choice2 == 'y': redir_text = '
|
if choice == 'y': redir_text = '
|
def treat(self, refpl, disambPl): """ Parameters: disambPl - The disambiguation page or redirect we don't want anything to link on refpl - A page linking to disambPl Returns False if the user pressed q to completely quit the program. Otherwise, returns True. """ if self.mylang in link_trail: linktrail=link_trail[self.mylang] else: linktrail='[a-z]*' trailR=re.compile(linktrail) # The regular expression which finds links. Results consist of three groups: # group(1) is the target page title, that is, everything before | or ]. # group(2) is the alternative link title, that's everything between | and ]. # group(3) is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. linkR=re.compile(r'\[\[([^\]\|]*)(?:\|([^\]]*))?\]\](' + linktrail + ')')
|
if code in ['meta','bs','ru','eo','ja','zh','hi','he','hu','pl','ko','cs','el','sl','ro']:
|
if code in ['meta','bs','ru','eo','ja','zh','hi','he','hu','pl','ko','cs','el','sl','ro','hr','tr']:
|
def code2encoding(code):
    if code in ['meta','bs','ru','eo','ja','zh','hi','he','hu','pl','ko','cs','el','sl','ro']:
        return 'utf-8'
    return 'iso-8859-1'
|
('wpOldTitle', self.title()), ('wpNewTitle', newtitle),
|
('wpOldTitle', self.title().encode(self.site().encoding())), ('wpNewTitle', newtitle.encode(self.site().encoding())),
|
def move(self, newtitle, reason = None, movetalkpage = True, sysop = False): if reason == None: reason = "Pagemove by bot" if self.namespace() // 2 == 1: movetalkpage = False host = self.site().hostname() address = self.site().move_address() self.site().forceLogin(sysop = sysop) token = self.site().getToken(self, sysop = sysop) predata = [ ('wpOldTitle', self.title()), ('wpNewTitle', newtitle), ('wpReason', reason), ] if movetalkpage: predata.append(('wpMovetalk','1')) else: predata.append(('wpMovetalk','0')) if token: predata.append(('wpEditToken', token)) if self.site().hostname() in config.authenticate.keys(): predata.append(("Content-type","application/x-www-form-urlencoded")) predata.append(("User-agent", "PythonWikipediaBot/1.0")) data = urlencode(tuple(predata)) response = urllib2.urlopen(urllib2.Request('http://' + self.site().hostname() + address, data)) data = '' else: data = urlencode(tuple(predata)) conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.5) Gecko/20041107 Firefox/1.0") if self.site().cookies(sysop = sysop): conn.putheader('Cookie', self.site().cookies(sysop = sysop)) conn.endheaders() conn.send(data)
|
v = urllib.quote(v.encode('utf8'))
|
v = urllib.quote(v)
|
def urlencode(query):
    """This can encode a query so that it can be sent as a query using a http POST request"""
    l=[]
    for k, v in query:
        k = urllib.quote(k)
        v = urllib.quote(v.encode('utf8'))
        l.append(k + '=' + v)
    return '&'.join(l)
|
Returns cookie data if succesful, False otherwise."""
|
Returns cookie data if succesful, None otherwise."""
|
def getCookie(self, remember=True): """Login to wikipedia. remember Remember login (default: True) Returns cookie data if succesful, False otherwise.""" data = {"wpName": self.username, "wpPassword": self.password, "wpLoginattempt": "Aanmelden & Inschrijven", # dutch button label seems to work for all wikis "wpRemember": str(int(bool(remember)))} data = wikipedia.urlencode(data.items()) headers = { "Content-type": "application/x-www-form-urlencoded", "User-agent": "RobHooftWikiRobot/1.0" } pagename = self.site.login_address() conn = httplib.HTTPConnection(self.site.hostname()) conn.request("POST", pagename, data, headers) response = conn.getresponse() conn.close() data = response.read() n=0 Reat=re.compile(': (.*?);') L = [] for eat in response.msg.getallmatchingheaders('set-cookie'): m = Reat.search(eat) if m: n += 1 L.append(m.group(1))
|
return False
|
return None
|
def getCookie(self, remember=True): """Login to wikipedia. remember Remember login (default: True) Returns cookie data if succesful, False otherwise.""" data = {"wpName": self.username, "wpPassword": self.password, "wpLoginattempt": "Aanmelden & Inschrijven", # dutch button label seems to work for all wikis "wpRemember": str(int(bool(remember)))} data = wikipedia.urlencode(data.items()) headers = { "Content-type": "application/x-www-form-urlencoded", "User-agent": "RobHooftWikiRobot/1.0" } pagename = self.site.login_address() conn = httplib.HTTPConnection(self.site.hostname()) conn.request("POST", pagename, data, headers) response = conn.getresponse() conn.close() data = response.read() n=0 Reat=re.compile(': (.*?);') L = [] for eat in response.msg.getallmatchingheaders('set-cookie'): m = Reat.search(eat) if m: n += 1 L.append(m.group(1))
|
filename = 'login-data/%s-%s-%slogin.data' % (self.site.family.name, self.site.lang, self.username)
|
filename = 'login-data/%s-%s-login.data' % (self.site.family.name, self.site.lang)
|
def storecookiedata(self, data): """ Stores cookie data.
|
break
|
return
|
def finish(self, sa = None): """Round up the subject, making any necessary changes. This method should be called exactly once after the todo list has gone empty.
|
loc = len(page)
|
loc = len(text)
|
def spellcheck(page): text = page if correct_html_codes: text = removeHTML(text) loc = 0 while True: wordsearch = re.compile(r'([\s\=\<\>\_]*)([^\s\=\<\>\_]+)') match = wordsearch.search(text,loc) if not match: # No more words on this page break loc += len(match.group(1)) bigword = Word(match.group(2)) smallword = bigword.derive() if not Word(smallword).isCorrect(): replacement = askAlternative(smallword,context=text[max(0,loc-40):loc+len(match.group(2))+40]) if replacement == edit: import gui edit_window = gui.EditBoxWindow() newtxt = edit_window.edit(text,search=smallword) if newtxt: text = newtxt elif replacement == endpage: loc = len(page) else: replacement = bigword.replace(replacement) text = text[:loc] + replacement + text[loc+len(match.group(2)):] loc += len(replacement) else: loc += len(match.group(2)) if correct_html_codes: text = removeHTML(text) return text
|
yield PageLink(mylang, url2link(hit, code = mylang, incode = mylang))
|
yield PageLink(mylang, hit)
|
def allpages(start = '%21%200'): """Iterate over all Wikipedia pages in the home language, starting at the given page.""" start = link2url(start, code = mylang) m=0 while 1: text = getPage(mylang, family.allpagesname(mylang, start), do_quote=0, do_edit=0) #print text if family.version(mylang)=="1.2": R = re.compile('/wiki/(.*?)" *class=[\'\"]printable') else: R = re.compile('title =\"(.*?)\"') n = 0 for hit in R.findall(text): if not ':' in hit: # Some dutch exceptions. if not hit in ['Hoofdpagina','In_het_nieuws']: n = n + 1 yield PageLink(mylang, url2link(hit, code = mylang, incode = mylang)) start = hit + '%20%200' if n < 100: break m += n sys.stderr.write('AllPages: %d done; continuing from "%s";\n'%(m,url2link(start,code='nl',incode='ascii')))
|
def autonomous_problem(pl):
|
def autonomous_problem(pl,reason=''):
|
def autonomous_problem(pl):
    if autonomous:
        f=open('autonomous_problem.dat','a')
        f.write("%s\n"%pl)
        f.close()
        sys.exit(1)
|
f.write("%s\n"%pl)
|
f.write("%s {%s}\n"%(pl,reason))
|
def autonomous_problem(pl):
    if autonomous:
        f=open('autonomous_problem.dat','a')
        f.write("%s\n"%pl)
        f.close()
        sys.exit(1)
|
newcode,newname=h.split(':')
x=wikipedia.PageLink(newcode,newname)
if x not in arr:
    arr[x]=None
|
codes,newname=h.split(':')
if codes=='all':
    codes=wikipedia.biglangs
else:
    codes=codes.split(',')
for newcode in codes:
    x=wikipedia.PageLink(newcode,newname)
    if x not in arr:
        arr[x]=None
|
def autotranslate(pl,arr,same=0): if same: return sametranslate(pl,arr) if hints: for h in hints: newcode,newname=h.split(':') x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None # Autotranslate dates into some other languages, the rest will come from # existing interwiki links. Rdate=re.compile('(\d+)_(%s)'%('|'.join(datetable.keys()))) m=Rdate.match(pl.linkname()) if m: for newcode,fmt in datetable[m.group(2)].items(): newname=fmt%int(m.group(1)) x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None return # Autotranslate years A.D. Ryear=re.compile('^\d+$') m=Ryear.match(pl.linkname()) if m: for newcode in wikipedia.langs: fmt = yearADfmt.get(newcode,'%d') newname = fmt%int(m.group(0)) x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None return # Autotranslate years B.C. Ryear=re.compile('^(\d+)_v._Chr.') m=Ryear.match(pl.linkname()) if m: for newcode in wikipedia.langs: fmt = yearBCfmt.get(newcode) if fmt: newname = fmt%int(m.group(1)) x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None return
|
Rdate=re.compile('(\d+)_(%s)'%('|'.join(datetable.keys())))
m=Rdate.match(pl.linkname())
if m:
    for newcode,fmt in datetable[m.group(2)].items():
        newname=fmt%int(m.group(1))
        x=wikipedia.PageLink(newcode,newname)
        if x not in arr:
            arr[x]=None
    return
|
if mylang==datetablelang:
    Rdate=re.compile('(\d+)_(%s)'%('|'.join(datetable.keys())))
    m=Rdate.match(pl.linkname())
    if m:
        for newcode,fmt in datetable[m.group(2)].items():
            newname=fmt%int(m.group(1))
            x=wikipedia.PageLink(newcode,newname)
            if x not in arr:
                arr[x]=None
        return
|
def autotranslate(pl,arr,same=0): if same: return sametranslate(pl,arr) if hints: for h in hints: newcode,newname=h.split(':') x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None # Autotranslate dates into some other languages, the rest will come from # existing interwiki links. Rdate=re.compile('(\d+)_(%s)'%('|'.join(datetable.keys()))) m=Rdate.match(pl.linkname()) if m: for newcode,fmt in datetable[m.group(2)].items(): newname=fmt%int(m.group(1)) x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None return # Autotranslate years A.D. Ryear=re.compile('^\d+$') m=Ryear.match(pl.linkname()) if m: for newcode in wikipedia.langs: fmt = yearADfmt.get(newcode,'%d') newname = fmt%int(m.group(0)) x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None return # Autotranslate years B.C. Ryear=re.compile('^(\d+)_v._Chr.') m=Ryear.match(pl.linkname()) if m: for newcode in wikipedia.langs: fmt = yearBCfmt.get(newcode) if fmt: newname = fmt%int(m.group(1)) x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None return
|
Ryear=re.compile('^(\d+)_v._Chr.')
m=Ryear.match(pl.linkname())
if m:
    for newcode in wikipedia.langs:
        fmt = yearBCfmt.get(newcode)
        if fmt:
            newname = fmt%int(m.group(1))
            x=wikipedia.PageLink(newcode,newname)
            if x not in arr:
                arr[x]=None
    return
|
if mylang=='nl':
    Ryear=re.compile('^(\d+)_v._Chr.')
    m=Ryear.match(pl.linkname())
    if m:
        for newcode in wikipedia.langs:
            fmt = yearBCfmt.get(newcode)
            if fmt:
                newname = fmt%int(m.group(1))
                x=wikipedia.PageLink(newcode,newname)
                if x not in arr:
                    arr[x]=None
        return
|
def autotranslate(pl,arr,same=0): if same: return sametranslate(pl,arr) if hints: for h in hints: newcode,newname=h.split(':') x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None # Autotranslate dates into some other languages, the rest will come from # existing interwiki links. Rdate=re.compile('(\d+)_(%s)'%('|'.join(datetable.keys()))) m=Rdate.match(pl.linkname()) if m: for newcode,fmt in datetable[m.group(2)].items(): newname=fmt%int(m.group(1)) x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None return # Autotranslate years A.D. Ryear=re.compile('^\d+$') m=Ryear.match(pl.linkname()) if m: for newcode in wikipedia.langs: fmt = yearADfmt.get(newcode,'%d') newname = fmt%int(m.group(0)) x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None return # Autotranslate years B.C. Ryear=re.compile('^(\d+)_v._Chr.') m=Ryear.match(pl.linkname()) if m: for newcode in wikipedia.langs: fmt = yearBCfmt.get(newcode) if fmt: newname = fmt%int(m.group(1)) x=wikipedia.PageLink(newcode,newname) if x not in arr: arr[x]=None return
|
for newpl in pl.interwiki():
    if newpl not in arr:
        print "NOTE: from %s we got the new %s"%(pl,newpl)
        arr[newpl]=None
        n+=1
|
if bigger(pl):
    print "NOTE: %s is bigger than %s, not following references"%(pl,inpl)
else:
    for newpl in pl.interwiki():
        if newpl not in arr:
            print "NOTE: from %s we got the new %s"%(pl,newpl)
            arr[newpl]=None
            n+=1
|
def treestep(arr,pl,abort_on_redirect=0): assert arr[pl] is None print "Getting %s"%pl n=0 try: text=pl.get() except wikipedia.NoPage: print "---> Does not actually exist" arr[pl]='' return 0 except wikipedia.LockedPage: print "---> Locked" arr[pl]=1 return 0 except wikipedia.IsRedirectPage,arg: if abort_on_redirect and pl.code()==mylang: raise newpl=wikipedia.PageLink(pl.code(),str(arg)) arr[pl]='' print "NOTE: %s is a redirect to %s"%(pl,newpl) if not newpl in arr: arr[newpl]=None return 1 return 0 arr[pl]=text for newpl in pl.interwiki(): if newpl not in arr: print "NOTE: from %s we got the new %s"%(pl,newpl) arr[newpl]=None n+=1 return n
|
if pl.code()==mylang:
|
if pl.code()==mylang and m[pl]:
|
def treesearch(pl): arr={pl:None} # First make one step based on the language itself try: n=treestep(arr,pl,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[pl]: print "Mother doesn't exist" return if untranslated: if len(arr)>1: print "Already has translations" return else: newhint=raw_input("Hint:") if not newhint: return hints.append(newhint) # Then add translations if we survived. autotranslate(pl,arr,same=same) modifications=1 while modifications: modifications=0 for newpl in arr.keys(): if arr[newpl] is None: modifications+=treestep(arr,newpl) return arr
|
autonomous_problem(inpl)
|
autonomous_problem(inpl,'Someone refers to another page with us')
|
def treesearch(pl): arr={pl:None} # First make one step based on the language itself try: n=treestep(arr,pl,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[pl]: print "Mother doesn't exist" return if untranslated: if len(arr)>1: print "Already has translations" return else: newhint=raw_input("Hint:") if not newhint: return hints.append(newhint) # Then add translations if we survived. autotranslate(pl,arr,same=same) modifications=1 while modifications: modifications=0 for newpl in arr.keys(): if arr[newpl] is None: modifications+=treestep(arr,newpl) return arr
|
autonomous_problem(inpl)
|
autonomous_problem(inpl,'There are alternatives in %s'%pl.code())
|
def treesearch(pl): arr={pl:None} # First make one step based on the language itself try: n=treestep(arr,pl,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[pl]: print "Mother doesn't exist" return if untranslated: if len(arr)>1: print "Already has translations" return else: newhint=raw_input("Hint:") if not newhint: return hints.append(newhint) # Then add translations if we survived. autotranslate(pl,arr,same=same) modifications=1 while modifications: modifications=0 for newpl in arr.keys(): if arr[newpl] is None: modifications+=treestep(arr,newpl) return arr
|
autonomous_problem(inname)
|
autonomous_problem(inpl,'removing a language')
|
def treesearch(pl): arr={pl:None} # First make one step based on the language itself try: n=treestep(arr,pl,abort_on_redirect=1) except wikipedia.IsRedirectPage: print "Is redirect page" return if n==0 and not arr[pl]: print "Mother doesn't exist" return if untranslated: if len(arr)>1: print "Already has translations" return else: newhint=raw_input("Hint:") if not newhint: return hints.append(newhint) # Then add translations if we survived. autotranslate(pl,arr,same=same) modifications=1 while modifications: modifications=0 for newpl in arr.keys(): if arr[newpl] is None: modifications+=treestep(arr,newpl) return arr
|
supercats = get_supercats(cat)
|
supercats = get_supercats(cat)[:]
|
def treeview(cat, max_depth, current_depth = 0, supercat = None): ''' Returns a multi-line string which contains a tree view of all subcategories of cat, up to level max_depth. Parameters: * cat - a CatLink which will be the tree's root * current_depth - the current level in the tree (for recursion) * supercat - the CatLink of the category we're coming from * max_depth - the limit beyond which no subcategories will be listed ''' # Translations to say that the current category is in more categories than # the one we're coming from also_in_cats = { 'de': u'(auch in %s)', 'en': u'(also in %s)', } result = '' result += ('#' * current_depth) result += '[[:%s|%s]]' % (cat.linkname(), cat.linkname().split(':', 1)[1]) result += ' (%d)' % len(get_articles(cat)) supercats = get_supercats(cat) # If the current cat is not our tree's root if supercat != None: # Find out which other cats are supercats of the current cat try: supercats.remove(supercat) except: pass if supercats != []: supercat_names = [] for i in range(len(supercats)): # create a list of wiki links to the supercategories supercat_names.append('[[:%s|%s]]' % (supercats[i].linkname(), supercats[i].linkname().split(':', 1)[1])) # print this list, seperated with commas, using translations given in also_in_cats result += ' ' + also_in_cats[wikipedia.mylang] % ', '.join(supercat_names) result += '\n' if current_depth < max_depth: for subcat in get_subcats(cat): result += treeview(subcat, max_depth, current_depth + 1, supercat = cat) else: if get_subcats(cat) != []: result += '#' * (current_depth + 1) + '[...]' return result
|
print 'bla2'
|
def __init__(self, parent = None):
    print 'bla2'
    if parent == None:
        # create a new window
        parent = Tk()
    self.myParent = parent
|
|
def getVersionHistory(self, force = False):
|
def getVersionHistory(self, forceReload = False):
|
def getVersionHistory(self, force = False): """ Loads the version history page and returns a list of tuples, where each tuple represents one edit and is built of edit date/time, user name, and edit summary. """ site = self.site() host = site.hostname() url = site.family.version_history_address(site, self.urlname())
|
if not hasattr(self, '_versionhistory') or force = True:
|
if not hasattr(self, '_versionhistory') or forceReload:
|
def getVersionHistory(self, force = False): """ Loads the version history page and returns a list of tuples, where each tuple represents one edit and is built of edit date/time, user name, and edit summary. """ site = self.site() host = site.hostname() url = site.family.version_history_address(site, self.urlname())
|
Page(self, "Wikipedia:Sandbox").get(force = True, sysop = sysop)
|
Page(self, "%s:Sandbox" % self.family.namespace(self.lang, 4)).get(force = True, sysop = sysop)
|
def getToken(self, getalways = True, getagain = False, sysop = False): if getagain or (getalways and ((sysop and not self._sysoptoken) or (not sysop and not self._token))): output(u"Getting page to get a token.") try: Page(self, "Wikipedia:Sandbox").get(force = True, sysop = sysop) except UserBlocked: raise except Error: pass if sysop: if not self._sysoptoken: return False else: return self._sysoptoken else: if not self._token: return False else: return self._token
|
codes, newname=h.split(':')
|
codes, newname = h.split(':', 1)
|
def autotranslate(pl, arr, same=0): if same: return sametranslate(pl, arr) if hints: for h in hints: codes, newname=h.split(':') if codes == 'all': codes = wikipedia.biglangs else: codes = codes.split(',') for newcode in codes: x = wikipedia.PageLink(newcode, newname) if x not in arr: arr[x] = None # Autotranslate dates into some other languages, the rest will come from # existing interwiki links. if wikipedia.mylang == datetablelang: Rdate = re.compile('(\d+)_(%s)' % ('|'.join(datetable.keys()))) m = Rdate.match(pl.linkname()) if m: for newcode, fmt in datetable[m.group(2)].items(): newname = fmt % int(m.group(1)) x = wikipedia.PageLink(newcode,newname) if x not in arr: arr[x] = None return # Autotranslate years A.D. Ryear = re.compile('^\d+$') m = Ryear.match(pl.linkname()) if m: for newcode in wikipedia.seriouslangs: if newcode!='ja': fmt = '%d' newname = fmt%int(m.group(0)) x=wikipedia.PageLink(newcode, newname) if x not in arr: arr[x] = None return # Autotranslate years B.C. if wikipedia.mylang == 'nl': Ryear = re.compile('^(\d+)_v._Chr.') m = Ryear.match(pl.linkname()) if m: for newcode in wikipedia.seriouslangs: fmt = yearBCfmt.get(newcode) if fmt: newname = fmt % int(m.group(1)) x=wikipedia.PageLink(newcode, newname) if x not in arr: arr[x] = None return
|
print data
|
output(data, decoder = code2encoding(mylang))
|
def putPage(code, name, text, comment = None, watchArticle = False, minorEdit = True, newPage = False): """Upload 'text' on page 'name' to the 'code' language wikipedia. Use of this routine can normally be avoided; use PageLink.put instead. """ import httplib put_throttle() host = family.hostname(code) address = family.put_address(code, space2underline(name)) if comment is None: comment=action if not loggedin or code != mylang: comment = username + ' - ' + comment try: text = forCode(text, code) predata = [ ('wpSave', '1'), ('wpSummary', comment), ('wpTextbox1', text)] if newPage and newPage != '0': predata.append(('wpEdittime', '')) else: predata.append(('wpEdittime', edittime[code, link2url(name, code)])) if minorEdit and minorEdit != '0': predata.append(('wpMinoredit', '1')) if watchArticle and watchArticle != '0': predata.append(('wpWatchthis', '1')) data = urlencode(tuple(predata)) except KeyError: print edittime raise if debug: print text print address print data return None, None, None output(url2unicode("Changing page %s:%s"%(code,name), language = code)) conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "RobHooftWikiRobot/1.0") if cookies and code == mylang: conn.putheader('Cookie',cookies) conn.endheaders() conn.send(data) response = conn.getresponse() data = response.read() conn.close() print data return response.status, response.reason, data
|
category called newCatTitle. Moves subcategories of oldCat as well. oldCat should be a Category object, newCatTitle should be the new name as a string, without namespace. If newCatTitle is None, the category will be removed.
|
category newCat. Moves subcategories of oldCat as well. oldCat and newCat should be Category objects. If newCat is None, the category will be removed.
|
def change_category(article, oldCat, newCat, comment=None): """ Given an article which is in category oldCat, moves it to category called newCatTitle. Moves subcategories of oldCat as well. oldCat should be a Category object, newCatTitle should be the new name as a string, without namespace. If newCatTitle is None, the category will be removed. """ cats = article.categories() site = article.site() removed = False # Iterate over a copy of the list of categories, as we may # remove elements from the original list while iterating for cat in cats[:]: if cat == oldCat: sortKey = cat.sortKey cats.remove(cat) removed = True if not removed: wikipedia.output(u'ERROR: %s is not in category %s!' % (article.aslink(), oldCat.title())) return if newCat is not None: newCat = Category(site, newCat.title(), sortKey = sortKey) cats.append(newCat) text = article.get() text = wikipedia.replaceCategoryLinks(text, cats) article.put(text, comment)
|
for cat in cats[:]:
|
for i in range(len(cats)):
    cat = cats[i]
|
def change_category(article, oldCat, newCat, comment=None): """ Given an article which is in category oldCat, moves it to category called newCatTitle. Moves subcategories of oldCat as well. oldCat should be a Category object, newCatTitle should be the new name as a string, without namespace. If newCatTitle is None, the category will be removed. """ cats = article.categories() site = article.site() removed = False # Iterate over a copy of the list of categories, as we may # remove elements from the original list while iterating for cat in cats[:]: if cat == oldCat: sortKey = cat.sortKey cats.remove(cat) removed = True if not removed: wikipedia.output(u'ERROR: %s is not in category %s!' % (article.aslink(), oldCat.title())) return if newCat is not None: newCat = Category(site, newCat.title(), sortKey = sortKey) cats.append(newCat) text = article.get() text = wikipedia.replaceCategoryLinks(text, cats) article.put(text, comment)
|
            sortKey = cat.sortKey
            cats.remove(cat)
            removed = True
    if not removed:
        wikipedia.output(u'ERROR: %s is not in category %s!' % (article.aslink(), oldCat.title()))
        return
    if newCat is not None:
        newCat = Category(site, newCat.title(), sortKey = sortKey)
        cats.append(newCat)
    text = article.get()
    text = wikipedia.replaceCategoryLinks(text, cats)
    article.put(text, comment)
|
            sortKey = cat.sortKey
            if not newCat:
                cats = cats[:i] + cats[i+1:]
            else:
                newCat = Category(site, newCat.title(), sortKey = sortKey)
                cats = cats[:i] + [newCat] + cats[i+1:]
            text = article.get()
            text = wikipedia.replaceCategoryLinks(text, cats)
            article.put(text, comment)
            return
    wikipedia.output(u'ERROR: %s is not in category %s!' % (article.aslink(), oldCat.title()))
    return
|
def change_category(article, oldCat, newCat, comment=None): """ Given an article which is in category oldCat, moves it to category called newCatTitle. Moves subcategories of oldCat as well. oldCat should be a Category object, newCatTitle should be the new name as a string, without namespace. If newCatTitle is None, the category will be removed. """ cats = article.categories() site = article.site() removed = False # Iterate over a copy of the list of categories, as we may # remove elements from the original list while iterating for cat in cats[:]: if cat == oldCat: sortKey = cat.sortKey cats.remove(cat) removed = True if not removed: wikipedia.output(u'ERROR: %s is not in category %s!' % (article.aslink(), oldCat.title())) return if newCat is not None: newCat = Category(site, newCat.title(), sortKey = sortKey) cats.append(newCat) text = article.get() text = wikipedia.replaceCategoryLinks(text, cats) article.put(text, comment)
|
else: newURL = '%s://%s/%s' % (self.protocol, self.host, redirTarget)
|
else:
    directory = self.path[:self.path.rindex('/') + 1]
    while redirTarget.startswith('../'):
        redirTarget = redirTarget[3:]
        directory = directory[:-1]
        directory = directory[:directory.rindex('/') + 1]
    newURL = '%s://%s%s%s' % (self.protocol, self.host, directory, redirTarget)
|
def resolveRedirect(self): ''' Requests the header from the server. If the page is an HTTP redirect, returns the redirect target URL as a string. Otherwise returns None. ''' conn = httplib.HTTPConnection(self.host) conn.request('HEAD', '%s%s' % (self.path, self.query), None, self.header) response = conn.getresponse() newURL = None if response.status >= 300 and response.status <= 399: redirTarget = response.getheader('Location') if redirTarget: if redirTarget.startswith('http://') or redirTarget.startswith('https://'): newURL = redirTarget elif redirTarget.startswith('/'): newURL = '%s://%s%s' % (self.protocol, self.host, redirTarget) else: newURL = '%s://%s/%s' % (self.protocol, self.host, redirTarget) # wikipedia.output(u'%s is a redirect to %s' % (self.url, newURL)) return newURL
|
print num,code
|
def _addlang(self, code, location, namespaces):
    """Add a new language to the langs and namespaces of the family.
    This is supposed to be called in the constructor of the family."""
    self.langs[code] = location
    for num, val in namespaces.items():
        print num,code
        self.namespaces[num][code]=val
|
|
if not config.categories_last:
|
if not code in config.categories_last:
|
def replaceCategoryLinks(oldtext, new, code = None): """Replace the category links given in the wikitext given in oldtext by the new links given in new. 'new' should be a list of category pagelink objects. """ if code is None: code = mylang # first remove interwiki links and add them later, so that # interwiki tags appear below category tags if both are set # to appear at the bottom of the article if not code in config.categories_last: interwiki_links = getLanguageLinks(oldtext) oldtext = removeLanguageLinks(oldtext) s = categoryFormat(new) s2 = removeCategoryLinks(oldtext, code) if s: if mylang in config.category_attop: newtext = s + config.category_text_separator + s2 else: newtext = s2 + config.category_text_separator + s else: newtext = s2 # now re-add interwiki links if not config.categories_last: newtext = replaceLanguageLinks(newtext, interwiki_links) return newtext
|
newcat = wikipedia.input('Category to add (do not give namespace) : ')
newcat = unicode(newcat, config.console_encoding)
|
newcat = wikipedia.input('Category to add (do not give namespace) : ', encode = True)
newcat = newcat
|
def add_category(): print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input('Wikipedia page with list of pages to change: ') if listpage: pl = wikipedia.PageLink(wikipedia.mylang, listpage) pagenames = pl.links() else: refpage = wikipedia.input('Wikipedia page that is now linked to: ') pl = wikipedia.PageLink(wikipedia.mylang, refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input('Category to add (do not give namespace) : ') newcat = unicode(newcat, config.console_encoding) newcat = newcat.encode(wikipedia.code2encoding(wikipedia.mylang)) newcat = newcat[:1].capitalize() + newcat[1:] print newcat ns = wikipedia.family.category_namespaces(wikipedia.mylang) catpl = wikipedia.PageLink(wikipedia.mylang, ns[0].encode(wikipedia.code2encoding(wikipedia.mylang))+':'+newcat) print "Will add %s"%catpl.aslocallink() answer = '' for nm in pagenames: pl2 = wikipedia.PageLink(wikipedia.mylang, nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input("%s [y/n/a(ll)] : "%(pl2.asasciilink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input("This should be used if and only if you are sure that your links are correct !!! Are you sure ? [y/n] : ") if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: print "%s doesn't exit yet. Ignoring."%(pl2.aslocallink()) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(wikipedia.mylang,arg.args[0]) print "WARNING: %s is redirect to [[%s]]. Ignoring."%(pl2.aslocallink(),pl3.aslocallink()) else: print "Current categories: ",cats if catpl in cats: print "%s already has %s"%(pl.aslocallink(),catpl.aslocallink()) else: cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text, comment = catpl.aslocallink().encode(wikipedia.code2encoding(wikipedia.mylang)))
|
conn = httplib.HTTPConnection(self.host)
|
if self.scheme == 'http':
    conn = httplib.HTTPConnection(self.host)
elif self.scheme == 'https':
    conn = httplib.HTTPSConnection(self.host)
|
def resolveRedirect(self): ''' Requests the header from the server. If the page is an HTTP redirect, returns the redirect target URL as a string. Otherwise returns None. ''' conn = httplib.HTTPConnection(self.host) conn.request('HEAD', '%s%s' % (self.path, self.query), None, self.header) response = conn.getresponse() if response.status >= 300 and response.status <= 399: redirTarget = response.getheader('Location') if redirTarget: if redirTarget.startswith('http://') or redirTarget.startswith('https://'): self.changeUrl(redirTarget) return True elif redirTarget.startswith('/'): self.changeUrl('%s://%s%s' % (self.protocol, self.host, redirTarget)) return True else: # redirect to relative position # cut off filename directory = self.path[:self.path.rindex('/') + 1] # handle redirect to parent directory while redirTarget.startswith('../'): redirTarget = redirTarget[3:] # change /foo/bar/ to /foo/ directory = directory[:-1] directory = directory[:directory.rindex('/') + 1] self.changeUrl('%s://%s%s%s' % (self.protocol, self.host, directory, redirTarget)) return True else: return False # not a redirect
|
conn = httplib.HTTPConnection(self.host)
|
if self.scheme == 'http':
    conn = httplib.HTTPConnection(self.host)
elif self.scheme == 'https':
    conn = httplib.HTTPSConnection(self.host)
|
def check(self): """ Returns True and the server status message if the page is alive. Otherwise returns false """ try: wasRedirected = self.resolveRedirect() except httplib.error, arg: return False, u'HTTP Error: %s' % arg except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg if wasRedirected: if self.url in self.redirectChain: return False, u'HTTP Redirect Loop: %s' % ' -> '.join(self.redirectChain + [self.url]) elif len(self.redirectChain) >= 19: return False, u'Long Chain of Redirects: %s' % ' -> '.join(self.redirectChain + [self.url]) else: redirChecker = LinkChecker(self.url, self.redirectChain) return redirChecker.check() else: try: conn = httplib.HTTPConnection(self.host) except httplib.error, arg: return False, u'HTTP Error: %s' % arg try: conn.request('GET', '%s%s' % (self.path, self.query), None, self.header) except socket.error, arg: return False, u'Socket Error: %s' % arg except UnicodeEncodeError, arg: return False, u'Non-ASCII Characters in URL: %s' % arg try: response = conn.getresponse() except Exception, arg: return False, u'Error: %s' % arg #wikipedia.output('%s: %s' % (self.url, response.status)) # site down if the server status is between 400 and 499 siteDown = response.status in range(400, 500) return not siteDown, '%s %s' % (response.status, response.reason)
|
edittime[code, link2url(name, code)] = 0
|
edittime[code, link2url(name, code)] = "0"
|
def getPage(code, name, do_edit = 1, do_quote = 1): """Get the contents of page 'name' from the 'code' language wikipedia Do not use this directly; use the PageLink object instead.""" output(url2unicode("Getting page %s:%s"%(code,name), language = code)) host = family.hostname(code) name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name != urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) address = family.get_address(code, name) if do_edit: address += '&action=edit&printable=yes' if debug: print host, address # Make sure Brion doesn't get angry by slowing ourselves down. get_throttle() text, charset = getUrl(host,address) # Extract the actual text from the textedit field if do_edit: if debug: print "Raw:", len(text), type(text), text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code].lower() == charset.lower(), "charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code] = charset if code2encoding(code).lower() != charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: edittime[code, link2url(name, code)] = 0 try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: print "BUG: Yikes: No text area.",host,address print repr(text) raise NoPage(code, name) i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(code, name) if debug: print text[i1:i2] m = redirectRe(code).match(text[i1:i2]) if m: output(u"DBG> %s is redirect to %s" % (url2unicode(name, language = code), unicode(m.group(1), code2encoding(code)))) raise IsRedirectPage(m.group(1)) if edittime[code, name] == 0: print "DBG> page may be locked?!" pass #raise LockedPage() x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] else: x = text # If not editing # Convert to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(charset) try: x,l = decode_func(x) except UnicodeError: print code,name print repr(x) raise return x
|
if edittime[code, name] == 0:
|
if edittime[code, name] == "0":
|
def getPage(code, name, do_edit = 1, do_quote = 1): """Get the contents of page 'name' from the 'code' language wikipedia Do not use this directly; use the PageLink object instead.""" output(url2unicode("Getting page %s:%s"%(code,name), language = code)) host = family.hostname(code) name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name != urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) address = family.get_address(code, name) if do_edit: address += '&action=edit&printable=yes' if debug: print host, address # Make sure Brion doesn't get angry by slowing ourselves down. get_throttle() text, charset = getUrl(host,address) # Extract the actual text from the textedit field if do_edit: if debug: print "Raw:", len(text), type(text), text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code].lower() == charset.lower(), "charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code] = charset if code2encoding(code).lower() != charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: edittime[code, link2url(name, code)] = 0 try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: print "BUG: Yikes: No text area.",host,address print repr(text) raise NoPage(code, name) i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(code, name) if debug: print text[i1:i2] m = redirectRe(code).match(text[i1:i2]) if m: output(u"DBG> %s is redirect to %s" % (url2unicode(name, language = code), unicode(m.group(1), code2encoding(code)))) raise IsRedirectPage(m.group(1)) if edittime[code, name] == 0: print "DBG> page may be locked?!" pass #raise LockedPage() x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] else: x = text # If not editing # Convert to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(charset) try: x,l = decode_func(x) except UnicodeError: print code,name print repr(x) raise return x
|
pass
|
def getPage(code, name, do_edit = 1, do_quote = 1): """Get the contents of page 'name' from the 'code' language wikipedia Do not use this directly; use the PageLink object instead.""" output(url2unicode("Getting page %s:%s"%(code,name), language = code)) host = family.hostname(code) name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name != urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) address = family.get_address(code, name) if do_edit: address += '&action=edit&printable=yes' if debug: print host, address # Make sure Brion doesn't get angry by slowing ourselves down. get_throttle() text, charset = getUrl(host,address) # Extract the actual text from the textedit field if do_edit: if debug: print "Raw:", len(text), type(text), text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code].lower() == charset.lower(), "charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code] = charset if code2encoding(code).lower() != charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: edittime[code, link2url(name, code)] = 0 try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: print "BUG: Yikes: No text area.",host,address print repr(text) raise NoPage(code, name) i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(code, name) if debug: print text[i1:i2] m = redirectRe(code).match(text[i1:i2]) if m: output(u"DBG> %s is redirect to %s" % (url2unicode(name, language = code), unicode(m.group(1), code2encoding(code)))) raise IsRedirectPage(m.group(1)) if edittime[code, name] == 0: print "DBG> page may be locked?!" pass #raise LockedPage() x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] else: x = text # If not editing # Convert to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(charset) try: x,l = decode_func(x) except UnicodeError: print code,name print repr(x) raise return x
|
|
title = url2unicode(title, site = site)
|
title = url2unicode(title, site = insite)
|
def __init__(self, site, title = None, insite = None, tosite = None): """ Constructor. Normally called with two arguments: Parameters: 1) The wikimedia site on which the page resides 2) The title of the page as a unicode string The argument insite can be specified to help decode the name; it is the wikimedia site where this link was found. """ self._site = site if tosite: self._tosite = tosite else: self._tosite = getSite() # Default to home wiki # Clean up the name, it can come from anywhere. # Replace underscores by spaces, also multiple underscores title = re.sub('_+', ' ', title) # Convert HTML entities to unicode title = html2unicode(title) # Convert URL-encoded characters to unicode title = url2unicode(title, site = site) # Remove double spaces title = re.sub(' +', ' ', title) # Remove leading colon if title.startswith(':'): title = title[1:] # Capitalize first letter try: if not site.nocapitalize: title = title[0].upper() + title[1:] except IndexError: # title is empty pass # split up into namespace and rest title = title.split(':', 1) # if the page is not in namespace 0: if len(title) > 1: # translate a default namespace name into the local namespace name for ns in site.family.namespaces.keys(): if title[0] == site.family.namespace('_default', ns): title[0] = site.namespace(ns) # Capitalize the first non-namespace part for ns in site.family.namespaces.keys(): if title[0] == site.namespace(ns): # Remove leading and trailing whitespace from namespace and from rest for i in range(len(title)): title[i] = title[i].strip() if not site.nocapitalize: try: title[1] = title[1][0].upper()+title[1][1:] except IndexError: # title[1] is empty print "WARNING: Strange title %s"%'%3A'.join(title) # In case the part before the colon was not a namespace, we need to # remove leading and trailing whitespace now. title = ':'.join(title).strip() self._title = title self.editRestriction = None self._permalink = None
|
result[insite.getSite(code = lang)] = Page(insite.getSite(code = lang), pagetitle)
|
result[insite.getSite(code = lang)] = Page(insite.getSite(code = lang), pagetitle, insite=insite)
|
def getLanguageLinks(text, insite = None, pageLink = "[[]]"): """ Returns a dictionary with language codes as keys and Page objects as values for each interwiki link found in the text. Do not call this routine directly, use Page objects instead""" if insite == None: insite = getSite() result = {} # Ignore interwiki links within nowiki tags and HTML comments nowikiOrHtmlCommentR = re.compile(r'<nowiki>.*?</nowiki>|<!--.*?-->', re.IGNORECASE | re.DOTALL) match = nowikiOrHtmlCommentR.search(text) while match: text = text[:match.start()] + text[match.end():] match = nowikiOrHtmlCommentR.search(text) # This regular expression will find every link that is possibly an # interwiki link. # NOTE: language codes are case-insensitive and only consist of basic latin # letters and hyphens. interwikiR = re.compile(r'\[\[([a-zA-Z\-]+)\s?:([^\[\]\n]*)\]\]') for lang, pagetitle in interwikiR.findall(text): lang = lang.lower() # Check if it really is in fact an interwiki link to a known # language, or if it's e.g. a category tag or an internal link if lang in insite.family.obsolete: lang = insite.family.obsolete[lang] if lang in insite.family.langs: if '|' in pagetitle: # ignore text after the pipe pagetitle = pagetitle[:pagetitle.index('|')] if not pagetitle: output(u"ERROR: %s - ignoring impossible link to %s:%s" % (pageLink, lang, pagetitle)) else: # we want the actual page objects rather than the titles result[insite.getSite(code = lang)] = Page(insite.getSite(code = lang), pagetitle) return result
|
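The change above passes the source site (insite) into each Page built from a parsed interwiki link, so that titles found on that wiki can later be decoded with its encoding. The parsing itself is a plain regular-expression scan; a self-contained sketch of the same idea using only the standard library (the sample text, the language set and the simplified handling are illustrative, not the bot's exact behaviour):

import re

# Simplified version of the interwiki pattern used in getLanguageLinks above.
interwikiR = re.compile(r'\[\[([a-zA-Z\-]+)\s?:([^\[\]\n]*)\]\]')

sample = u"Some text. [[de:Beispiel]] [[fr:Exemple|label]] [[Category:Foo]]"
known_langs = set(['de', 'en', 'fr'])

for lang, title in interwikiR.findall(sample):
    lang = lang.lower()
    if lang not in known_langs:
        continue                            # e.g. 'category' is not a language code
    if '|' in title:
        title = title[:title.index('|')]    # ignore text after the pipe
    print lang, '->', title                  # de -> Beispiel, fr -> Exemple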
self.preload(somePages) for refpl in somePages: yield refpl
|
if somePages != []: self.preload(somePages) for refpl in somePages: yield refpl
|
def generate(self): # this array will contain up to 20 pages and will be flushed # after these pages have been preloaded. somePages = [] i = 0 for pl in self.generator.generate(): i += 1 somePages.append(pl) # We don't want to load too many pages at once using XML export. # We only get 20 at a time. if i >= self.pageNumber: self.preload(somePages) for refpl in somePages: yield refpl i = 0 somePages = [] # preload remaining pages self.preload(somePages) for refpl in somePages: yield refpl
|
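The added guard only avoids calling preload() with an empty list once the wrapped generator is exhausted. The surrounding logic is a generic batch-and-flush pattern; a standalone sketch under that reading (preload is passed in as a callable here, it is not the pywikipedia method):

def in_batches(generator, preload, batch_size=20):
    # Yield items from generator, calling preload(batch) on every full batch
    # and once more on the final partial batch -- but only if it is non-empty,
    # which is exactly the guard added in the diff above.
    batch = []
    for item in generator:
        batch.append(item)
        if len(batch) >= batch_size:
            preload(batch)
            for b in batch:
                yield b
            batch = []
    if batch:
        preload(batch)
        for b in batch:
            yield b

# usage sketch:
# for page in in_batches(iter(titles), preload=lambda pages: None, batch_size=20):
#     work_on(page)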
The URLs are dict's keys, and values are lists of tuples where each tuple
|
The URLs are dictionary keys, and values are lists of tuples where each tuple
|
def run(self): linkChecker = LinkChecker(self.url) try: ok, message = linkChecker.check() except: wikipedia.output('Exception while processing URL %s in page %s' % (self.url, self.page.title())) raise if ok: if self.history.setLinkAlive(self.url): wikipedia.output('*Link to %s in [[%s]] is back alive.' % (self.url, self.page.title())) else: wikipedia.output('*[[%s]] links to %s - %s.' % (self.page.title(), self.url, message)) self.history.setLinkDead(self.url, message, self.page)
|
sure that two LinkCheckerThreads can't access the queue at the same time.
|
sure that two LinkCheckerThreads can not access the queue at the same time.
|
def save(self): """ Saves the .dat file to disk. """ datfile = open(self.datfilename, 'w') self.historyDict = pickle.dump(self.historyDict, datfile) datfile.close()
|
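As an aside, the save() shown above assigns the return value of pickle.dump() -- which is None -- back to self.historyDict, so the in-memory history would be wiped after every save. A hedged sketch of what was presumably intended, keeping the same attribute names:

import pickle

def save(self):
    # Serialize the history dict to the .dat file; pickle.dump returns None,
    # so its result must not be assigned back to self.historyDict.
    datfile = open(self.datfilename, 'w')
    pickle.dump(self.historyDict, datfile)
    datfile.close()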
if config.report_dead_links_on_talk:
|
if globalvar.talk:
|
def __init__(self, generator, start ='!'): self.generator = generator self.start = start if config.report_dead_links_on_talk: reportThread = DeadLinkReportThread() # thread dies when program terminates # reportThread.setDaemon(True) reportThread.start() else: reportThread = None self.history = History(reportThread)
|
for arg in wikipedia.handleArgs():
|
args = wikipedia.handleArgs() args = globalvar.handleArgs(args) for arg in args:
|
def main(): gen = None pageTitle = [] for arg in wikipedia.handleArgs(): if arg.startswith('-start:'): start = arg[7:] gen = pagegenerators.AllpagesPageGenerator(start) else: pageTitle.append(arg) if pageTitle: pageTitle = ' '.join(pageTitle) page = wikipedia.Page(wikipedia.getSite(), pageTitle) gen = iter([page]) if gen: gen = pagegenerators.PreloadingGenerator(gen, pageNumber = 240) gen = pagegenerators.RedirectFilterPageGenerator(gen) bot = WeblinkCheckerRobot(gen) try: bot.run() finally: waitTime = 0 # Don't wait longer than 30 seconds for threads to finish. while threading.activeCount() > 2 and waitTime < 30: wikipedia.output(u"Waiting for remaining %i threads to finish, please wait..." % (threading.activeCount() - 2)) # don't count the main thread and report thread # wait 1 second time.sleep(1) waitTime += 1 if threading.activeCount() > 2: wikipedia.output(u'Remaining %i threads will be killed.' % (threading.activeCount() - 2)) # Threads will die automatically because they are daemonic. wikipedia.output(u'Saving history...') bot.history.save() if bot.history.reportThread: bot.history.reportThread.shutdown() # wait until the report thread is shut down; the user can interrupt # it by pressing CTRL-C. #try: try: while bot.history.reportThread.isAlive(): time.sleep(0.1) except KeyboardInterrupt: print 'INTERRUPT' bot.history.reportThread.kill() else: wikipedia.showHelp()
|
site = insite.getSite(code = code) if site in ar: del ar[ar.index(site)] ar2 = ar2 + [site]
|
if code in getSite().family.langs: site = insite.getSite(code = code) if site in ar: del ar[ar.index(site)] ar2 = ar2 + [site]
|
def interwikiFormat(links, insite = None): """Create a suitable string encoding all interwiki links for a wikipedia page. 'links' should be a dictionary with the language names as keys, and either PageLink objects or the link-names of the pages as values. The string is formatted for inclusion in insite (defaulting to your own). """ if insite is None: insite = getSite() if not links: return '' # Security check: site may not refer to itself. for pl in links.values(): if pl.site()==insite: raise ValueError("Trying to add interwiki link to self") s = [] ar = links.keys() ar.sort() putfirst = insite.interwiki_putfirst() if putfirst: #In this case I might have to change the order ar2 = [] for code in putfirst: site = insite.getSite(code = code) if site in ar: del ar[ar.index(site)] ar2 = ar2 + [site] ar = ar2 + ar for site in ar: try: s.append(links[site].aslink()) except AttributeError: s.append('[[%s:%s]]' % (site.linkto(links[site],othersite=insite))) if insite.lang in config.interwiki_on_one_line: sep = ' ' else: sep = '\r\n' s=sep.join(s) + '\r\n' return s
|
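The replacement above compares Site objects instead of raw language codes when honouring interwiki_putfirst(). The reordering itself is simply "move the preferred keys to the front, keep the rest in order"; a small standalone sketch with plain strings standing in for Site objects:

def put_first(keys, preferred):
    # Return keys reordered so that entries listed in preferred come first,
    # in preferred order, followed by the remaining keys in their old order.
    keys = list(keys)
    front = []
    for p in preferred:
        if p in keys:
            keys.remove(p)
            front.append(p)
    return front + keys

# put_first(['de', 'en', 'fr', 'nl'], preferred=['nl', 'en'])
# -> ['nl', 'en', 'de', 'fr']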
if site.category_on_one_line():
|
if Site(default_code).category_on_one_line():
|
def categoryFormat(links, insite = None): """Create a suitable string encoding all category links for a wikipedia page. 'links' should be a list of category pagelink objects. The string is formatted for inclusion in insite. """ if not links: return '' if insite is None: insite.getSite() s = [] for pl in links: s.append(pl.aslink()) if site.category_on_one_line(): sep = ' ' else: sep = '\r\n' s.sort() s=sep.join(s) + '\r\n' return s
|
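categoryFormat above boils down to sorting the rendered category links and joining them with either a space or a line break, depending on a per-language setting (which is what the rem/add pair looks up in different places). A tiny standalone sketch of that final step, detached from the Site object:

def join_category_links(links, on_one_line=False):
    # Sort the rendered [[Category:...]] strings and join them the way
    # categoryFormat above does.
    links = sorted(links)
    if on_one_line:
        sep = ' '
    else:
        sep = '\r\n'
    return sep.join(links) + '\r\n'

# join_category_links(['[[Category:B]]', '[[Category:A]]'], on_one_line=True)
# -> '[[Category:A]] [[Category:B]]\r\n'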
return self.lang in self.family.category_on_one_line
|
return self.lang in config.category_on_one_line
|
def category_on_one_line(self): return self.lang in self.family.category_on_one_line
|
'zh-yue','zu',]
|
'zh-yue','zu',],
|
def __init__(self): family.Family.__init__(self) self.name = 'wikipedia'
|
login.storecookiedata(cookie, self.site, username)
|
login.storecookiedata(cookie, self.site, self.username)
|
def login(self, anonymous): """Initialises site and username data, or anonymous""" if anonymous: self.site = wikipedia.getSite(user=None) else: self.username = self.options.username or wikipedia.input(u"Username: ", encode=True) self.site = wikipedia.getSite(user=self.username) self.site._fill() # load cookies if not self.site._loggedin: password = getpass.getpass("Password: ") cookie = login.login(self.site, self.username, password) if not cookie: sys.exit("Login failed") login.storecookiedata(cookie, self.site, username) wikipedia.output(u"Login succesful")
|
@staticmethod
|
def handle_edit_conflict(self): fn = os.path.join(tempfile.gettempdir(), self.page) fp = open(fn, 'w') fp.write(new) fp.close() wikipedia.output(u"An edit conflict has arisen. Your edit has been saved to %s. Please try again." % fn)
|
|
hn = underline2space(self.hashname())
|
hn = self.hashname()
|
def get(self): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code:
|
'aa','ab','af','am','ang','ar','an','roa-rup','as','ast','av','ay',
|
'aa','ab','af','als','am','ang','ar','an','roa-rup','as','ast','av','ay',
|
def __init__(self):
|
'dv','dz','et','el','als','en','es','eo','eu','ee','fa','fo','fj','fr',
|
'dv','dz','et','el','en','es','eo','eu','ee','fa','fo','fj','fr',
|
def __init__(self):
|
if l.code() in ['zh-cn','zh-tw','zh'] and xpl.code() in ['zh-cn','zh-tw']:
|
if pl.code() in ['zh-cn','zh-tw','zh'] and xpl.code() in ['zh-cn','zh-tw']:
|
def reportBacklinks(self, new): """Report missing back links. This will be called from finish() if needed.""" for code in new.keys(): pl = new[code] if not unequal.bigger(self.inpl, pl): shouldlink = new.values() + [self.inpl] linked = pl.interwiki() for xpl in shouldlink: if xpl != pl and not xpl in linked: for l in linked: if l.code() == xpl.code(): print "WARNING:", pl.asasciiselflink(), "does not link to", xpl.asasciilink(), "but to", l.asasciilink() break else: print "WARNING:", pl.asasciiselflink(), "does not link to", xpl.asasciilink() # Check for superfluous links for xpl in linked: # Chinese internal links are ok. if l.code() in ['zh-cn','zh-tw','zh'] and xpl.code() in ['zh-cn','zh-tw']: pass elif not xpl in shouldlink: # Check whether there is an alternative page on that language. for l in shouldlink: if l.code() == xpl.code(): # Already reported above. break else: # New warning print "WARNING:", pl.asasciiselflink(), "links to incorrect", xpl.asasciilink()
|
wikipedia.output(u"NOTE: Ignoring non-disambiguation page %s for %s because disambiguation page %s has already been found."%(pl.aslink(True),self.inpl.aslink(True),self.hasdisambig(pl.site())))
|
wikipedia.output(u"NOTE: Ignoring non-disambiguation page %s for %s because disambiguation page %s has already been found."%(pl.aslink(True),self.inpl.aslink(True),self.hasdisambig(pl.site()).aslink(True)))
|
def workDone(self, counter): """This is called by a worker to tell us that the promised work was completed as far as possible. The only argument is an instance of a counter class, that has methods minus() and plus() to keep counts of the total work todo.""" # Loop over all the pages that should have been taken care of for pl in self.pending: # Mark the page as done self.done[pl] = pl.site()
|
wikipedia.output(u"NOTE: Ignoring disambiguation page %s for %s because non-disambiguation page %s has already been found."%(pl.aslink(True),self.inpl.aslink(True),self.hasnondisambig(pl.site())))
|
wikipedia.output(u"NOTE: Ignoring disambiguation page %s for %s because non-disambiguation page %s has already been found."%(pl.aslink(True),self.inpl.aslink(True),self.hasnondisambig(pl.site()).aslink(True)))
|
def workDone(self, counter): """This is called by a worker to tell us that the promised work was completed as far as possible. The only argument is an instance of a counter class, that has methods minus() and plus() to keep counts of the total work todo.""" # Loop over all the pages that should have been taken care of for pl in self.pending: # Mark the page as done self.done[pl] = pl.site()
|
'en' : [u'disambig', u'tLAdisambig', u'disambiguation',u'2LCdisambig'],
|
'en' : [u'disambig', u'LND', u'2LA', u'TLAdisambig', u'disambiguation', u'2LCdisambig', u'4LA', u'acrocandis', u'hndis', u'numberdis', u'roadis'],
|
def __init__(self):
|
def ReadWarnfile(filename, sa):
|
def readWarnfile(filename, sa):
|
def ReadWarnfile(filename, sa): import warnfile reader = warnfile.WarnfileReader(filename) # we won't use removeHints hints = reader.getHints()[0] for pagename in hints.iterkeys(): pl = wikipedia.Page(wikipedia.getSite(), pagename) # The WarnfileReader gives us a list of pagelinks, but titletranslate.py expects a list of strings, so we convert it back. # TODO: This is a quite ugly hack, in the future we should maybe make titletranslate expect a list of pagelinks. hintStrings = [] for hint in hints[pagename]: #lang = hintStrings.append('%s:%s' % (hint.site().language(), hint.linkname())) sa.add(pl, hints = hintStrings)
|
hints = reader.getHints()[0]
|
(hints, removeHints) = reader.getHints()
|
def ReadWarnfile(filename, sa): import warnfile reader = warnfile.WarnfileReader(filename) # we won't use removeHints hints = reader.getHints()[0] for pagename in hints.iterkeys(): pl = wikipedia.Page(wikipedia.getSite(), pagename) # The WarnfileReader gives us a list of pagelinks, but titletranslate.py expects a list of strings, so we convert it back. # TODO: This is a quite ugly hack, in the future we should maybe make titletranslate expect a list of pagelinks. hintStrings = [] for hint in hints[pagename]: #lang = hintStrings.append('%s:%s' % (hint.site().language(), hint.linkname())) sa.add(pl, hints = hintStrings)
|
ReadWarnfile(arg[10:], sa)
|
warnfile = arg[10:]
|
def ReadWarnfile(filename, sa): import warnfile reader = warnfile.WarnfileReader(filename) # we won't use removeHints hints = reader.getHints()[0] for pagename in hints.iterkeys(): pl = wikipedia.Page(wikipedia.getSite(), pagename) # The WarnfileReader gives us a list of pagelinks, but titletranslate.py expects a list of strings, so we convert it back. # TODO: This is a quite ugly hack, in the future we should maybe make titletranslate expect a list of pagelinks. hintStrings = [] for hint in hints[pagename]: #lang = hintStrings.append('%s:%s' % (hint.site().language(), hint.linkname())) sa.add(pl, hints = hintStrings)
|
choice=raw_input("? ")
|
choice=wikipedia.input("?")
|
def choosecats(pagetext): chosen=[] flag=False length=1000 print ("Give the new categories, one per line.") print ("Empty line: if the first, don't change. Otherwise: Ready.") print ("-: I made a mistake, let me start over.") print ("?: Give (more of) the text of the page.") print ("xx: if the first, remove all categories and add no new.") while flag == False: choice=raw_input("? ") if choice=="": flag=True elif choice=="-": chosen=choosecats(pagetext) flag=True elif choice=="?": wikipedia.output(pagetext[0:length]) length = length+500 elif choice=="xx" and chosen==[]: chosen = None flag=True else: chosen.append(choice) return chosen
|
editR = re.compile('<li>.*?<a href=".*?" title=".*?">([^<]*)</a> <span class=\'user\'><a href=".*?" title=".*?">([^<]*?)</a></span>.*?(?:<span class=\'comment\'>(.*?)</span>)?</li>')
|
if self.site().version() < "1.4": editR = re.compile('<li>.*?<a href=".*?" title=".*?">([^<]*)</a> <span class=\'user\'><a href=".*?" title=".*?">([^<]*?)</a></span>.*?(?:<span class=\'comment\'>(.*?)</span>)?</li>') else: editR = re.compile('<li>.*?<a href=".*?" title=".*?">([^<]*)</a> <span class=\'history-user\'><a href=".*?" title=".*?">([^<]*?)</a></span>.*?(?:<span class=\'comment\'>(.*?)</span>)?</li>')
|
def getVersionHistory(self, forceReload = False): """ Loads the version history page and returns a list of tuples, where each tuple represents one edit and is built of edit date/time, user name, and edit summary. """ site = self.site() path = site.family.version_history_address(self.site().language(), self.urlname())
|
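The replacement above picks the history-row pattern by MediaWiki version, because the CSS class wrapping the user link changed from 'user' to 'history-user' around 1.4. A reduced sketch of that branch (the string comparison against "1.4" mirrors the source; the trimmed pattern only keeps the user-name part):

import re

def edit_row_regex(version):
    # MediaWiki < 1.4 marked the user link with class 'user',
    # later versions use 'history-user'.
    if version < "1.4":
        css_class = 'user'
    else:
        css_class = 'history-user'
    return re.compile(r"""<span class='%s'><a href=".*?" title=".*?">([^<]*?)</a></span>""" % css_class)

# edit_row_regex("1.5").pattern.startswith("<span class='history-user'>")  ->  True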
s = categoryFormat(new, site = site)
|
s = categoryFormat(new, insite = site)
|
def replaceCategoryLinks(oldtext, new, site = None): """Replace the category links given in the wikitext given in oldtext by the new links given in new. 'new' should be a list of category pagelink objects. """ if site is None: site = getSite() # first remove interwiki links and add them later, so that # interwiki tags appear below category tags if both are set # to appear at the bottom of the article if not site.lang in config.categories_last: interwiki_links = getLanguageLinks(oldtext, insite = site) oldtext = removeLanguageLinks(oldtext, site = site) s = categoryFormat(new, site = site) s2 = removeCategoryLinks(oldtext, site = site) if s: if site.lang in config.category_attop: newtext = s + config.category_text_separator + s2 else: newtext = s2 + config.category_text_separator + s else: newtext = s2 # now re-add interwiki links if not site.lang in config.categories_last: newtext = replaceLanguageLinks(newtext, interwiki_links) return newtext
|
title = url2unicode(title, site = insite)
|
title = url2unicode(title, site = insite, site2 = site)
|
def __init__(self, site, title = None, insite = None, tosite = None): """ Constructor. Normally called with two arguments: Parameters: 1) The wikimedia site on which the page resides 2) The title of the page as a unicode string The argument insite can be specified to help decode the name; it is the wikimedia site where this link was found. """ self._site = site if tosite: self._tosite = tosite else: self._tosite = getSite() # Default to home wiki if not insite: insite = site
|
line = line.split(' ') pid = int(line[0]) ptime = int(line[1].split('.')[0]) if now - ptime <= self.releasepid: if now - ptime <= self.dropdelay and pid != self.pid: count += 1 processes[pid] = ptime if pid >= my_pid: my_pid = pid+1
|
try: line = line.split(' ') pid = int(line[0]) ptime = int(line[1].split('.')[0]) if now - ptime <= self.releasepid: if now - ptime <= self.dropdelay and pid != self.pid: count += 1 processes[pid] = ptime if pid >= my_pid: my_pid = pid+1 except IndexError: pass
|
def checkMultiplicity(self): processes = {} my_pid = 1 count = 1 try: f = open('throttle.log','r') except IOError: if not self.pid: pass else: raise else: now = time.time() for line in f.readlines(): line = line.split(' ') pid = int(line[0]) ptime = int(line[1].split('.')[0]) if now - ptime <= self.releasepid: if now - ptime <= self.dropdelay and pid != self.pid: count += 1 processes[pid] = ptime if pid >= my_pid: my_pid = pid+1 if not self.pid: self.pid = my_pid self.checktime = time.time() processes[self.pid] = self.checktime f = open('throttle.log','w') for p in processes.keys(): f.write(str(p)+' '+str(processes[p])+'\n') f.close() self.process_multiplicity = count print("Checked for running processes. %s processes currently running, "%count + "including the current process.")
|
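The try/except wrapper above lets checkMultiplicity() tolerate blank or truncated lines in throttle.log instead of crashing on line.split(' ')[1]. A compact sketch of the same defensive parse, assuming the "pid timestamp" per-line format used above (this version also catches ValueError for non-numeric fields, which the original does not):

def parse_throttle_lines(lines):
    # Return a {pid: timestamp} dict, silently skipping malformed lines.
    processes = {}
    for line in lines:
        try:
            fields = line.split(' ')
            pid = int(fields[0])
            ptime = int(fields[1].split('.')[0])
        except (IndexError, ValueError):
            continue
        processes[pid] = ptime
    return processes

# parse_throttle_lines(['123 1099999999.5', '', 'garbage'])  ->  {123: 1099999999}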
def url2unicode(title, site): try: t = title.encode(site.encoding()) t = urllib.unquote(t) return unicode(t, site.encoding()) except UnicodeError: for enc in site.encodings(): try: t = title.encode(enc) t = urllib.unquote(t) return unicode(t, enc) except UnicodeError: pass raise
|
def url2unicode(title, site, site2 = None): encList = [site.encoding()] + list(site.encodings()) if site2 and site2 <> site: encList.append(site.encoding()) encList += list(site2.encodings()) firstException = None for enc in encList: try: t = title.encode(enc) t = urllib.unquote(t) return unicode(t, enc) except UnicodeError, ex: if not firstException: firstException = ex pass raise firstException
|
def url2unicode(title, site): try: t = title.encode(site.encoding()) t = urllib.unquote(t) return unicode(t, site.encoding()) except UnicodeError: # try to handle all encodings (will probably retry utf-8) for enc in site.encodings(): try: t = title.encode(enc) t = urllib.unquote(t) return unicode(t, enc) except UnicodeError: pass # Couldn't convert, raise the original exception raise
|
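The new url2unicode builds a candidate-encoding list from both sites and remembers the first decoding error so it can be re-raised if nothing works. (Note that the added code appends site.encoding() a second time where site2.encoding() was presumably intended; the following site2.encodings() call covers most cases anyway.) A standalone sketch of the try-each-encoding idea, independent of the Site class:

import urllib

def decode_title(title, encodings):
    # Unquote a URL-encoded title, trying each candidate encoding in order
    # and re-raising the first failure if none of them works.
    first_error = None
    for enc in encodings:
        try:
            raw = urllib.unquote(title.encode(enc))
            return unicode(raw, enc)
        except UnicodeError, err:
            if first_error is None:
                first_error = err
    raise first_error

# decode_title(u'%C3%9Cbersicht', ['utf-8', 'iso8859-1'])  ->  u'\xdcbersicht'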
except wikipedia.isRedirectPage:
|
except wikipedia.IsRedirectPage:
|
def workon(page): try: text = page.get() except wikipedia.isRedirectPage: return # Here go edit text in whatever way you want. If you find you do not # want to edit this page, just return if text != page.get(): page.put(text) # Adding a summary text would be good
|
for arg in wikipedia.handleArgs: start.append(arg)
|
test = False for arg in wikipedia.handleArgs(): if arg.startswith("-test"): test = True else: start.append(arg)
|
def workon(page): try: text = page.get() except wikipedia.isRedirectPage: return # Here go edit text in whatever way you want. If you find you do not # want to edit this page, just return if text != page.get(): page.put(text) # Adding a summary text would be good
|
putPage(safetuple[0], safetuple[1], safetuple[2], comment=safetuple[3], watchArticle=safetuple[4], minorEdit=safetuple[5], newPage=safetuple[6], token=None,gettoken=True, sysop=safetuple[7])
|
self.putPage(safetuple[0], comment=safetuple[1], watchArticle=safetuple[2], minorEdit=safetuple[3], newPage=safetuple[4], token=None, gettoken=True, sysop=safetuple[5])
|
def putPage(self, text, comment = None, watchArticle = False, minorEdit = True, newPage = False, token = None, gettoken = False, sysop = False): """ Upload 'text' as new contents for this Page by filling out the edit page. Don't use this directly, use put() instead. """ safetuple = () # safetuple keeps the old value, but only if we did not get a token yet could if self.site().version() >= "1.4": if gettoken or not token: token = self.site().getToken(getagain = gettoken, sysop = sysop) else: safetuple = (text, comment, watchArticle, minorEdit, newPage, sysop) # Check whether we are not too quickly after the previous putPage, and # wait a bit until the interval is acceptable put_throttle() # Which web-site host are we submitting to? host = self.site().hostname() # Get the address of the page on that host. address = self.site().put_address(self.urlname()) # If no comment is given for the change, use the default if comment is None: comment=action # Use the proper encoding for the comment comment = comment.encode(self.site().encoding()) # Encode the text into the right encoding for the wiki text = text.encode(self.site().encoding()) predata = [ ('wpSave', '1'), ('wpSummary', comment), ('wpTextbox1', text)] # Except if the page is new, we need to supply the time of the # previous version to the wiki to prevent edit collisions if newPage: predata.append(('wpEdittime', '')) else: predata.append(('wpEdittime', self._editTime)) # Pass the minorEdit and watchArticle arguments to the Wiki. if minorEdit: predata.append(('wpMinoredit', '1')) if watchArticle: predata.append(('wpWatchthis', '1')) # Give the token, but only if one is supplied. if token: predata.append(('wpEditToken', token)) # Encode all of this into a HTTP request data = urlencode(tuple(predata)) if newPage: output('Creating page %s' % self.aslink()) else: output('Changing page %s' % self.aslink()) # Submit the prepared information conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "PythonWikipediaBot/1.0") if self.site().cookies(): conn.putheader('Cookie', self.site().cookies(sysop = sysop)) conn.endheaders() conn.send(data) # Prepare the return values try: response = conn.getresponse() except httplib.BadStatusLine, line: raise PageNotSaved('Bad status line: %s' % line) data = response.read().decode(self.site().encoding()) conn.close() if data != u'': # Saving unsuccessful. Possible reasons: edit conflict or invalid edit token. editconflict = mediawiki_messages.get('editconflict', site = self.site()).replace('$1', '') if '<title>%s' % editconflict in data: raise EditConflict(u'An edit conflict has occured.') elif safetuple and "<" in data: # We might have been using an outdated token print "Changing page has failed. Retrying." putPage(safetuple[0], safetuple[1], safetuple[2], comment=safetuple[3], watchArticle=safetuple[4], minorEdit=safetuple[5], newPage=safetuple[6], token=None,gettoken=True, sysop=safetuple[7]) else: output(data) return response.status, response.reason, data
|
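The corrected call above retries the save through self.putPage with the arguments actually stored in safetuple (the removed line called putPage without self and with the tuple elements shifted into the wrong parameters). Stripped of the MediaWiki details, the pattern is "retry once with a freshly fetched edit token"; a generic sketch with caller-supplied callables:

def save_with_token_retry(submit, get_token):
    # submit(token) should return True on success; get_token(force) returns a token.
    token = get_token(force=False)
    if submit(token):
        return True
    # The cached token may be stale (e.g. after a fresh login) -- get a new one
    # and try exactly once more, as putPage above does via its safetuple.
    token = get_token(force=True)
    return submit(token)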
pagename = wikipedia.input('Which page to check:', encode = True)
|
pagename = wikipedia.input('Which page to check:')
|
def makepath(path): """ creates missing directories for the given path and returns a normalized absolute version of the path. - if the given path already exists in the filesystem the filesystem is not modified. - otherwise makepath creates directories along the given path using the dirname() of the path. You may append a '/' to the path if you want it to be a directory path. from [email protected] 2002/03/18 """ from os import makedirs from os.path import normpath,dirname,exists,abspath dpath = normpath(dirname(path)) if not exists(dpath): makedirs(dpath) return normpath(abspath(path))
|
wikipedia.setAction(msg_redir[wikipedia.chooselang(wikipedia.mylang,msg_redir)]+': '+wrd)
|
wikipedia.setAction(msg_redir[wikipedia.chooselang(wikipedia.mylang,msg_redir)] + u': ' + wrd)
|
def makepath(path): """ creates missing directories for the given path and returns a normalized absolute version of the path. - if the given path already exists in the filesystem the filesystem is not modified. - otherwise makepath creates directories along the given path using the dirname() of the path. You may append a '/' to the path if you want it to be a directory path. from [email protected] 2002/03/18 """ from os import makedirs from os.path import normpath,dirname,exists,abspath dpath = normpath(dirname(path)) if not exists(dpath): makedirs(dpath) return normpath(abspath(path))
|
wikipedia.setAction(msg[wikipedia.chooselang(wikipedia.mylang,msg)]+': '+wrd)
|
wikipedia.setAction(msg[wikipedia.chooselang(wikipedia.mylang,msg)] + u': ' + wrd)
|
def makepath(path): """ creates missing directories for the given path and returns a normalized absolute version of the path. - if the given path already exists in the filesystem the filesystem is not modified. - otherwise makepath creates directories along the given path using the dirname() of the path. You may append a '/' to the path if you want it to be a directory path. from [email protected] 2002/03/18 """ from os import makedirs from os.path import normpath,dirname,exists,abspath dpath = normpath(dirname(path)) if not exists(dpath): makedirs(dpath) return normpath(abspath(path))
|
alternatives.append(str(thispl.getRedirectTo()))
|
target = str(thispl.getRedirectTo()) target = unicode(target, wikipedia.myencoding()) alternatives.append(target)
|
def makepath(path): """ creates missing directories for the given path and returns a normalized absolute version of the path. - if the given path already exists in the filesystem the filesystem is not modified. - otherwise makepath creates directories along the given path using the dirname() of the path. You may append a '/' to the path if you want it to be a directory path. from [email protected] 2002/03/18 """ from os import makedirs from os.path import normpath,dirname,exists,abspath dpath = normpath(dirname(path)) if not exists(dpath): makedirs(dpath) return normpath(abspath(path))
|
wikipedia.output("%3d - %s" % (i, alternatives[i]), wikipedia.myencoding())
|
wikipedia.output("%3d - %s" % (i, alternatives[i]))
|
def makepath(path): """ creates missing directories for the given path and returns a normalized absolute version of the path. - if the given path already exists in the filesystem the filesystem is not modified. - otherwise makepath creates directories along the given path using the dirname() of the path. You may append a '/' to the path if you want it to be a directory path. from [email protected] 2002/03/18 """ from os import makedirs from os.path import normpath,dirname,exists,abspath dpath = normpath(dirname(path)) if not exists(dpath): makedirs(dpath) return normpath(abspath(path))
|
templateName = self.template.title().split(':', 1)[1] templateRegex = re.compile('\{\{([mM][sS][gG]:)?[' + templateName[0].upper() + templateName[0].lower() + ']' + templateName[1:] + '}}')
|
templateName = self.template.titleWithoutNamespace() if wikipedia.getSite().nocapitalize: old = self.old else: templateName = '[' + templateName[0].upper() + templateName[0].lower() + ']' + templateName[1:] templateName = re.sub(' ', '[_ ]', templateName) templateRegex = re.compile(r'\{\{([mM][sS][gG]:)?' + templateName + '(?P<parameters>\|[^}]+|)}}')
|
def __iter__(self): import xmlreader mysite = wikipedia.getSite() dump = xmlreader.XmlDump(self.xmlfilename) # regular expression to find the original template. # {{msg:vfd}} does the same thing as {{msg:Vfd}}, so both will be found. # The new syntax, {{vfd}}, will also be found. templateName = self.template.title().split(':', 1)[1] templateRegex = re.compile('\{\{([mM][sS][gG]:)?[' + templateName[0].upper() + templateName[0].lower() + ']' + templateName[1:] + '}}') for entry in dump.parse(): if templateRegex.search(entry.text): page = wikipedia.Page(mysite, entry.title) yield page
|
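The new pattern above accepts both capitalisations of the template's first letter (unless the wiki is nocapitalize), treats spaces and underscores in the name as interchangeable, and captures any parameters. A self-contained sketch of building such a pattern; the sample name is illustrative and the template name is assumed to contain no regex metacharacters:

import re

def template_regex(name, nocapitalize=False):
    # Match {{Name}}, {{name}}, {{msg:Name}} and {{Name|params}}, with ' ' and '_'
    # in the template name treated as equivalent (mirrors the diff above).
    if not nocapitalize:
        name = '[' + name[0].upper() + name[0].lower() + ']' + name[1:]
    name = re.sub('[_ ]', '[_ ]', name)
    return re.compile(r'\{\{([mM][sS][gG]:)?' + name + r'(?P<parameters>\|[^}]+|)}}')

# template_regex('Vfd notice').match('{{vfd notice|reason}}').group('parameters')
# -> '|reason'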
if not wikipedia.getSite().nocapitalize:
|
if wikipedia.getSite().nocapitalize: old = self.old else:
|
def run(self): # regular expression to find the original template. # {{msg:vfd}} does the same thing as {{msg:Vfd}}, so both will be found. # The new syntax, {{vfd}}, will also be found. # The group 'parameters' will either match the parameters, or an # empty string if there are none. if not wikipedia.getSite().nocapitalize: old = '[' + self.old[0].upper() + self.old[0].lower() + ']' + self.old[1:] else: old = self.old old = re.sub('[_ ]', '[_ ]', old) templateR=re.compile(r'\{\{([mM][sS][gG]:)?' + old + '(?P<parameters>\|[^}]+|)}}') replacements = [] if self.remove: replacements.append((templateR, '')) elif self.resolve: replacements.append((templateR, '{{subst:' + self.old + '\g<parameters>}}')) else: replacements.append((templateR, '{{' + self.new + '\g<parameters>}}')) replaceBot = replace.ReplaceRobot(self.generator, replacements) replaceBot.run()
|
else: old = self.old
|
def run(self): # regular expression to find the original template. # {{msg:vfd}} does the same thing as {{msg:Vfd}}, so both will be found. # The new syntax, {{vfd}}, will also be found. # The group 'parameters' will either match the parameters, or an # empty string if there are none. if not wikipedia.getSite().nocapitalize: old = '[' + self.old[0].upper() + self.old[0].lower() + ']' + self.old[1:] else: old = self.old old = re.sub('[_ ]', '[_ ]', old) templateR=re.compile(r'\{\{([mM][sS][gG]:)?' + old + '(?P<parameters>\|[^}]+|)}}') replacements = [] if self.remove: replacements.append((templateR, '')) elif self.resolve: replacements.append((templateR, '{{subst:' + self.old + '\g<parameters>}}')) else: replacements.append((templateR, '{{' + self.new + '\g<parameters>}}')) replaceBot = replace.ReplaceRobot(self.generator, replacements) replaceBot.run()
|
|
templateR=re.compile(r'\{\{([mM][sS][gG]:)?' + old + '(?P<parameters>\|[^}]+|)}}')
|
templateRegex = re.compile(r'\{\{([mM][sS][gG]:)?' + old + '(?P<parameters>\|[^}]+|)}}')
|
def run(self): # regular expression to find the original template. # {{msg:vfd}} does the same thing as {{msg:Vfd}}, so both will be found. # The new syntax, {{vfd}}, will also be found. # The group 'parameters' will either match the parameters, or an # empty string if there are none. if not wikipedia.getSite().nocapitalize: old = '[' + self.old[0].upper() + self.old[0].lower() + ']' + self.old[1:] else: old = self.old old = re.sub('[_ ]', '[_ ]', old) templateR=re.compile(r'\{\{([mM][sS][gG]:)?' + old + '(?P<parameters>\|[^}]+|)}}') replacements = [] if self.remove: replacements.append((templateR, '')) elif self.resolve: replacements.append((templateR, '{{subst:' + self.old + '\g<parameters>}}')) else: replacements.append((templateR, '{{' + self.new + '\g<parameters>}}')) replaceBot = replace.ReplaceRobot(self.generator, replacements) replaceBot.run()
|