rem
stringlengths 0
322k
| add
stringlengths 0
2.05M
| context
stringlengths 8
228k
|
---|---|---|
class Error(Exception): """Wikipedia error""" class NoPage(Error): """Wikipedia page does not exist""" class IsRedirectPage(Error): """Wikipedia page is a redirect page""" class IsNotRedirectPage(Error): """Wikipedia page is not a redirect page""" class LockedPage(Error): """Wikipedia page is locked""" class NoSuchEntity(ValueError): """No entity exist for this character""" class SectionError(ValueError): """The section specified by class NoNamespace(Error): """Wikipedia page is not in a special namespace""" SaxError = xml.sax._exceptions.SAXParseException class PageLink: """A Wikipedia page link.""" def __init__(self, code, title = None, incode = None): """ Constructor. Normally called with two arguments: Parameters: 1) The language code of the wiki on which the page resides 2) The title of the page as a unicode string The argument incode can be specified to help decode the name; it is the language where this link was found. """ self._code = code title = title.strip() if title and title[0]==':': title = title[1:] self._urlname = link2url(title, self._code, incode = incode) self._linkname = url2link(self._urlname, code = self._code, incode = mylang) def code(self): """The code for the language of the page this PageLink refers to, without :""" return self._code def encoding(self): """ Returns the character encoding used on this page's wiki. """ return code2encoding(self._code) def urlname(self): """The name of the page this PageLink refers to, in a form suitable for the URL of the page.""" return self._urlname def linkname(self): """The name of the page this PageLink refers to, in a form suitable for a wiki-link""" return self._linkname def catname(self): """The name of the page without the namespace part. Gives an error if the page is from the main namespace.""" title=self.linkname() parts=title.split(':') parts=parts[1:] if parts==[]: raise NoNamespace(self) return ':'.join(parts) def hashname(self): """The name of the section this PageLink refers to. 
Sections are denominated by a None is returned.""" ln = self.linkname() ln = re.sub('& if not ' return None else: hn = ln[ln.find(' hn = re.sub('&hash;', '& return hn def hashfreeLinkname(self): hn=self.hashname() if hn: return self.linkname()[:-len(hn)-1] else: return self.linkname() def ascii_linkname(self): """Make a link-name that contains only ascii characters""" return url2link(self._urlname, code = self._code, incode = 'ascii') def __str__(self): """A simple ASCII representation of the pagelink""" return "%s:%s" % (self._code, self.ascii_linkname()) def __repr__(self): """A more complete string representation""" return "%s{%s}" % (self.__class__.__name__, str(self)) def aslink(self): """A string representation in the form of an interwiki link""" return "[[%s:%s]]" % (self.code(), self.linkname()) def aslocallink(self): """A string representation in the form of a local link""" return "[[%s]]" % (self.linkname()) def asselflink(self): """A string representation in the form of a local link, but prefixed by the language code""" return "%s:[[%s]]" % (self.code(), self.linkname()) def get(self, read_only = False): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code: NoPage: The page does not exist IsRedirectPage: The page is a redirect. The argument of the exception is the page it redirects to. LockedPage: The page is locked, and therefore it won't be possible to change the page. This exception won't be raised if the argument read_only is True. 
SectionError: The subject does not exist on a page with a """ if hasattr(self, '_redirarg'): raise IsRedirectPage,self._redirarg if hasattr(self, '_getexception'): raise self._getexception if not hasattr(self, '_contents'): try: self._contents = getPage(self.code(), self.urlname(), read_only = read_only) hn = self.hashname() if hn: hn = underline2space(hn) m = re.search("== *%s *==" % hn, self._contents) if not m: output("WARNING: Hashname does not exist: %s" % self) except NoPage: self._getexception = NoPage raise except IsRedirectPage,arg: self._getexception = IsRedirectPage self._redirarg = arg raise except LockedPage: self._getexception = LockedPage raise except SectionError: self._getexception = SectionError raise return self._contents def exists(self): """True if the page exists (itself or as redirect), False if not""" try: self.get(read_only = True) except NoPage: return False except IsRedirectPage: return True except SectionError: return False return True def isRedirectPage(self): """True if the page is a redirect page, False if not or not existing""" try: self.get(read_only = True) except NoPage: return False except IsRedirectPage: return True return False def isEmpty(self): """True if the page except for language links and category links has less than 4 characters, False otherwise. Can return the same exceptions as get() """ txt = self.get(read_only = True) txt = removeLanguageLinks(txt) txt = removeCategoryLinks(txt, self.code()) if len(txt) < 4: return 1 else: return 0 def put(self, newtext, comment=None, watchArticle = False, minorEdit = True): """Replace the new page with the contents of the first argument. The second argument is a string that is to be used as the summary for the modification """ if self.exists(): newPage="0" else: newPage="1" return putPage(self.code(), self.urlname(), newtext, comment, watchArticle, minorEdit, newPage) def interwiki(self): """A list of interwiki links in the page. 
This will retrieve the page text to do its work, so it can raise the same exceptions that are raised by the get() method. The return value is a list of PageLink objects for each of the interwiki links in the page text. """ result = [] ll = getLanguageLinks(self.get(read_only = True), incode = self.code()) for newcode,newname in ll.iteritems(): if newname[0] == ':': print "ERROR> link from %s to %s:%s has leading :?!"%(self,newcode,repr(newname)) if newname[0] == ' ': print "ERROR> link from %s to %s:%s has leading space?!"%(self,newcode,repr(newname)) try: result.append(self.__class__(newcode, newname, incode = self.code())) except UnicodeEncodeError: print "ERROR> link from %s to %s:%s is invalid encoding?!"%(self,newcode,repr(newname)) except NoSuchEntity: print "ERROR> link from %s to %s:%s contains invalid character?!"%(self,newcode,repr(newname)) except ValueError: print "ERROR> link from %s to %s:%s contains invalid unicode reference?!"%(self,newcode,repr(newname)) return result def categories(self): """A list of categories that the article is in. This will retrieve the page text to do its work, so it can raise the same exceptions that are raised by the get() method. The return value is a list of PageLink objects for each of the category links in the page text.""" result = [] ll = getCategoryLinks(self.get(read_only = True), self.code()) for catname in ll: result.append(self.__class__(self.code(), title = catname)) return result def __cmp__(self, other): """Pseudo method to be able to use equality and inequality tests on PageLink objects""" if not hasattr(other, 'code'): return -1 if not self.code() == other.code(): return cmp(self.code(), other.code()) u1=html2unicode(self.linkname(), language = self.code()) u2=html2unicode(other.linkname(), language = other.code()) return cmp(u1,u2) def __hash__(self): """Pseudo method that makes it possible to store PageLink objects as keys in hash-tables. 
This relies on the fact that the string representation of an instance can not change after the construction. """ return hash(str(self)) def links(self): """Gives the normal (not-interwiki, non-category) pages the page links to, as a list of strings """ result = [] try: thistxt = removeLanguageLinks(self.get(read_only = True)) except IsRedirectPage: return thistxt = removeCategoryLinks(thistxt, self.code()) w=r'([^\]\|]*)' Rlink = re.compile(r'\[\['+w+r'(\|'+w+r')?\]\]') for l in Rlink.findall(thistxt): result.append(l[0]) return result def imagelinks(self): """Gives the wiki-images the page shows, as a list of strings """ result = [] im=family.image_namespace(self._code) + ':' w1=r'('+im+'[^\]\|]*)' w2=r'([^\]]*)' Rlink = re.compile(r'\[\['+w1+r'(\|'+w2+r')?\]\]') for l in Rlink.findall(self.get(read_only = True)): result.append(PageLink(self._code,l[0])) w1=r'('+im.lower()+'[^\]\|]*)' w2=r'([^\]]*)' Rlink = re.compile(r'\[\['+w1+r'(\|'+w2+r')?\]\]') for l in Rlink.findall(self.get(read_only = True)): result.append(PageLink(self._code,l[0])) return result def getRedirectTo(self, read_only = False): """ If the page is a redirect page, gives the title of the page it redirects to. Otherwise it will raise an IsNotRedirectPage exception. This function can raise a NoPage exception, and unless the argument read_only is True, a LockedPage exception as well. """ try: self.get(read_only = True) except NoPage: raise NoPage(self) except LockedPage: raise LockedPage(self) except IsRedirectPage, arg: return str(arg) else: raise IsNotRedirectPage(self) def delete(pl, reason = None, prompt = True): """Deletes the page from the wiki. Requires administrator status. If reason is None, asks for a reason. If prompt is True, asks the user if he wants to delete the page. """ def post_multipart(host, selector, fields): """ Post fields and files to an http host as multipart/form-data. fields is a sequence of (name, value) elements for regular form fields. 
files is a sequence of (name, filename, value) elements for data to be uploaded as files. Return the server's response page. """ import httplib content_type, body = encode_multipart_formdata(fields) h = httplib.HTTP(host) h.putrequest('POST', selector) h.putheader('content-type', content_type) h.putheader('content-length', str(len(body))) h.putheader("User-agent", "RobHooftWikiRobot/1.0") h.putheader('Host', host) h.putheader('Cookie', cookies) h.endheaders() h.send(body) errcode, errmsg, headers = h.getreply() return h.file.read() def encode_multipart_formdata(fields): """ fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body if reason == None: reason = input(u'Please enter a reason for the deletion:', myencoding()) answer = 'y' if prompt: answer = input(u'Do you want to delete %s? [y|N]' % pl.linkname()) if answer in ['y', 'Y']: output(u'Deleting page %s...' 
% pl.linkname()) returned_html = post_multipart(family.hostname(mylang), family.delete_address(pl.urlname()), (('wpReason', reason), ('wpConfirm', '1'))) deleted_msg = mediawiki_messages.get('actioncomplete') deleted_msg = re.escape(deleted_msg) deleted_msgR = re.compile(deleted_msg) if deleted_msgR.search(returned_html): output(u'Deletion successful.') else: output(u'Deletion failed:.') try: ibegin = returned_html.index('<!-- start content -->') + 22 iend = returned_html.index('<!-- end content -->') except ValueError: output(returned_html, myencoding()) else: returned_html = returned_html[ibegin:iend] output(returned_html, myencoding()) def redirectRe(code): if family.redirect.has_key(code): txt = '(?:redirect|'+family.redirect[code]+')' else: txt = 'redirect' return re.compile(r'\ class WikimediaXmlHandler(xml.sax.handler.ContentHandler): def setCallback(self, callback): self.callback = callback def startElement(self, name, attrs): self.destination = None if name == 'page': self.text=u'' self.title=u'' self.timestamp=u'' elif name == 'text': self.destination = 'text' elif name == 'title': self.destination = 'title' elif name == 'timestamp': self.destination = 'timestamp' def endElement(self, name): if name == 'revision': text = self.text while text and text[-1] in '\n ': text = text[:-1] text = u'\r\n'.join(text.split('\n')) timestamp = (self.timestamp[0:4]+ self.timestamp[5:7]+ self.timestamp[8:10]+ self.timestamp[11:13]+ self.timestamp[14:16]+ self.timestamp[17:19]) self.callback(self.title.strip(), timestamp, text) def characters(self, data): if self.destination == 'text': self.text += data elif self.destination == 'title': self.title += data elif self.destination == 'timestamp': self.timestamp += data class GetAll: debug = 0 def __init__(self, code, pages): self.code = code self.pages = [] for pl in pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): self.pages.append(pl) else: output(u"BUGWARNING: %s already done!" 
% pl.aslink()) def run(self): data = self.getData() handler = WikimediaXmlHandler() handler.setCallback(self.oneDone) try: xml.sax.parseString(data, handler) except xml.sax._exceptions.SAXParseException: f=open('sax_parse_bug.dat','w') f.write(data) f.close() print "Dumped invalid XML to sax_parse_bug.dat" raise for pl in self.pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): pl._getexception = NoPage elif hasattr(pl,'_contents') and pl.code()=="eo": for c in 'C','G','H','J','S','U': for c2 in c,c.lower(): for x in 'X','x': pl._contents = pl._contents.replace(c2+x,c2+x+x) def oneDone(self, title, timestamp, text): pl = PageLink(self.code, title) for pl2 in self.pages: if PageLink(self.code, pl2.hashfreeLinkname()) == pl: if not hasattr(pl2,'_contents') and not hasattr(pl2,'_getexception'): break else: print repr(title) print repr(pl) print repr(self.pages) print "BUG> bug, page not found in list" if self.debug: xtext = pl2.get(read_only = True) if text != xtext: print " import difflib for line in difflib.ndiff(xtext.split('\r\n'), text.split('\r\n')): if line[0] in ['+', '-']: print repr(line)[2:-1] if edittime[self.code, link2url(title, self.code)] != timestamp: print " print "-",edittime[self.code, link2url(title, self.code)] print "+",timestamp else: m = redirectRe(self.code).match(text) if m: pl2._getexception = IsRedirectPage(m.group(1)) else: if len(text)<50: output(u"DBG> short text in %s:" % pl2.aslink()) output(text) hn = pl2.hashname() if hn: m = re.search("== *%s *==" % hn, text) if not m: output("WARNING: Hashname does not exist: %s" % self) else: pl2._contents = text edittime[self.code, link2url(title, self.code)] = timestamp else: pl2._contents = text edittime[self.code, link2url(title, self.code)] = timestamp def getData(self): import httplib addr = family.export_address(self.code) pagenames = u'\r\n'.join([x.hashfreeLinkname() for x in self.pages]) pagenames = forCode(pagenames, self.code) data = urlencode(( ('action', 
'submit'), ('pages', pagenames), ('curonly', 'True'), )) headers = {"Content-type": "application/x-www-form-urlencoded", "User-agent": "RobHooftWikiRobot/1.0"} get_throttle(requestsize = len(self.pages)) conn = httplib.HTTPConnection(family.hostname(self.code)) conn.request("POST", addr, data, headers) response = conn.getresponse() data = response.read() conn.close() return data def getall(code, pages): print u'Getting %d pages from %s:' % (len(pages), code) return GetAll(code, pages).run() def PageLinksFromFile(fn): """Read a file of page links between double-square-brackets, and return them as a list of PageLink objects. 'fn' is the name of the file that should be read.""" f=open(fn, 'r') R=re.compile(r'\[\[([^:]*):([^\]]*)\]\]') for line in f.readlines(): m=R.match(line) if m: yield PageLink(m.group(1), m.group(2)) else: print "ERROR: Did not understand %s line:\n%s" % (fn, repr(line)) f.close() def unescape(s): """Replace escaped HTML-special characters by their originals""" if '&' not in s: return s s = s.replace("<", "<") s = s.replace(">", ">") s = s.replace("'", "'") s = s.replace(""", '"') s = s.replace("&", "&") return s def setAction(s): """Set a summary to use for changed page submissions""" global action action = s setAction('Wikipedia python library') def urlencode(query): """This can encode a query so that it can be sent as a query using a http POST request""" l=[] for k, v in query: if debug: print "k =", k print "v =", v k = urllib.quote(k) v = urllib.quote(v) l.append(k + '=' + v) return '&'.join(l) Rmorespaces = re.compile(' +') def space2underline(name): name = Rmorespaces.sub(' ', name) return name.replace(' ', '_') Rmoreunderlines = re.compile('__+') def underline2space(name): name = Rmoreunderlines.sub('_', name) return name.replace('_', ' ') import time class Throttle: def __init__(self, delay = config.throttle, ignore = 0): """Make sure there are at least 'delay' seconds between page-gets after 'ignore' initial page-gets""" self.delay = 
delay self.ignore = ignore self.now = 0 self.next_multiplicity = 1.0 def setDelay(self, delay = config.throttle): self.delay = delay def waittime(self): """Calculate the time in seconds we will have to wait if a query would be made right now""" if self.ignore > 0: return 0.0 thisdelay = self.next_multiplicity * self.delay now = time.time() ago = now - self.now if ago < thisdelay: delta = thisdelay - ago return delta else: return 0.0 def __call__(self, requestsize = 1): """This is called from getPage without arguments. It will make sure that if there are no 'ignores' left, there are at least delay seconds since the last time it was called before it returns.""" if self.ignore > 0: self.ignore -= 1 else: waittime = self.waittime() import math self.next_multiplicity = math.log(1+requestsize)/math.log(2.0) if waittime > config.noisysleep: print "Sleeping for %.1f seconds" % waittime time.sleep(waittime) self.now = time.time() get_throttle = Throttle() put_throttle = Throttle(config.put_throttle) def putPage(code, name, text, comment = None, watchArticle = False, minorEdit = True, newPage = False): """Upload 'text' on page 'name' to the 'code' language wikipedia. Use of this routine can normally be avoided; use PageLink.put instead. 
""" import httplib put_throttle() host = family.hostname(code) address = family.put_address(code, space2underline(name)) if comment is None: comment=action if not loggedin or code != mylang: comment = username + ' - ' + comment comment = comment.encode(code2encoding(code)) try: text = forCode(text, code) predata = [ ('wpSave', '1'), ('wpSummary', comment), ('wpTextbox1', text)] if newPage and newPage != '0': predata.append(('wpEdittime', '')) else: predata.append(('wpEdittime', edittime[code, link2url(name, code)])) if minorEdit and minorEdit != '0': predata.append(('wpMinoredit', '1')) if watchArticle and watchArticle != '0': predata.append(('wpWatchthis', '1')) data = urlencode(tuple(predata)) except KeyError: print edittime raise if debug: print text print address print data return None, None, None output(url2unicode("Changing page %s:%s"%(code,name), language = code)) conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "RobHooftWikiRobot/1.0") if cookies and code == mylang: conn.putheader('Cookie',cookies) conn.endheaders() conn.send(data) response = conn.getresponse() data = response.read() conn.close() if data != '': output(data, decoder = myencoding()) return response.status, response.reason, data def forCode(text, code): """Prepare the unicode string 'text' for inclusion into a page for language 'code'. All of the characters in the text should be encodable, otherwise this will fail! This condition is normally met, except if you would copy text verbatim from an UTF-8 language into a iso-8859-1 language, and none of the robots in the package should do such things""" if type(text) == type(u''): text = text.encode(code2encoding(code)) return text class MyURLopener(urllib.FancyURLopener): version="RobHooftWikiRobot/1.0" def getUrl(host,address): """Low-level routine to get a URL from wikipedia. 
host and address are the host and address part of a http url. Return value is a 2-tuple of the text of the page, and the character set used to encode it. """ uo = MyURLopener() if cookies: uo.addheader('Cookie', cookies) f = uo.open('http://%s%s'%(host, address)) text = f.read() ct = f.info()['Content-Type'] R = re.compile('charset=([^\'\"]+)') m = R.search(ct) if m: charset = m.group(1) else: charset = None return text,charset def getPage(code, name, get_edit_page = True, read_only = False, do_quote = True): """ Get the contents of page 'name' from the 'code' language wikipedia Do not use this directly; for 99% of the possible ideas you can use the PageLink object instead. Arguments: code - the wiki's language code name - the page name get_edit_page - If true, gets the edit page, otherwise gets the normal page. read_only - If true, doesn't raise LockedPage exceptions. do_quote - ??? (TODO: what is this for?) This routine returns a unicode string containing the wiki text if get_edit_page is True; otherwise it returns a unicode string containing the entire page's HTML code. """ host = family.hostname(code) name = re.sub(' ', '_', name) output(url2unicode(u'Getting page %s:%s' % (code, name), language = code)) if not '%' in name and do_quote: if name != urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) address = family.get_address(code, name) if get_edit_page: address += '&action=edit&printable=yes' if debug: print host, address get_throttle() retry_idle_time = 1 while True: text, charset = getUrl(host,address) if get_edit_page: if debug: print "Raw:", len(text), type(text), text.count('x') if charset is None: print "WARNING: No character set found" else: if charsets.has_key(code): assert charsets[code].lower() == charset.lower(), "charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code] = charset if code2encoding(code).lower() != charset.lower(): raise ValueError("code2encodings has wrong charset for %s. 
It should be %s"%(code,charset)) if debug>1: print repr(text) if not read_only: if text.find('Userlogin') != -1: output(u'Warning: You\'re probably not logged in on %s:' % code) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code, link2url(name, code)] = m.group(1) else: edittime[code, link2url(name, code)] = "0" try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %d minutes..." % (host, address, retry_idle_time) time.sleep(retry_idle_time * 60) retry_idle_time *= 2 continue i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(code, name) if debug: print text[i1:i2] m = redirectRe(code).match(text[i1:i2]) if m: output(u"DBG> %s is redirect to %s" % (url2unicode(name, language = code), unicode(m.group(1), code2encoding(code)))) raise IsRedirectPage(m.group(1)) if edittime[code, link2url(name, code)] == "0" and not read_only: print "DBG> page may be locked?!" raise LockedPage() x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] else: x = text x = unicode(x, charset, errors = 'replace') return x def languages(first = []): """Return a list of language codes for known wikipedia servers. If a list of language codes is given as argument, these will be put at the front of the returned list.""" result = [] for key in first: if key in family.langs.iterkeys(): result.append(key) for key in family.seriouslangs: if key not in result: result.append(key) return result def allpages(start = '!'): """Generator which yields all articles in the home language in alphanumerical order, starting at a given page. By default, it starts at '!', so it should yield all pages. The objects returned by this generator are all PageLink()s. 
""" while 1: start = link2url(start, code = mylang) returned_html = getPage(mylang, family.allpagesname(mylang, start), do_quote = False, get_edit_page = False) try: ibegin = returned_html.index('<table') iend = returned_html.index('</table') except ValueError: raise NoPage('Couldn\'t extract allpages special page. Make sure you\'re using the MonoBook skin.') returned_html = returned_html[ibegin:iend] if family.version(mylang)=="1.2": R = re.compile('/wiki/(.*?)" *class=[\'\"]printable') else: R = re.compile('title ="(.*?)"') n = 0 for hit in R.findall(returned_html): n = n + 1 if family.version(mylang)=="1.2": yield PageLink(mylang, url2link(hit, code = mylang, incode = mylang)) else: yield PageLink(mylang, hit) start = hit + '%20%200' if n < 100: break def getLanguageLinks(text,incode=None): """Returns a dictionary of other language links mentioned in the text in the form {code:pagename}. Do not call this routine directly, use PageLink objects instead""" result = {} interwikiR = re.compile(r'\[\[([a-z\-]+):([^\]]*)\]\]') for code, pagetitle in interwikiR.findall(text): if code in family.obsolete: output(u"ERROR: ignoring link to obsolete language %s" % code) elif not pagetitle: print "ERROR: empty link to %s:" % code elif code in family.langs: if '|' in pagetitle: pagetitle = pagetitle[:pagetitle.index('|')] if incode == 'eo': pagetitle=pagetitle.replace('xx','x') if not pagetitle: output(u"ERROR: ignoring impossible link to %s:%s" % (code, pagetitle)) else: result[code] = pagetitle if incode in ['zh','zh-cn','zh-tw']: m=re.search(u'\\[\\[([^\\]\\|]*)\\|\u7b80\\]\\]', text) if m: result['zh-cn'] = m.group(1) m=re.search(u'\\[\\[([^\\]\\|]*)\\|\u7c21\\]\\]', text) if m: result['zh-cn'] = m.group(1) m=re.search(u'\\[\\[([^\\]\\|]*)\\|\u7e41\\]\\]', text) if m: result['zh-tw'] = m.group(1) return result def removeLanguageLinks(text): """Given the wiki-text of a page, return that page with all interwiki links removed. 
If a link to an unknown language is encountered, a warning is printed.""" interwikiR = re.compile(r'\[\[([a-z\-]+):[^\]]*\]\][\s]*') index = 0 done = False while not done: match = interwikiR.search(text, index) if not match: done = True else: code = match.group(1) if code in family.langs: text = text[:match.start()] + text[match.end():] index = match.start() else: index = match.end() if len(code) == 2: print "WARNING: Link to unknown language %s" % (match.group(1)) return normalWhitespace(text) def replaceLanguageLinks(oldtext, new): """Replace the interwiki language links given in the wikitext given in oldtext by the new links given in new. 'new' should be a dictionary with the language names as keys, and either PageLink objects or the link-names of the pages as values. """ s = interwikiFormat(new) s2 = removeLanguageLinks(oldtext) if s: if mylang in config.interwiki_attop: newtext = s + config.interwiki_text_separator + s2 else: newtext = s2 + config.interwiki_text_separator + s else: newtext = s2 return newtext def interwikiFormat(links): """Create a suitable string encoding all interwiki links for a wikipedia page. 'links' should be a dictionary with the language names as keys, and either PageLink objects or the link-names of the pages as values. The string is formatted for inclusion in mylang. 
""" if not links: return '' s = [] ar = links.keys() ar.sort() if mylang in family.interwiki_putfirst: ar2 = [] for code in family.interwiki_putfirst[mylang]: if code in ar: del ar[ar.index(code)] ar2 = ar2 + [code] ar = ar2 + ar for code in ar: try: s.append(links[code].aslink()) except AttributeError: s.append('[[%s:%s]]' % (code, links[code])) if mylang in config.interwiki_on_one_line: sep = ' ' else: sep = '\r\n' s=sep.join(s) + '\r\n' return s def normalWhitespace(text): while 1: if text and text.startswith('\r\n'): text=text[2:] elif text and text.startswith(' '): text=text[1:] else: break while 1: if text and text[-1:] in '\r\n \t': text=text[:-1] else: break text += '\n' return text def getCategoryLinks(text, code): """Returns a list of category links. in the form {code:pagename}. Do not call this routine directly, use PageLink objects instead""" result = [] ns = family.category_namespaces(code) for prefix in ns: R = re.compile(r'\[\['+prefix+':([^\]]*)\]\]') for t in R.findall(text): if t: t = t.strip() if code == 'eo': t = t.replace('xx','x') t = t[:1].capitalize() + t[1:] result.append(ns[0]+':'+t) else: print "ERROR: empty category link" return result def removeCategoryLinks(text, code): """Given the wiki-text of a page, return that page with all category links removed. """ ns = family.category_namespaces(code) for prefix in ns: text = re.sub(r'\[\['+prefix+':([^\]]*)\]\]', '', text) return normalWhitespace(text) def replaceCategoryLinks(oldtext, new, code = None): """Replace the category links given in the wikitext given in oldtext by the new links given in new. 'new' should be a list of category pagelink objects. 
""" if code is None: code = mylang interwiki_links = getLanguageLinks(oldtext) oldtext = removeLanguageLinks(oldtext) s = categoryFormat(new) s2 = removeCategoryLinks(oldtext, code) if s: if mylang in config.category_attop: newtext = s + config.category_text_separator + s2 else: newtext = s2 + config.category_text_separator + s else: newtext = s2 newtext = replaceLanguageLinks(newtext, interwiki_links) return newtext def categoryFormat(links): """Create a suitable string encoding all category links for a wikipedia page. 'links' should be a list of category pagelink objects. The string is formatted for inclusion in mylang. """ if not links: return '' s = [] for pl in links: s.append(pl.aslocallink()) if mylang in config.category_on_one_line: sep = ' ' else: sep = '\r\n' s.sort() s=sep.join(s) + '\r\n' return s def myencoding(): """The character encoding used by the home wiki""" return code2encoding(mylang) def code2encoding(code): """Return the encoding for a specific language wikipedia""" if code == 'ascii': return code return family.code2encoding(code) def code2encodings(code): """Return a list of historical encodings for a specific language wikipedia""" return family.code2encodings(code) def url2link(percentname, incode, code): """Convert a url-name of a page into a proper name for an interwiki link the argument 'incode' specifies the encoding of the target wikipedia """ result = underline2space(percentname) x = url2unicode(result, language = code) return unicode2html(x, encoding = code2encoding(incode)) def link2url(name, code, incode = None): """Convert an interwiki link name of a page to the proper name to be used in a URL for that page. 
code should specify the language for the link""" if code == 'eo': name = name.replace('cx','& name = name.replace('Cx','& name = name.replace('CX','& name = name.replace('gx','& name = name.replace('Gx','& name = name.replace('GX','& name = name.replace('hx','& name = name.replace('Hx','& name = name.replace('HX','& name = name.replace('jx','& name = name.replace('Jx','& name = name.replace('JX','& name = name.replace('sx','& name = name.replace('Sx','& name = name.replace('SX','& name = name.replace('ux','& name = name.replace('Ux','& name = name.replace('UX','& name = name.replace('XX','X') name = name.replace('Xx','X') name = name.replace('xx','x') name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& name = name.replace('& if '%' in name: try: name = url2unicode(name, language = code) except UnicodeError: name = html2unicode(name, language = code, altlanguage = incode) else: name = html2unicode(name, language = code, altlanguage = incode) name = name.strip() if name: if not code in family.nocapitalize: name = name[0].upper()+name[1:] try: result = str(name.encode(code2encoding(code))) except UnicodeError: print "Cannot convert %s into a URL for %s" % (repr(name), code) result = addEntity(name) print "Using entities instead",result print "BUG> This is probably a bug in the robot that did not recognize an interwiki link!" result = space2underline(result) return urllib.quote(result) def isInterwikiLink(s): """Try to check whether s is in the form "xx:link" where xx: is a known language. 
In such a case we are dealing with an interwiki link.""" if not ':' in s: return False l,k=s.split(':',1) if l in family.langs: return True return False def getReferences(pl, follow_redirects = True): host = family.hostname(pl.code()) url = family.references_address(mylang, pl.urlname()) output(u'Getting references to %s:%s' % (pl.code(), pl.linkname())) txt, charset = getUrl(host,url) txt = txt.replace('<a', 'a') txt = txt.replace('</a', '/a') txt = txt.replace('<li', 'li') txt = txt.replace('</li', 'li') if not follow_redirects: cascadedListR = re.compile(r"(.*<ul>[^<]*)<ul>[^<]*<\/ul>([^<]*</\ul>.*)") endR = re.compile(r"</ul>") pos = 0 while cascadedListR.search(txt): m = cascadedListR.search(txt) txt = m.group(1) + m.group(2) Rref = re.compile('li>a href.*="([^"]*)"') x = Rref.findall(txt) x.sort() for i in range(len(x)-1, 0, -1): if x[i] == x[i-1]: del x[i] return x def UnicodeToAsciiHtml(s): html = [] for c in s: cord = ord(c) if cord < 128: html.append(c) else: html.append('& return ''.join(html) def url2unicode(percentname, language): for c in percentname: if ord(c)>128: x=percentname break else: x=urllib.unquote(str(percentname)) for encoding in ('utf-8',)+code2encodings(language): try: encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) x,l = decode_func(x) return x except UnicodeError: pass raise UnicodeError("Could not decode %s" % repr(percentname)) def unicode2html(x, encoding): try: encode_func, decode_func, stream_reader, stream_writer = codecs.lookup(encoding) y,l = encode_func(x) except UnicodeError: x = UnicodeToAsciiHtml(x) return x def removeEntity(name): import htmlentitydefs Rentity = re.compile(r'&([A-Za-z]+);') result = u'' i = 0 while i < len(name): m = Rentity.match(name[i:]) if m: if htmlentitydefs.name2codepoint.has_key(m.group(1)): x = htmlentitydefs.name2codepoint[m.group(1)] result = result + unichr(x) i += m.end() else: result += name[i] i += 1 else: result += name[i] i += 1 return result def 
addEntity(name): """Convert a unicode name into ascii name with entities""" import htmlentitydefs result = '' for c in name: if ord(c) < 128: result += str(c) else: for k, v in htmlentitydefs.entitydefs.iteritems(): if (len(v) == 1 and ord(c) == ord(v)) or v == '& result += '&%s;' % k break else: result += '& return result def unicodeName(name, language, altlanguage = None): for encoding in code2encodings(language): try: if type(name)==type(u''): return name else: return unicode(name, encoding) except UnicodeError: continue if altlanguage is not None: print "DBG> Using local encoding!", altlanguage, "to", language, name for encoding in code2encodings(altlanguage): try: return unicode(name, encoding) except UnicodeError: continue raise Error("Cannot decode") def html2unicode(name, language, altlanguage=None): name = unicodeName(name, language, altlanguage) name = removeEntity(name) Runi = re.compile('& Runi2 = re.compile('& result = u'' i=0 while i < len(name): m = Runi.match(name[i:]) m2 = Runi2.match(name[i:]) if m: result += unichr(int(m.group(1))) i += m.end() elif m2: result += unichr(int(m2.group(1),16)) i += m2.end() else: try: result += name[i] i += 1 except UnicodeDecodeError: print repr(name) raise return result def setFamily(fam): """ Import the user's family. If not changed in user_config, the family is Wikipedia. """ try: global family namespace_dict = {} exec "import %s_family as family_module" % fam in namespace_dict family = namespace_dict['family_module'] except ImportError: print "Error importing the family. 
This probably means the family" print "name is mistyped in the configuration file" sys.exit(1) if hasattr(family,'Family'): family=family.Family() def setMyLang(code): """Change the home language""" global mylang global cookies mylang = code try: f = open('login-data/%s-login.data' % mylang) cookies = '; '.join([x.strip() for x in f.readlines()]) loggedin = True f.close() except IOError: cookies = None loggedin = False def checkLogin(): global loggedin txt = getPage(mylang,'Non-existing page', get_edit_page = False) loggedin = 'Userlogin' not in txt return loggedin def argHandler(arg): ''' Takes a commandline parameter, converts it to unicode, and returns it unless it is one of the global parameters as -lang or -throttle. If it is a global parameter, processes it and returns None. ''' if sys.platform=='win32': arg = unicode(arg, 'windows-1252') else: arg = unicode(arg, config.console_encoding) if arg.startswith('-family:'): setFamily(arg[8:]) elif arg.startswith('-lang:'): setMyLang(arg[6:]) elif arg.startswith('-throttle:'): get_throttle.setDelay(int(arg[10:])) elif arg.startswith('-putthrottle:'): put_throttle.setDelay(int(arg[13:])) else: return arg return None username = config.username if not config.username: print "Please make a file user-config.py, and put in there:" print "One line saying \"username='yy'\"" print "One line saying \"mylang='xx'\"" print "....filling in your real name and home wikipedia." print "for other possible configuration variables check config.py" sys.exit(1) setFamily(config.family) setMyLang(config.mylang) if not family.langs.has_key(mylang): print "Home-wikipedia from user-config.py does not exist" print "Defaulting to test: wikipedia" setMyLang('test') family.langs['test']='test.wikipedia.org'
|
def altlang(code):
    """Return the list of fallback language codes whose Wikipedia
    content is most useful to readers of the wiki in language 'code'.

    An empty list is returned when no alternative is known.  The order
    inside each returned list is significant: earlier codes are better
    fallbacks than later ones.
    """
    # Closed sets of language codes mapped to their fallback sequence.
    fallbacks = {
        ('fa', 'ku'): ['ar'],
        ('sk',): ['cs'],
        ('nds',): ['de', 'nl'],
        ('ca', 'gn', 'nah'): ['es'],
        ('eu',): ['es', 'fr'],
        ('gl',): ['es', 'pt'],
        ('br', 'oc', 'th', 'vi', 'wa'): ['fr'],
        ('als',): ['fr', 'de'],
        ('co',): ['fr', 'it'],
        ('fy',): ['nl'],
        ('csb',): ['pl'],
        ('mo', 'roa-rup'): ['ro'],
        ('be', 'lt', 'lv', 'uk'): ['ru'],
        ('uz',): ['ru', 'tr'],
        ('ja', 'ko', 'minnan', 'za', 'zh', 'zh-cn', 'zh-tw'): ['zh', 'zh-cn', 'zh-tw'],
        ('da',): ['nb', 'no'],
        ('is', 'no', 'nb'): ['no', 'nb', 'nn', 'da'],
        ('sv',): ['da', 'no', 'nb'],
        ('id', 'jv', 'ms', 'su'): ['id', 'ms', 'jv', 'su'],
        ('bs', 'hr', 'mk', 'sh', 'sr'): ['hr', 'sr', 'bs'],
        ('ia', 'ie'): ['ia', 'la', 'ie', 'es', 'fr', 'it'],
        ('sa',): ['hi'],
        ('yi',): ['he'],
        ('bi',): ['tpi'],
        ('tpi',): ['bi'],
    }
    for group, alternatives in fallbacks.items():
        if code in group:
            return alternatives
    return []
|
|
for alternative in altlang(code): if dict.has_key(alternative): return dict[alternative]
|
for alt in altlang(code): if dict.has_key(alt): return dict[alt]
|
def translate(code, dict):
    """Pick the most suitable entry of 'dict' for language 'code'.

    Lookup order: the language itself, then each of its fallback
    languages (see altlang), then English.  If none of those keys is
    present, an arbitrary value of the dictionary is returned.
    """
    # NOTE: the parameter is named 'dict' (shadowing the builtin) for
    # compatibility with existing callers.
    candidates = [code] + altlang(code) + ['en']
    for lang in candidates:
        if lang in dict:
            return dict[lang]
    # Last resort: any translation is better than none at all.
    return dict.values()[0]
|
address = '/w/wiki.phtml?title='+name
|
address = '/w/wiki.phtml?title='+name+"&redirect=no"
|
def getPage(code, name, do_edit=1, do_quote=1): """Get the contents of page 'name' from the 'code' language wikipedia""" host = langs[code] if code in oldsoftware: # Old algorithm name = re.sub('_', ' ', name) n=[] for x in name.split(): n.append(x[0].capitalize()+x[1:]) name='_'.join(n) #print name else: name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name!=urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if code not in oldsoftware: address = '/w/wiki.phtml?title='+name if do_edit: address += '&action=edit' else: if not do_edit: raise "can not skip edit on old-software wikipedia" address = '/wiki.cgi?action=edit&id='+name if debug: print host,address text,charset = getUrl(host,address) if do_edit: if debug: print "Raw:",len(text),type(text),text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code].lower()==charset.lower(),"charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code]=charset if code2encoding(code).lower()!=charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code,link2url(name,code)]=m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code,link2url(name,code)]=m.group(1) else: edittime[code,link2url(name,code)]=0 try: i1 = re.search('<textarea[^>]*>',text).end() except AttributeError: #print "No text area.",host,address #print repr(text) raise LockedPage(text) i2 = re.search('</textarea>',text).start() if i2-i1 < 2: # new software raise NoPage() if debug: print text[i1:i2] if text[i1:i2] == 'Describe the new page here.\n': # old software raise NoPage() Rredirect=re.compile(r'\#redirect:? 
*\[\[(.*?)\]\]',re.I) m=Rredirect.match(text[i1:i2]) if m: raise IsRedirectPage(m.group(1)) assert edittime[code,name]!=0 or code in oldsoftware, "No edittime on non-empty page?! %s:%s\n%s"%(code,name,text) x=text[i1:i2] x=unescape(x) else: x=text # If not editing if charset=='utf-8': # Make it to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup('utf-8') try: x,l=decode_func(x) except UnicodeError: print code,name print repr(x) raise # Convert the unicode characters to &# references, and make it ascii. x=str(UnicodeToAsciiHtml(x)) return x
|
assert edittime[code,name]!=0 or code in oldsoftware, "No edittime on non-empty page?! %s:%s\n%s"%(code,name,text)
|
if needput: assert edittime[code,name]!=0 or code in oldsoftware, "No edittime on non-empty page?! %s:%s\n%s"%(code,name,text)
|
def getPage(code, name, do_edit=1, do_quote=1): """Get the contents of page 'name' from the 'code' language wikipedia""" host = langs[code] if code in oldsoftware: # Old algorithm name = re.sub('_', ' ', name) n=[] for x in name.split(): n.append(x[0].capitalize()+x[1:]) name='_'.join(n) #print name else: name = re.sub(' ', '_', name) if not '%' in name and do_quote: # It should not have been done yet if name!=urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if code not in oldsoftware: address = '/w/wiki.phtml?title='+name if do_edit: address += '&action=edit' else: if not do_edit: raise "can not skip edit on old-software wikipedia" address = '/wiki.cgi?action=edit&id='+name if debug: print host,address text,charset = getUrl(host,address) if do_edit: if debug: print "Raw:",len(text),type(text),text.count('x') if charset is None: print "WARNING: No character set found" else: # Store character set for later reference if charsets.has_key(code): assert charsets[code].lower()==charset.lower(),"charset for %s changed from %s to %s"%(code,charsets[code],charset) charsets[code]=charset if code2encoding(code).lower()!=charset.lower(): raise ValueError("code2encodings has wrong charset for %s. It should be %s"%(code,charset)) if debug>1: print repr(text) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[code,link2url(name,code)]=m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[code,link2url(name,code)]=m.group(1) else: edittime[code,link2url(name,code)]=0 try: i1 = re.search('<textarea[^>]*>',text).end() except AttributeError: #print "No text area.",host,address #print repr(text) raise LockedPage(text) i2 = re.search('</textarea>',text).start() if i2-i1 < 2: # new software raise NoPage() if debug: print text[i1:i2] if text[i1:i2] == 'Describe the new page here.\n': # old software raise NoPage() Rredirect=re.compile(r'\#redirect:? 
*\[\[(.*?)\]\]',re.I) m=Rredirect.match(text[i1:i2]) if m: raise IsRedirectPage(m.group(1)) assert edittime[code,name]!=0 or code in oldsoftware, "No edittime on non-empty page?! %s:%s\n%s"%(code,name,text) x=text[i1:i2] x=unescape(x) else: x=text # If not editing if charset=='utf-8': # Make it to a unicode string encode_func, decode_func, stream_reader, stream_writer = codecs.lookup('utf-8') try: x,l=decode_func(x) except UnicodeError: print code,name print repr(x) raise # Convert the unicode characters to &# references, and make it ascii. x=str(UnicodeToAsciiHtml(x)) return x
|
if code in ['meta','bs','ru','eo','ja','zh','hi','he','hu','pl','ko','cs','el','sl']:
|
if code in ['meta','bs','ru','eo','ja','zh','hi','he','hu','pl','ko','cs','el','sl','ro']:
|
def code2encoding(code):
    """Return the character encoding used by the 'code' wikipedia.

    A fixed set of wikis runs on UTF-8; every other wiki is assumed
    to serve Latin-1.
    """
    utf8_wikis = ('meta', 'bs', 'ru', 'eo', 'ja', 'zh', 'hi', 'he',
                  'hu', 'pl', 'ko', 'cs', 'el', 'sl')
    if code in utf8_wikis:
        return 'utf-8'
    return 'iso-8859-1'
|
raise ValueError("Cannot locate entity for character %s"%repr(c))
|
raise NoSuchEntity("Cannot locate entity for character %s"%repr(c))
|
def addEntity(name):
    """Convert a unicode name into ascii name with entities"""
    # Plain ASCII characters are copied through unchanged; anything
    # else is replaced by a named HTML entity such as '&eacute;'.
    import htmlentitydefs
    result=''
    for c in name:
        if ord(c)<128:
            result+=str(c)
        else:
            # Scan the entity table for an entry denoting this character,
            # either as a literal one-character value or as a numeric
            # &#...; reference.
            for k,v in htmlentitydefs.entitydefs.iteritems():
                if (len(v)==1 and ord(c)==ord(v)) or v=='&#%d;'%ord(c):
                    result+='&%s;'%k; break
            else:
                # for/else: no entity in the table matched this character.
                raise ValueError("Cannot locate entity for character %s"%repr(c))
    print "DBG> addEntity:",repr(name),repr(result)
    return result
|
w2=r'([^\]\|]*)'
|
w2=r'([^\]]*)'
|
def imagelinks(self):
    """Return a list of PageLink objects for the images embedded in
    this page's text.

    The image namespace prefix is taken from the per-language 'image'
    table when present; otherwise the default 'Image:' prefix is used.
    """
    result = []
    if self._code in image:
        im=image[self._code] + ':'
    else:
        im='Image:'
    # w1 captures the image title (no ']' or '|' allowed), w2 the
    # optional caption text after a '|'.
    w1=r'('+im+'[^\]\|]*)'
    w2=r'([^\]\|]*)'
    Rlink = re.compile(r'\[\['+w1+r'(\|'+w2+r')?\]\]')
    for l in Rlink.findall(self.get()):
        result.append(PageLink(self._code,l[0]))
    return result
|
def read_pages_from_sql_dump(sqlfilename, old, regex):
|
def read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex):
|
def read_pages_from_sql_dump(sqlfilename, old, regex):
    """Generate PageLink objects for every page in a local SQL dump
    whose text contains 'old'.

    'old' is a compiled regular expression object when 'regex' is
    true, otherwise a plain substring.
    """
    import sqldump
    dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding())
    for entry in dump.entries():
        if regex:
            if old.search(entry.text):
                yield wikipedia.PageLink(wikipedia.mylang, entry.full_title())
        else:
            # Substring search; find() returns -1 when absent.
            if entry.text.find(old) != -1:
                yield wikipedia.PageLink(wikipedia.mylang, entry.full_title())
|
if regex: if old.search(entry.text): yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) else: if entry.text.find(old) != -1: yield wikipedia.PageLink(wikipedia.mylang, entry.full_title())
|
for exception in exceptions: if regex: exception = re.compile(exception) if exception.search(entry.text): break else: if entry.text.find(exception) != -1: break for old in replacements.keys(): if regex: old = re.compile(old) if old.search(entry.text): yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) break else: if entry.text.find(old) != -1: yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) break
|
def read_pages_from_sql_dump(sqlfilename, old, regex): import sqldump dump = sqldump.SQLdump(sqlfilename, wikipedia.myencoding()) for entry in dump.entries(): if regex: if old.search(entry.text): yield wikipedia.PageLink(wikipedia.mylang, entry.full_title()) else: if entry.text.find(old) != -1: yield wikipedia.PageLink(wikipedia.mylang, entry.full_title())
|
def read_pages_from_text_file(textfilename, old, regex):
|
def read_pages_from_text_file(textfilename):
|
def read_pages_from_text_file(textfilename, old, regex):
    """Generate a PageLink for every [[wiki link]] found in the given
    text file (at most one per line).

    'old' and 'regex' are accepted for interface compatibility with
    read_pages_from_sql_dump but are not used here: the file is
    expected to already list exactly the relevant pages.
    """
    # Read the whole file up front and close it immediately, so the
    # handle is not leaked when the caller abandons the generator
    # half-way through.  (Also drops a dead 'm = False' initialization
    # that was never read.)
    f = open(textfilename, 'r')
    lines = f.readlines()
    f.close()
    # Regular expression which will find [[wiki links]].
    R = re.compile(r'.*\[\[([^\]]*)\]\].*')
    for line in lines:
        m = R.match(line)
        if m:
            yield wikipedia.PageLink(wikipedia.mylang, m.group(1))
|
def generator(source, old, regex, textfilename = None, sqlfilename = None):
|
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagename = None):
|
def generator(source, old, regex, textfilename = None, sqlfilename = None):
    """Dispatch to the page generator that matches 'source'.

    'sqldump' scans a local SQL dump for pages containing 'old';
    'textfile' reads a list of wiki links from a plain text file.
    Any other value yields nothing.
    """
    if source == 'sqldump':
        pages = read_pages_from_sql_dump(sqlfilename, old, regex)
    elif source == 'textfile':
        pages = read_pages_from_text_file(textfilename, old, regex)
    else:
        return
    for pl in pages:
        yield pl
|
for pl in read_pages_from_sql_dump(sqlfilename, old, regex):
|
for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex):
|
def generator(source, old, regex, textfilename = None, sqlfilename = None): if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, old, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename, old, regex): yield pl
|
for pl in read_pages_from_text_file(textfilename, old, regex):
|
for pl in read_pages_from_text_file(textfilename):
|
def generator(source, old, regex, textfilename = None, sqlfilename = None): if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, old, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename, old, regex): yield pl
|
replacements = []
|
commandline_replacements = [] replacements = {} exceptions = []
|
def generator(source, old, regex, textfilename = None, sqlfilename = None): if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, old, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename, old, regex): yield pl
|
replacements.append(arg) if source == None or len(replacements) != 2:
|
commandline_replacements.append(arg) if source == None:
|
def generator(source, old, regex, textfilename = None, sqlfilename = None): if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, old, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename, old, regex): yield pl
|
old = replacements[0] new = replacements[1] if regex: old = re.compile(old)
|
# Decide where the replacement dictionary comes from: the command line,
# interactive input, or a predefined fix from the fixes table.
if (len(commandline_replacements) == 2 and fix == None):
    replacements[commandline_replacements[0]] = commandline_replacements[1]
elif fix == None:
    # Interactive mode: ask the user for any number of old/new pairs.
    old = wikipedia.input(u'Please enter the text that should be replaced:')
    new = wikipedia.input(u'Please enter the new text:')
    replacements[old] = new
    while True:
        old = wikipedia.input(u'Please enter another text that should be replaced, or press Enter to start:')
        if old == '':
            break
        new = wikipedia.input(u'Please enter the new text:')
        replacements[old] = new
else:
    # A predefined fix was requested; take all settings from its table.
    fix = fixes[fix]
    if fix.has_key('regex'):
        regex = fix['regex']
    if fix.has_key('msg'):
        wikipedia.setAction(fix['msg'][wikipedia.chooselang(wikipedia.mylang, fix['msg'])])
    if fix.has_key('exceptions'):
        # BUG FIX: this previously assigned to 'regex', clobbering the
        # regex flag and silently discarding the fix's exception list.
        exceptions = fix['exceptions']
    replacements = fix['replacements']
|
def generator(source, old, regex, textfilename = None, sqlfilename = None): if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, old, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename, old, regex): yield pl
|
wikipedia.setAction(msg[wikipedia.chooselang(wikipedia.mylang, msg)]) for pl in generator(source, old, regex, textfilename, sqlfilename):
|
for pl in generator(source, replacements, exceptions, regex, textfilename, sqlfilename, pagename):
|
def generator(source, old, regex, textfilename = None, sqlfilename = None): if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, old, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename, old, regex): yield pl
|
if regex: new_text = old.sub(new, original_text) else: new_text = original_text.replace(old, new) if new_text == original_text: print 'No changes were necessary in %s' % pl.linkname() else: showDiff(original_text, new_text) if not acceptall: choice = wikipedia.input(u'Do you want to accept these changes? [y|n|a(ll)]') if choice in ['a', 'A']: acceptall = True choice = 'y' if choice in ['y', 'Y']: pl.put(new_text)
|
skip_page = False for exception in exceptions: if regex: exception = re.compile(exception) hit = exception.search(original_text) if hit: wikipedia.output('Skipping %s because it contains %s' % (pl.linkname(), hit.group(0))) skip_page = True break else: hit = original_text.find(exception) if hit != -1: wikipedia.output('Skipping %s because it contains %s' % (pl.linkname(), original_text[hit:hit + len(exception)])) skip_page = True break if not skip_page: new_text = original_text for old, new in replacements.items(): if regex: old = re.compile(old) new_text = old.sub(new, new_text) else: new_text = new_text.replace(old, new) if new_text == original_text: print 'No changes were necessary in %s' % pl.linkname() else: showDiff(original_text, new_text) if not acceptall: choice = wikipedia.input(u'Do you want to accept these changes? [y|n|a(ll)]') if choice in ['a', 'A']: acceptall = True choice = 'y' if choice in ['y', 'Y']: pl.put(new_text)
|
def generator(source, old, regex, textfilename = None, sqlfilename = None): if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, old, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename, old, regex): yield pl
|
path = 'copyright/' + i[0] + '/' + i[2]
|
path = appdir + i[0] + '/' + i[2]
|
def exclusion_file_list():
    """Yield (page, path) pairs for every source page listed in
    pages_for_exclusion_database, where 'path' is the local cache file
    holding that page's text.
    """
    for i in pages_for_exclusion_database:
        # Each entry is used as (language code, page title, file name)
        # judging by the accesses below.
        path = 'copyright/' + i[0] + '/' + i[2]
        # Ensure the per-language cache directory exists.
        mediawiki_messages.makepath(path)
        p = wikipedia.Page(wikipedia.getSite(i[0]),i[1])
        yield p, path
|
def load_pages(): write = False
|
def load_pages(force_update = False):
|
def load_pages():
    """Refresh the local cache files of the exclusion source pages.

    A cache file is (re)written when it is older than 24 hours or does
    not exist yet.
    """
    write = False
    for page, path in exclusion_file_list():
        try:
            file_age = time.time() - os.path.getmtime(path)
            if file_age > 24 * 60 * 60:
                print 'Updating source pages to exclude new URLs...'
                write = True
        except OSError:
            # Cache file missing: create it below.
            write = True
        # NOTE(review): 'write' is never reset inside the loop, so one
        # stale/missing file forces a re-download of every following
        # page as well -- confirm whether that is intended.
        if write:
            f = codecs.open(path, 'w', 'utf-8')
            f.write(page.get())
            f.close()
    return
|
print 'Updating source pages to exclude new URLs...' write = True
|
print 'Updating page [[' + page.title() + ']] to exclude new URLs...' length = 0
|
def load_pages(): write = False for page, path in exclusion_file_list(): try: file_age = time.time() - os.path.getmtime(path) if file_age > 24 * 60 * 60: print 'Updating source pages to exclude new URLs...' write = True except OSError: write = True if write: f = codecs.open(path, 'w', 'utf-8') f.write(page.get()) f.close() return
|
write = True if write: f = codecs.open(path, 'w', 'utf-8') f.write(page.get()) f.close()
|
pass if length == 0 or force_update: try: data = page.get() f = codecs.open(path, 'w', 'utf-8') f.write(data) f.close() except wikipedia.IsRedirectPage: data = page.get(get_redirect=True) except: print 'Getting page failed'
|
def load_pages(): write = False for page, path in exclusion_file_list(): try: file_age = time.time() - os.path.getmtime(path) if file_age > 24 * 60 * 60: print 'Updating source pages to exclude new URLs...' write = True except OSError: write = True if write: f = codecs.open(path, 'w', 'utf-8') f.write(page.get()) f.close() return
|
def check_list(text, cl):
|
def check_list(text, cl, debug=False):
|
def check_list(text, cl):
    """Return True when 'text' contains any non-empty entry of the
    exclusion list 'cl'; fall through (returning None, which is falsy)
    otherwise.
    """
    for entry in cl:
        if entry:
            if text.find(entry) != -1:
                print 'SKIP URL ' + text
                #print 'DEBUG: ' + entry
                return True
|
print 'SKIP URL ' + text
|
if debug: print 'SKIP URL ' + text
|
def check_list(text, cl): for entry in cl: if entry: if text.find(entry) != -1: print 'SKIP URL ' + text #print 'DEBUG: ' + entry return True
|
f = codecs.open(path, "r", 'utf-8') data = f.read() f.close() prelist += re.findall("(?i)url\s*=\s*<nowiki>(?:http://)?(.*?)</nowiki>", data) prelist += re.findall("(?i)\*\s*Site:\s*\[?(?:http://)?(.*?)\]?", data) if 'copyright/it/Cloni.txt' in path: prelist += re.findall('(?i)^==(?!=)\s*\[?\s*(?:<nowiki>)?(?:http://)?(.*?)(?:</nowiki>)?\s*\]?\s*==', data)
|
if 'exclusion_list.txt' in path: result_list += re.sub("</?pre>","", read_file(path, cut_comment = True)).splitlines() else: data = read_file(path) prelist += re.findall("(?i)url\s*=\s*<nowiki>(?:http://)?(.*)</nowiki>", data) prelist += re.findall("(?i)\*\s*Site:\s*\[?(?:http://)?(.*)\]?", data) if 'it/Cloni.txt' in path: prelist += re.findall('(?mi)^==(?!=)\s*\[?\s*(?:<nowiki>)?\s*(?:http://)?(.*?)(?:</nowiki>)?\s*\]?\s*==', data)
|
def exclusion_list():
    """Build the list of domains to be excluded from the copyright
    search.

    The list is assembled from cached wiki pages that enumerate known
    Wikipedia mirrors, plus a manually maintained local
    exclusion_list.txt file.
    """
    prelist = []
    load_pages()
    for page, path in exclusion_file_list():
        f = codecs.open(path, "r", 'utf-8')
        data = f.read()
        f.close()
        # wikipedia:en:Wikipedia:Mirrors and forks
        prelist += re.findall("(?i)url\s*=\s*<nowiki>(?:http://)?(.*?)</nowiki>", data)
        prelist += re.findall("(?i)\*\s*Site:\s*\[?(?:http://)?(.*?)\]?", data)
        # wikipedia:it:Wikipedia:Cloni
        if 'copyright/it/Cloni.txt' in path:
            prelist += re.findall('(?i)^==(?!=)\s*\[?\s*(?:<nowiki>)?(?:http://)?(.*?)(?:</nowiki>)?\s*\]?\s*==', data)
        #prelist += re.findall("(?i)<h2>\s*(?:http://)?(.*?)\s*</h2>", data)
    # A single match may name several mirrors ("a.com, b.com and
    # c.com"); split those apart in two passes.
    list1 = []
    for entry in prelist:
        list1 += entry.split(", ")
    list2 = []
    for entry in list1:
        list2 += entry.split("and ")
    list3 = []
    for entry in list2:
        entry = re.sub("http://", "", entry)
        if entry:
            # Keep only the host part, dropping any path component and
            # trailing free text.
            if '/' in entry:
                list3 += [re.sub(" .*", "", entry[:entry.rfind('/')])]
            else:
                list3 += [re.sub(" .*", "", entry)]
            #list3 += [re.sub("/.*", "", entry)]
    # Local additions; '#' starts a comment and is stripped.
    f = codecs.open('copyright/exclusion_list.txt', 'r','utf-8')
    list3 += re.sub(" ?#.*","",f.read()).splitlines()
    f.close()
    return list3
|
list3 = []
|
def exclusion_list(): prelist = [] load_pages() for page, path in exclusion_file_list(): f = codecs.open(path, "r", 'utf-8') data = f.read() f.close() # wikipedia:en:Wikipedia:Mirrors and forks prelist += re.findall("(?i)url\s*=\s*<nowiki>(?:http://)?(.*?)</nowiki>", data) prelist += re.findall("(?i)\*\s*Site:\s*\[?(?:http://)?(.*?)\]?", data) # wikipedia:it:Wikipedia:Cloni if 'copyright/it/Cloni.txt' in path: prelist += re.findall('(?i)^==(?!=)\s*\[?\s*(?:<nowiki>)?(?:http://)?(.*?)(?:</nowiki>)?\s*\]?\s*==', data) #prelist += re.findall("(?i)<h2>\s*(?:http://)?(.*?)\s*</h2>", data) list1 = [] for entry in prelist: list1 += entry.split(", ") list2 = [] for entry in list1: list2 += entry.split("and ") list3 = [] for entry in list2: entry = re.sub("http://", "", entry) if entry: if '/' in entry: list3 += [re.sub(" .*", "", entry[:entry.rfind('/')])] else: list3 += [re.sub(" .*", "", entry)] #list3 += [re.sub("/.*", "", entry)] f = codecs.open('copyright/exclusion_list.txt', 'r','utf-8') list3 += re.sub(" ?#.*","",f.read()).splitlines() f.close() return list3
|
|
list3 += [re.sub(" .*", "", entry[:entry.rfind('/')])] else: list3 += [re.sub(" .*", "", entry)] f = codecs.open('copyright/exclusion_list.txt', 'r','utf-8') list3 += re.sub(" ?
|
result_list += [re.sub(" .*", "", entry[:entry.rfind('/')])] else: result_list += [re.sub(" .*", "", entry)] result_list += read_file(appdir + 'exclusion_list.txt', cut_comment = True).splitlines() return result_list def read_file(filename, cut_comment = False): text = u"" f = codecs.open(filename, 'r','utf-8') text = f.read() if cut_comment: text = re.sub(" ?
|
def exclusion_list(): prelist = [] load_pages() for page, path in exclusion_file_list(): f = codecs.open(path, "r", 'utf-8') data = f.read() f.close() # wikipedia:en:Wikipedia:Mirrors and forks prelist += re.findall("(?i)url\s*=\s*<nowiki>(?:http://)?(.*?)</nowiki>", data) prelist += re.findall("(?i)\*\s*Site:\s*\[?(?:http://)?(.*?)\]?", data) # wikipedia:it:Wikipedia:Cloni if 'copyright/it/Cloni.txt' in path: prelist += re.findall('(?i)^==(?!=)\s*\[?\s*(?:<nowiki>)?(?:http://)?(.*?)(?:</nowiki>)?\s*\]?\s*==', data) #prelist += re.findall("(?i)<h2>\s*(?:http://)?(.*?)\s*</h2>", data) list1 = [] for entry in prelist: list1 += entry.split(", ") list2 = [] for entry in list1: list2 += entry.split("and ") list3 = [] for entry in list2: entry = re.sub("http://", "", entry) if entry: if '/' in entry: list3 += [re.sub(" .*", "", entry[:entry.rfind('/')])] else: list3 += [re.sub(" .*", "", entry)] #list3 += [re.sub("/.*", "", entry)] f = codecs.open('copyright/exclusion_list.txt', 'r','utf-8') list3 += re.sub(" ?#.*","",f.read()).splitlines() f.close() return list3
|
return list3 def write_log(text, filename = "copyright/output.txt"): file1=codecs.open(filename, 'a', 'utf-8') file1.write(text) file1.close()
|
return text def write_log(text, filename = output_file): f = codecs.open(filename, 'a', 'utf-8') f.write(text) f.close()
|
def exclusion_list(): prelist = [] load_pages() for page, path in exclusion_file_list(): f = codecs.open(path, "r", 'utf-8') data = f.read() f.close() # wikipedia:en:Wikipedia:Mirrors and forks prelist += re.findall("(?i)url\s*=\s*<nowiki>(?:http://)?(.*?)</nowiki>", data) prelist += re.findall("(?i)\*\s*Site:\s*\[?(?:http://)?(.*?)\]?", data) # wikipedia:it:Wikipedia:Cloni if 'copyright/it/Cloni.txt' in path: prelist += re.findall('(?i)^==(?!=)\s*\[?\s*(?:<nowiki>)?(?:http://)?(.*?)(?:</nowiki>)?\s*\]?\s*==', data) #prelist += re.findall("(?i)<h2>\s*(?:http://)?(.*?)\s*</h2>", data) list1 = [] for entry in prelist: list1 += entry.split(", ") list2 = [] for entry in list1: list2 += entry.split("and ") list3 = [] for entry in list2: entry = re.sub("http://", "", entry) if entry: if '/' in entry: list3 += [re.sub(" .*", "", entry[:entry.rfind('/')])] else: list3 += [re.sub(" .*", "", entry)] #list3 += [re.sub("/.*", "", entry)] f = codecs.open('copyright/exclusion_list.txt', 'r','utf-8') list3 += re.sub(" ?#.*","",f.read()).splitlines() f.close() return list3
|
text = text.replace("<br>", "") text = text.replace("<br/>", "") text = text.replace("<br />", "")
|
text = re.sub('(?i)<br(\s*/)?>', '', text)
|
def cleanwikicode(text):
    """Strip wiki markup from 'text' so that only plain prose remains
    for the copyright search.

    Returns '' for empty/None input.
    """
    if not text:
        return ""
    #wikipedia.output(text)
    # Drop explicit line breaks and HTML comments.
    text = text.replace("<br>", "")
    text = text.replace("<br/>", "")
    text = text.replace("<br />", "")
    text = re.sub('<!--.*?-->', '', text)
    if exclude_quote:
        # Quotations are expected to match external sources and would
        # produce false positives, so remove quote templates and lines
        # that consist of a quoted passage (optionally followed by a
        # parenthesised source or <ref>).
        text = re.sub("(?i){{quote|.*?}}", "", text)
        text = re.sub("^:''.*?''\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$", "", text)
        text = re.sub('^[:*]?["][^"]+["]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text)
        text = re.sub('^[:*]?[«][^»]+[»]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text)
        # NOTE(review): the character classes below look mis-encoded
        # (mojibake for curly double quotes) -- confirm against the
        # original source file.
        text = re.sub('^[:*]?[“][^â€]+[â€]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text)
    # Remove bare URLs, keep only the label of piped [[links|labels]],
    # and drop remaining templates and markup characters.
    text = re.sub('http://[a-z/._%0-9]+ ', ' ', text)
    text = re.sub('\[\[[^\|]+\|(.*?)\]\]', '\\1', text)
    text = re.sub('{{.*?}}', '', text)
    text = text.replace("''", "")
    text = text.replace("[", "")
    text = text.replace("]", "")
    text = re.sub('^[*:;]', '', text)
    text = text.replace("<!--", "")
    text = text.replace("-->", "")
    #wikipedia.output("CLEANED: %s" % (text))
    return text
|
text = re.sub("^:''.*?''\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$", "", text) text = re.sub('^[:*]?["][^"]+["]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[«][^»]+[»]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[“][^â€]+[â€]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('http://[a-z/._%0-9]+ ', ' ', text) text = re.sub('\[\[[^\|]+\|(.*?)\]\]', '\\1', text)
|
text = re.sub("^[:*]?\s*''.*?''\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$", "", text) text = re.sub('^[:*]?\s*["][^"]+["]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?\s*[«][^»]+[»]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?\s*[“][^”]+[”]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub ("<ref.*?>.*?</ref>", "", text) text = re.sub('^(\||{[^{]).*', "", text) text = re.sub('http://[\w/.,;:@&=% text = re.sub("\[\[[^\]]*?\|(.*?)\]\]", "\\1", text) text = re.sub("</*nowiki>", "", text)
|
def cleanwikicode(text): if not text: return "" #wikipedia.output(text) text = text.replace("<br>", "") text = text.replace("<br/>", "") text = text.replace("<br />", "") text = re.sub('<!--.*?-->', '', text) if exclude_quote: text = re.sub("(?i){{quote|.*?}}", "", text) text = re.sub("^:''.*?''\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$", "", text) text = re.sub('^[:*]?["][^"]+["]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[«][^»]+[»]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[“][^â€]+[â€]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('http://[a-z/._%0-9]+ ', ' ', text) text = re.sub('\[\[[^\|]+\|(.*?)\]\]', '\\1', text) text = re.sub('{{.*?}}', '', text) text = text.replace("''", "") text = text.replace("[", "") text = text.replace("]", "") text = re.sub('^[*:;]', '', text) text = text.replace("<!--", "") text = text.replace("-->", "") #wikipedia.output("CLEANED: %s" % (text)) return text
|
text = text.replace("''", "")
|
text = re.sub('<math>.*?</math>', '', text)
|
def cleanwikicode(text): if not text: return "" #wikipedia.output(text) text = text.replace("<br>", "") text = text.replace("<br/>", "") text = text.replace("<br />", "") text = re.sub('<!--.*?-->', '', text) if exclude_quote: text = re.sub("(?i){{quote|.*?}}", "", text) text = re.sub("^:''.*?''\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$", "", text) text = re.sub('^[:*]?["][^"]+["]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[«][^»]+[»]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('^[:*]?[“][^â€]+[â€]\.?\s*((\(|<ref>).*?(\)|</ref>))?\.?$', "", text) text = re.sub('http://[a-z/._%0-9]+ ', ' ', text) text = re.sub('\[\[[^\|]+\|(.*?)\]\]', '\\1', text) text = re.sub('{{.*?}}', '', text) text = text.replace("''", "") text = text.replace("[", "") text = text.replace("]", "") text = re.sub('^[*:;]', '', text) text = text.replace("<!--", "") text = text.replace("-->", "") #wikipedia.output("CLEANED: %s" % (text)) return text
|
if len(line)>200: n_query+=1 if n_query>max_query_for_page: print "Max query limit for page reached" return output if len(line)>max_query_len: line=line[:max_query_len] glen=len(line) while line[glen-1] != ' ': glen -= 1 line = line[:glen] results = get_results(line) for url, engine in results: output += '\n*%s - %s' % (engine, url) if results: output += '\n**' + line
|
for search_words in mysplit(line,31," "): if len(search_words)>120: n_query += 1 if max_query_for_page and n_query>max_query_for_page: print "Max query limit for page reached" return output if len(search_words)>max_query_len: search_words=search_words[:max_query_len] if " " in search_words: search_words = search_words[:search_words.rindex(" ")] results = get_results(search_words) group_url = '' for url, engine in results: group_url += '\n*%s - %s' % (engine, url) if results: if previous_group_url == group_url: output += '\n**' + search_words else: output += group_url + '\n**' + search_words previous_group_url = group_url
|
def query(lines = [], max_query_len = 1300): # Google max_query_len = 1480? # - '-Wikipedia ""' = 1467 output = u"" n_query = 0 for line in lines: line = cleanwikicode(line) if len(line)>200: n_query+=1 if n_query>max_query_for_page: print "Max query limit for page reached" return output if len(line)>max_query_len: line=line[:max_query_len] glen=len(line) while line[glen-1] != ' ': glen -= 1 line = line[:glen] # wikipedia.output(line) results = get_results(line) for url, engine in results: output += '\n*%s - %s' % (engine, url) if results: output += '\n**' + line return output
|
if check_list(url[i+offset][0], excl_list):
|
if check_list(url[i+offset][0], excl_list, debug=True):
|
def get_results(query, numresults = 10): url = list() if search_in_google: import google google.LICENSE_KEY = config.google_key print " google query..." search_request_retry = 6 while search_request_retry: #SOAP.faultType: <Fault SOAP-ENV:Server: Exception from service object: # Daily limit of 1000 queries exceeded for key xxx> try: data = google.doGoogleSearch('-Wikipedia "' + query + '"') search_request_retry = 0 for entry in data.results: url.append((entry.URL, 'google')) except Exception, err: print "Got an error ->", err search_request_retry -= 1 if search_in_yahoo: import yahoo.search.web print " yahoo query..." data = yahoo.search.web.WebSearch(config.yahoo_appid, query='"' + query.encode('utf_8')+ '" -Wikipedia', results=numresults) search_request_retry = 6 while search_request_retry: try: for entry in data.parse_results(): if not check_urllist(url, entry.Url): url.append((entry.Url, 'yahoo')) search_request_retry = 0 except Exception, err: print "Got an error ->", err search_request_retry -= 1 offset = 0 for i in range(len(url)): if check_list(url[i+offset][0], excl_list): url.pop(i+offset) offset+=-1 return url
|
write_log('=== [[' + page.title() + ']] ===' + output + '\n')
|
write_log('=== [[' + page.title() + ']] ===' + output + '\n', filename = output_file)
|
def run(self): """ Starts the robot. """ # Run the generator which will yield Pages which might need to be # checked. for page in self.generator: try: # Load the page's text from the wiki original_text = page.get() except wikipedia.NoPage: wikipedia.output(u'Page %s not found' % page.title()) continue except wikipedia.IsRedirectPage: original_text = page.get(get_redirect=True)
|
global search_in_google, search_in_yahoo
|
global search_in_google, search_in_yahoo, max_query_for_page, output_file
|
def main(): global search_in_google, search_in_yahoo gen = None # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # pages which will be processed when the -page parameter is used PageTitles = [] # a page whose referrers will be processed when the -ref parameter is used referredPageTitle = None # an image page whose file links will be processed when the -filelinks parameter is used fileLinksPageTitle = None # a page whose links will be processed when the -links parameter is used linkingPageTitle = None # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None repeat = False # Read commandline parameters. for arg in wikipedia.handleArgs(): if arg.startswith('-filelinks'): if len(arg) == 10: fileLinksPageTitle = wikipedia.input(u'Links to which image page should be processed?') else: fileLinksPageTitle = arg[11:] #TODO: Replace 'Image:' with something that automatically gets the name of images based on the language. 
fileLinksPage = wikipedia.Page(wikipedia.getSite(), 'Image:' + fileLinksPageTitle) gen = pagegenerators.FileLinksGenerator(fileLinksPage) elif arg.startswith('-repeat'): repeat = True elif arg.startswith('-y'): search_in_yahoo = True elif arg.startswith('-g'): search_in_google = True elif arg.startswith('-ny'): search_in_yahoo = False elif arg.startswith('-ng'): search_in_google = False elif arg.startswith('-new'): if len(arg) >=5: gen = pagegenerators.NewpagesPageGenerator(number=int(arg[5:]), repeat = repeat) else: gen = pagegenerators.NewpagesPageGenerator(number=80, repeat = repeat) elif arg.startswith('-file'): if len(arg) >= 6: textfilename = arg[6:] gen = pagegenerators.TextfilePageGenerator(textfilename) elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] cat = catlib.Category(wikipedia.getSite(), 'Category:%s' % categoryname) gen = pagegenerators.CategorizedPageGenerator(cat) elif arg.startswith('-xml'): if len(arg) == 4: xmlFilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlFilename = arg[5:] elif arg.startswith('-page'): if len(arg) == 5: PageTitles.append(wikipedia.input(u'Which page do you want to change?')) else: PageTitles.append(arg[6:]) source = 'specificPages' elif arg.startswith('-ref'): if len(arg) == 4: referredPageTitle = wikipedia.input(u'Links to which page should be processed?') else: referredPageTitle = arg[5:] referredPage = wikipedia.Page(wikipedia.getSite(), referredPageTitle) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links'): if len(arg) == 6: linkingPageTitle = wikipedia.input(u'Links from which page should be processed?') else: linkingPageTitle = arg[7:] linkingPage = wikipedia.Page(wikipedia.getSite(), linkingPageTitle) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-start'): if len(arg) == 6: firstPageTitle = wikipedia.input(u'Which page do you want 
to change?') else: firstPageTitle = arg[7:] namespace = wikipedia.Page(wikipedia.getSite(), firstPageTitle).namespace() firstPageTitle = wikipedia.Page(wikipedia.getSite(), firstPageTitle).titleWithoutNamespace() gen = pagegenerators.AllpagesPageGenerator(firstPageTitle, namespace) elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) if PageTitles: pages = [wikipedia.Page(wikipedia.getSite(), PageTitle) for PageTitle in PageTitles] gen = iter(pages) if not gen: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = CheckRobot(preloadingGen) bot.run()
|
startpage = None
|
def main(): global search_in_google, search_in_yahoo gen = None # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # pages which will be processed when the -page parameter is used PageTitles = [] # a page whose referrers will be processed when the -ref parameter is used referredPageTitle = None # an image page whose file links will be processed when the -filelinks parameter is used fileLinksPageTitle = None # a page whose links will be processed when the -links parameter is used linkingPageTitle = None # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None repeat = False # Read commandline parameters. for arg in wikipedia.handleArgs(): if arg.startswith('-filelinks'): if len(arg) == 10: fileLinksPageTitle = wikipedia.input(u'Links to which image page should be processed?') else: fileLinksPageTitle = arg[11:] #TODO: Replace 'Image:' with something that automatically gets the name of images based on the language. 
fileLinksPage = wikipedia.Page(wikipedia.getSite(), 'Image:' + fileLinksPageTitle) gen = pagegenerators.FileLinksGenerator(fileLinksPage) elif arg.startswith('-repeat'): repeat = True elif arg.startswith('-y'): search_in_yahoo = True elif arg.startswith('-g'): search_in_google = True elif arg.startswith('-ny'): search_in_yahoo = False elif arg.startswith('-ng'): search_in_google = False elif arg.startswith('-new'): if len(arg) >=5: gen = pagegenerators.NewpagesPageGenerator(number=int(arg[5:]), repeat = repeat) else: gen = pagegenerators.NewpagesPageGenerator(number=80, repeat = repeat) elif arg.startswith('-file'): if len(arg) >= 6: textfilename = arg[6:] gen = pagegenerators.TextfilePageGenerator(textfilename) elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] cat = catlib.Category(wikipedia.getSite(), 'Category:%s' % categoryname) gen = pagegenerators.CategorizedPageGenerator(cat) elif arg.startswith('-xml'): if len(arg) == 4: xmlFilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlFilename = arg[5:] elif arg.startswith('-page'): if len(arg) == 5: PageTitles.append(wikipedia.input(u'Which page do you want to change?')) else: PageTitles.append(arg[6:]) source = 'specificPages' elif arg.startswith('-ref'): if len(arg) == 4: referredPageTitle = wikipedia.input(u'Links to which page should be processed?') else: referredPageTitle = arg[5:] referredPage = wikipedia.Page(wikipedia.getSite(), referredPageTitle) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links'): if len(arg) == 6: linkingPageTitle = wikipedia.input(u'Links from which page should be processed?') else: linkingPageTitle = arg[7:] linkingPage = wikipedia.Page(wikipedia.getSite(), linkingPageTitle) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-start'): if len(arg) == 6: firstPageTitle = wikipedia.input(u'Which page do you want 
to change?') else: firstPageTitle = arg[7:] namespace = wikipedia.Page(wikipedia.getSite(), firstPageTitle).namespace() firstPageTitle = wikipedia.Page(wikipedia.getSite(), firstPageTitle).titleWithoutNamespace() gen = pagegenerators.AllpagesPageGenerator(firstPageTitle, namespace) elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) if PageTitles: pages = [wikipedia.Page(wikipedia.getSite(), PageTitle) for PageTitle in PageTitles] gen = iter(pages) if not gen: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = CheckRobot(preloadingGen) bot.run()
|
|
gen = pagegenerators.NewpagesPageGenerator(number=80, repeat = repeat)
|
gen = pagegenerators.NewpagesPageGenerator(number=60, repeat = repeat)
|
def main(): global search_in_google, search_in_yahoo gen = None # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # pages which will be processed when the -page parameter is used PageTitles = [] # a page whose referrers will be processed when the -ref parameter is used referredPageTitle = None # an image page whose file links will be processed when the -filelinks parameter is used fileLinksPageTitle = None # a page whose links will be processed when the -links parameter is used linkingPageTitle = None # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None repeat = False # Read commandline parameters. for arg in wikipedia.handleArgs(): if arg.startswith('-filelinks'): if len(arg) == 10: fileLinksPageTitle = wikipedia.input(u'Links to which image page should be processed?') else: fileLinksPageTitle = arg[11:] #TODO: Replace 'Image:' with something that automatically gets the name of images based on the language. 
fileLinksPage = wikipedia.Page(wikipedia.getSite(), 'Image:' + fileLinksPageTitle) gen = pagegenerators.FileLinksGenerator(fileLinksPage) elif arg.startswith('-repeat'): repeat = True elif arg.startswith('-y'): search_in_yahoo = True elif arg.startswith('-g'): search_in_google = True elif arg.startswith('-ny'): search_in_yahoo = False elif arg.startswith('-ng'): search_in_google = False elif arg.startswith('-new'): if len(arg) >=5: gen = pagegenerators.NewpagesPageGenerator(number=int(arg[5:]), repeat = repeat) else: gen = pagegenerators.NewpagesPageGenerator(number=80, repeat = repeat) elif arg.startswith('-file'): if len(arg) >= 6: textfilename = arg[6:] gen = pagegenerators.TextfilePageGenerator(textfilename) elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] cat = catlib.Category(wikipedia.getSite(), 'Category:%s' % categoryname) gen = pagegenerators.CategorizedPageGenerator(cat) elif arg.startswith('-xml'): if len(arg) == 4: xmlFilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlFilename = arg[5:] elif arg.startswith('-page'): if len(arg) == 5: PageTitles.append(wikipedia.input(u'Which page do you want to change?')) else: PageTitles.append(arg[6:]) source = 'specificPages' elif arg.startswith('-ref'): if len(arg) == 4: referredPageTitle = wikipedia.input(u'Links to which page should be processed?') else: referredPageTitle = arg[5:] referredPage = wikipedia.Page(wikipedia.getSite(), referredPageTitle) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links'): if len(arg) == 6: linkingPageTitle = wikipedia.input(u'Links from which page should be processed?') else: linkingPageTitle = arg[7:] linkingPage = wikipedia.Page(wikipedia.getSite(), linkingPageTitle) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-start'): if len(arg) == 6: firstPageTitle = wikipedia.input(u'Which page do you want 
to change?') else: firstPageTitle = arg[7:] namespace = wikipedia.Page(wikipedia.getSite(), firstPageTitle).namespace() firstPageTitle = wikipedia.Page(wikipedia.getSite(), firstPageTitle).titleWithoutNamespace() gen = pagegenerators.AllpagesPageGenerator(firstPageTitle, namespace) elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) if PageTitles: pages = [wikipedia.Page(wikipedia.getSite(), PageTitle) for PageTitle in PageTitles] gen = iter(pages) if not gen: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = CheckRobot(preloadingGen) bot.run()
|
gen = pagegenerators.CategorizedPageGenerator(cat)
|
if firstPageTitle: gen = pagegenerators.CategorizedPageGenerator(cat, recurse = catrecurse, start = firstPageTitle) else: gen = pagegenerators.CategorizedPageGenerator(cat, recurse = catrecurse)
|
def main(): global search_in_google, search_in_yahoo gen = None # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # pages which will be processed when the -page parameter is used PageTitles = [] # a page whose referrers will be processed when the -ref parameter is used referredPageTitle = None # an image page whose file links will be processed when the -filelinks parameter is used fileLinksPageTitle = None # a page whose links will be processed when the -links parameter is used linkingPageTitle = None # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None repeat = False # Read commandline parameters. for arg in wikipedia.handleArgs(): if arg.startswith('-filelinks'): if len(arg) == 10: fileLinksPageTitle = wikipedia.input(u'Links to which image page should be processed?') else: fileLinksPageTitle = arg[11:] #TODO: Replace 'Image:' with something that automatically gets the name of images based on the language. 
fileLinksPage = wikipedia.Page(wikipedia.getSite(), 'Image:' + fileLinksPageTitle) gen = pagegenerators.FileLinksGenerator(fileLinksPage) elif arg.startswith('-repeat'): repeat = True elif arg.startswith('-y'): search_in_yahoo = True elif arg.startswith('-g'): search_in_google = True elif arg.startswith('-ny'): search_in_yahoo = False elif arg.startswith('-ng'): search_in_google = False elif arg.startswith('-new'): if len(arg) >=5: gen = pagegenerators.NewpagesPageGenerator(number=int(arg[5:]), repeat = repeat) else: gen = pagegenerators.NewpagesPageGenerator(number=80, repeat = repeat) elif arg.startswith('-file'): if len(arg) >= 6: textfilename = arg[6:] gen = pagegenerators.TextfilePageGenerator(textfilename) elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] cat = catlib.Category(wikipedia.getSite(), 'Category:%s' % categoryname) gen = pagegenerators.CategorizedPageGenerator(cat) elif arg.startswith('-xml'): if len(arg) == 4: xmlFilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlFilename = arg[5:] elif arg.startswith('-page'): if len(arg) == 5: PageTitles.append(wikipedia.input(u'Which page do you want to change?')) else: PageTitles.append(arg[6:]) source = 'specificPages' elif arg.startswith('-ref'): if len(arg) == 4: referredPageTitle = wikipedia.input(u'Links to which page should be processed?') else: referredPageTitle = arg[5:] referredPage = wikipedia.Page(wikipedia.getSite(), referredPageTitle) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links'): if len(arg) == 6: linkingPageTitle = wikipedia.input(u'Links from which page should be processed?') else: linkingPageTitle = arg[7:] linkingPage = wikipedia.Page(wikipedia.getSite(), linkingPageTitle) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-start'): if len(arg) == 6: firstPageTitle = wikipedia.input(u'Which page do you want 
to change?') else: firstPageTitle = arg[7:] namespace = wikipedia.Page(wikipedia.getSite(), firstPageTitle).namespace() firstPageTitle = wikipedia.Page(wikipedia.getSite(), firstPageTitle).titleWithoutNamespace() gen = pagegenerators.AllpagesPageGenerator(firstPageTitle, namespace) elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) if PageTitles: pages = [wikipedia.Page(wikipedia.getSite(), PageTitle) for PageTitle in PageTitles] gen = iter(pages) if not gen: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = CheckRobot(preloadingGen) bot.run()
|
if not gen:
|
if ids: checks_by_ids(ids) if not gen and not ids:
|
def main(): global search_in_google, search_in_yahoo gen = None # Can either be 'xmldump', 'textfile' or 'userinput'. source = None # the textfile's path, either absolute or relative, which will be used when # source is 'textfile'. textfilename = None # the category name which will be used when source is 'category'. categoryname = None # pages which will be processed when the -page parameter is used PageTitles = [] # a page whose referrers will be processed when the -ref parameter is used referredPageTitle = None # an image page whose file links will be processed when the -filelinks parameter is used fileLinksPageTitle = None # a page whose links will be processed when the -links parameter is used linkingPageTitle = None # will become True when the user presses a ('yes to all') or uses the -always # commandline paramater. acceptall = False # Which namespaces should be processed? # default to [] which means all namespaces will be processed namespaces = [] # Which page to start startpage = None repeat = False # Read commandline parameters. for arg in wikipedia.handleArgs(): if arg.startswith('-filelinks'): if len(arg) == 10: fileLinksPageTitle = wikipedia.input(u'Links to which image page should be processed?') else: fileLinksPageTitle = arg[11:] #TODO: Replace 'Image:' with something that automatically gets the name of images based on the language. 
fileLinksPage = wikipedia.Page(wikipedia.getSite(), 'Image:' + fileLinksPageTitle) gen = pagegenerators.FileLinksGenerator(fileLinksPage) elif arg.startswith('-repeat'): repeat = True elif arg.startswith('-y'): search_in_yahoo = True elif arg.startswith('-g'): search_in_google = True elif arg.startswith('-ny'): search_in_yahoo = False elif arg.startswith('-ng'): search_in_google = False elif arg.startswith('-new'): if len(arg) >=5: gen = pagegenerators.NewpagesPageGenerator(number=int(arg[5:]), repeat = repeat) else: gen = pagegenerators.NewpagesPageGenerator(number=80, repeat = repeat) elif arg.startswith('-file'): if len(arg) >= 6: textfilename = arg[6:] gen = pagegenerators.TextfilePageGenerator(textfilename) elif arg.startswith('-cat'): if len(arg) == 4: categoryname = wikipedia.input(u'Please enter the category name:') else: categoryname = arg[5:] cat = catlib.Category(wikipedia.getSite(), 'Category:%s' % categoryname) gen = pagegenerators.CategorizedPageGenerator(cat) elif arg.startswith('-xml'): if len(arg) == 4: xmlFilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlFilename = arg[5:] elif arg.startswith('-page'): if len(arg) == 5: PageTitles.append(wikipedia.input(u'Which page do you want to change?')) else: PageTitles.append(arg[6:]) source = 'specificPages' elif arg.startswith('-ref'): if len(arg) == 4: referredPageTitle = wikipedia.input(u'Links to which page should be processed?') else: referredPageTitle = arg[5:] referredPage = wikipedia.Page(wikipedia.getSite(), referredPageTitle) gen = pagegenerators.ReferringPageGenerator(referredPage) elif arg.startswith('-links'): if len(arg) == 6: linkingPageTitle = wikipedia.input(u'Links from which page should be processed?') else: linkingPageTitle = arg[7:] linkingPage = wikipedia.Page(wikipedia.getSite(), linkingPageTitle) gen = pagegenerators.LinkedPageGenerator(linkingPage) elif arg.startswith('-start'): if len(arg) == 6: firstPageTitle = wikipedia.input(u'Which page do you want 
to change?') else: firstPageTitle = arg[7:] namespace = wikipedia.Page(wikipedia.getSite(), firstPageTitle).namespace() firstPageTitle = wikipedia.Page(wikipedia.getSite(), firstPageTitle).titleWithoutNamespace() gen = pagegenerators.AllpagesPageGenerator(firstPageTitle, namespace) elif arg.startswith('-namespace:'): namespaces.append(int(arg[11:])) if PageTitles: pages = [wikipedia.Page(wikipedia.getSite(), PageTitle) for PageTitle in PageTitles] gen = iter(pages) if not gen: # syntax error, show help text from the top of this file wikipedia.output(__doc__, 'utf-8') wikipedia.stopme() sys.exit() if namespaces != []: gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces) preloadingGen = pagegenerators.PreloadingGenerator(gen, pageNumber = 20) bot = CheckRobot(preloadingGen) bot.run()
|
debugDump( 'MediaWiki_Msg', site, u'Error URL: '+unicode(path), allmessages )
|
wikipedia.debugDump( 'MediaWiki_Msg', site, u'Error URL: '+unicode(path), allmessages )
|
def refresh_messages(site = None): site = site or wikipedia.getSite() # get 'all messages' special page's path path = site.allmessages_address() print 'Retrieving MediaWiki messages for %s' % repr(site) wikipedia.put_throttle() # It actually is a get, but a heavy one. allmessages = site.getUrl(path) print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. if site.version() >= "1.5": itemR = re.compile("<tr class='def' id='.*?'>\n" # first possibility: original MediaWiki message used + "\s*<td>\n" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*(?P<current>.+?)\n" # current message + "\s*</td>" + "\s*</tr>" + "|" + "<tr class='orig' id='.*?'>\n" # second possibility: custom message used + "\s*<td rowspan='2'>" + '\s*<a id=".+?" name=".+?"></a>' # anchor + '\s*<a href=".+?" title=".+?"><span id=\'.*?\'>(?P<key2>.+?)</span><\/a><br \/>' # message link + '\s*<a href=".+?" title=".+?">.+?<\/a>\n' # talk link + "\s*</td><td>" + "\s*.+?\n" # original message + "\s*</td>" + "\s*</tr><tr class='new' id='.*?'>" + "\s*<td>\n" + "\s*(?P<current2>.+?)\n" # current message + "\s*</td>" + "\s*</tr>", re.DOTALL) else: itemR = re.compile("<tr bgcolor=\"#[0-9a-f]{6}\"><td>\n" + "\s*<a href=.+?>(?P<key>.+?)<\/a><br \/>\n" + "\s*<a href=.+?>.+?<\/a>\n" + "\s*</td><td>\n" + "\s*.+?\n" + "\s*</td><td>\n" + "\s*(?P<current>.+?)\n" + "\s*<\/td><\/tr>", re.DOTALL) # we will save the found key:value pairs here dictionary = {} for match in itemR.finditer(allmessages): # Key strings only contain ASCII characters, so we can use them as dictionary keys key = match.group('key') or match.group('key2') current = match.group('current') or match.group('current2') dictionary[key] = current # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. 
Create if necessary. if dictionary == {}: debugDump( 'MediaWiki_Msg', site, u'Error URL: '+unicode(path), allmessages ) sys.exit() else: f = open(makepath('mediawiki-messages/mediawiki-messages-%s-%s.dat' % (site.family.name, site.lang)), 'w') pickle.dump(dictionary, f) f.close() #print dictionary['addgroup'] #print dictionary['sitestatstext']
|
def get_image(fn,target,description):
|
def get_image(fn, target, description, debug=False):
|
def get_image(fn,target,description): uploadaddr='/wiki/%s:Upload'%wikipedia.special[wikipedia.mylang] # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] print "The filename on wikipedia will default to:",fn newfn = raw_input("Better name : ") if newfn: fn = unicode(newfn, config.console_encoding) fn = fn.encode(wikipedia.code2encoding(wikipedia.mylang)) # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # A proper description for the submission. # Only ask for a description if no one was found on the original image # description page. if description=='': description = raw_input('Description : ') description = unicode(description, config.console_encoding) description = description.encode(wikipedia.code2encoding(wikipedia.mylang)) print description data = post_multipart(wikipedia.langs[wikipedia.mylang], uploadaddr, (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) return fn
|
print description
|
def get_image(fn,target,description): uploadaddr='/wiki/%s:Upload'%wikipedia.special[wikipedia.mylang] # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] print "The filename on wikipedia will default to:",fn newfn = raw_input("Better name : ") if newfn: fn = unicode(newfn, config.console_encoding) fn = fn.encode(wikipedia.code2encoding(wikipedia.mylang)) # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # A proper description for the submission. # Only ask for a description if no one was found on the original image # description page. if description=='': description = raw_input('Description : ') description = unicode(description, config.console_encoding) description = description.encode(wikipedia.code2encoding(wikipedia.mylang)) print description data = post_multipart(wikipedia.langs[wikipedia.mylang], uploadaddr, (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) return fn
|
|
data = post_multipart(wikipedia.langs[wikipedia.mylang], uploadaddr, (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) )
|
if not debug: data = post_multipart(wikipedia.langs[wikipedia.mylang], uploadaddr, (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) )
|
def get_image(fn,target,description): uploadaddr='/wiki/%s:Upload'%wikipedia.special[wikipedia.mylang] # Get file contents uo = wikipedia.MyURLopener() file = uo.open(fn) contents = file.read() file.close() # Isolate the pure name if '/' in fn: fn = fn.split('/')[-1] if '\\' in fn: fn = fn.split('\\')[-1] print "The filename on wikipedia will default to:",fn newfn = raw_input("Better name : ") if newfn: fn = unicode(newfn, config.console_encoding) fn = fn.encode(wikipedia.code2encoding(wikipedia.mylang)) # Wikipedia doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form fn = fn.replace(' ', '_') # A proper description for the submission. # Only ask for a description if no one was found on the original image # description page. if description=='': description = raw_input('Description : ') description = unicode(description, config.console_encoding) description = description.encode(wikipedia.code2encoding(wikipedia.mylang)) print description data = post_multipart(wikipedia.langs[wikipedia.mylang], uploadaddr, (('wpUploadDescription', description), ('wpUploadAffirm', '1'), ('wpUpload','upload bestand')), (('wpUploadFile',fn,contents),) ) return fn
|
'li': [u'Verdudeliking', u'Verdudelikingpazjena'],
|
'li': [u'Verdudeliking', u'Verdudelikingpazjena', u'Vp'],
|
def __init__(self): family.Family.__init__(self) self.name = 'wikipedia'
|
if site.sitename() == 'wikipedia:de':
|
if self.site.sitename() == 'wikipedia:de':
|
def cleanUpLinks(self, text): trailR = re.compile(self.site.linktrail()) # The regular expression which finds links. Results consist of four groups: # group title is the target page title, that is, everything before | or ]. # group section is the page section. It'll include the # to make life easier for us. # group label is the alternative link title, that's everything between | and ]. # group linktrail is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. self.linkR = re.compile(r'\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' + self.site.linktrail() + ')') curpos = 0 # This loop will run until we have finished the current page while True: m = self.linkR.search(text, pos = curpos) if not m: break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 titleWithSection = m.group('titleWithSection') if not wikipedia.isInterwikiLink(titleWithSection): # The link looks like this: # [[page_title|link_text]]trailing_chars # We only work on namespace 0 because pipes and linktrails work # differently for images and categories. 
page = wikipedia.Page(self.site, titleWithSection) if page.namespace() == 0: # Replace underlines by spaces, also multiple underlines titleWithSection = re.sub('_+', ' ', titleWithSection) # Remove double spaces titleWithSection = re.sub(' +', ' ', titleWithSection) # Convert URL-encoded characters to unicode titleWithSection = wikipedia.url2unicode(titleWithSection, site = self.site) label = m.group('label') or titleWithSection trailingChars = m.group('linktrail') if trailingChars: label += trailingChars if titleWithSection == label: newLink = "[[%s]]" % titleWithSection # Check if we can create a link with trailing characters instead of a pipelink elif len(titleWithSection) <= len(label) and label[:len(titleWithSection)] == titleWithSection and re.sub(trailR, '', label[len(titleWithSection):]) == '': newLink = "[[%s]]%s" % (label[:len(titleWithSection)], label[len(titleWithSection):]) else: # Try to capitalize the first letter of the title. # Maybe this feature is not useful for languages that # don't capitalize nouns... #if not self.site.nocapitalize: if site.sitename() == 'wikipedia:de': titleWithSection = titleWithSection[0].upper() + titleWithSection[1:] newLink = "[[%s|%s]]" % (titleWithSection, label) text = text[:m.start()] + newLink + text[m.end():] return text
|
if first in self.validLanguageLinks() or (first in self.family.known_families and self.family.known_families[first] != self.family.name):
|
if first in self.validLanguageLinks() or (first in self.validLanguageLinks() and self.family.known_families[first] != self.family.name):
|
def isInterwikiLink(self, s): """ Try to check whether s is in the form "foo:bar" where foo is a known language code or family. In such a case we are dealing with an interwiki link. """ if not ':' in s: return False first, rest = s.split(':',1) # interwiki codes are case-insensitive first = first.lower() if first in self.validLanguageLinks() or (first in self.family.known_families and self.family.known_families[first] != self.family.name): return True return False
|
if not language in self.namespaces():
|
if not language[0].upper()+language[1:] in self.namespaces():
|
def validLanguageLinks(self): langlist = [] for language in self.languages(): if not language in self.namespaces(): langlist += [language] return langlist
|
print "%s doesn't exit yet. Ignoring."%(pl2.aslocallink())
|
print "%s doesn't exist yet. Ignoring."%(pl2.aslocallink())
|
def add_category(): print "This bot has two modes: you can add a category link to all" print "pages mentioned in a List that is now in another wikipedia page" print "or you can add a category link to all pages that link to a" print "specific page. If you want the second, please give an empty" print "answer to the first question." listpage = wikipedia.input('Wikipedia page with list of pages to change: ') if listpage: try: pl = wikipedia.PageLink(wikipedia.mylang, listpage) except NoPage: print 'The page ' + listpage + ' could not be loaded from the server.' sys.exit() pagenames = pl.links() else: refpage = wikipedia.input('Wikipedia page that is now linked to: ') pl = wikipedia.PageLink(wikipedia.mylang, refpage) pagenames = wikipedia.getReferences(pl) print " ==> %d pages to process"%len(pagenames) print newcat = wikipedia.input('Category to add (do not give namespace) : ') newcat = newcat.encode(wikipedia.code2encoding(wikipedia.mylang)) newcat = newcat[:1].capitalize() + newcat[1:] print newcat ns = wikipedia.family.category_namespaces(wikipedia.mylang) catpl = wikipedia.PageLink(wikipedia.mylang, ns[0].encode(wikipedia.code2encoding(wikipedia.mylang))+':'+newcat) print "Will add %s"%catpl.aslocallink() answer = '' for nm in pagenames: pl2 = wikipedia.PageLink(wikipedia.mylang, nm) if answer != 'a': answer = '' while answer not in ('y','n','a'): answer = wikipedia.input("%s [y/n/a(ll)] : "%(pl2.asasciilink())) if answer == 'a': confirm = '' while confirm not in ('y','n'): confirm = wikipedia.input("This should be used if and only if you are sure that your links are correct !!! Are you sure ? [y/n] : ") if answer == 'y' or answer == 'a': try: cats = pl2.categories() except wikipedia.NoPage: print "%s doesn't exit yet. Ignoring."%(pl2.aslocallink()) pass except wikipedia.IsRedirectPage,arg: pl3 = wikipedia.PageLink(wikipedia.mylang,arg.args[0]) print "WARNING: %s is redirect to [[%s]]. 
Ignoring."%(pl2.aslocallink(),pl3.aslocallink()) else: print "Current categories: ",cats if catpl in cats: print "%s already has %s"%(pl2.aslocallink(),catpl.aslocallink()) else: cats.append(catpl) text = pl2.get() text = wikipedia.replaceCategoryLinks(text, cats) pl2.put(text, comment = catpl.aslocallink().encode(wikipedia.code2encoding(wikipedia.mylang)))
|
Multiple references in one page will be scanned in order, but typing 'n' on any one of them will leave the complete page unchanged; it is not possible to leave only one reference unchanged.
|
If you don't want to move the article to a subcategory, but to another category, you can use the 'j' (jump) command. Typing 's' will leave the complete page unchanged.
|
def remove_category(): old_title = wikipedia.input('Please enter the name of the category that should be removed: ') old_cat = catlib.CatLink(old_title) # get edit summary message wikipedia.setAction(msg_delete[wikipedia.chooselang(wikipedia.mylang,msg_delete)] % old_title) articles = old_cat.articles(recurse = 0) if len(articles) == 0: print 'There are no articles in category ' + old_title else: for article in articles: change_category(article, old_cat, None) subcategories = old_cat.subcategories(recurse = 0) if len(subcategories) == 0: print 'There are no subcategories in category ' + old_title else: for subcategory in subcategories: change_category(subcategory, old_cat, None)
|
title = title.strip()
|
def __init__(self, site, title = None, insite = None, tosite = None): """ Constructor. Normally called with two arguments: Parameters: 1) The wikimedia site on which the page resides 2) The title of the page as a unicode string The argument insite can be specified to help decode the name; it is the wikimedia site where this link was found. """ self._site = site if tosite: self._tosite = tosite else: self._tosite = getSite() # Default to home wiki # Clean up the name, it can come from anywhere. # Remove leading and trailing whitespace title = title.strip() # Replace underlines by spaces title = underline2space(title) # Convert HTML entities to unicode title = html2unicode(title, site = site, altsite = insite) # Convert URL-encoded characters to unicode title = url2unicode(title, site = site) # replace cx by ĉ etc. if site.lang == 'eo': title = resolveEsperantoXConvention(title) # Remove leading colon if title.startswith(':'): title = title[1:] # Capitalize first letter try: if not site.nocapitalize: title = title[0].upper() + title[1:] except IndexError: # title is empty pass # split up into namespace and rest title = title.split(':', 1) # if the page is not in namespace 0: if len(title) > 1: # translate a default namespace name into the local namespace name for ns in site.family.namespaces.keys(): if title[0] == site.family.namespace('_default', ns): title[0] = site.namespace(ns) # Capitalize the first non-namespace part for ns in site.family.namespaces.keys(): if title[0] == site.namespace(ns): if not site.nocapitalize: try: title[1] = title[1][0].upper()+title[1][1:] except IndexError: # title[1] is empty print "WARNING: Strange title %s"%'%3A'.join(title) self._title = ':'.join(title)
|
|
pickle.dump(databases, f, bin=1)
|
pickle.dump(databases, f, protocol=pickle.HIGHEST_PROTOCOL)
|
def dump(self, filename = 'category.dump.bz2'): ''' Saves the contents of the dictionaries superclassDB and catContentDB to disk. ''' wikipedia.output(u'Dumping to %s, please wait...' % filename) f = bz2.BZ2File(filename, 'w') databases = { 'catContentDB': self.catContentDB, 'superclassDB': self.superclassDB } # store dump to disk in binary format pickle.dump(databases, f, bin=1) f.close()
|
incode = self._incode)
|
incode = mylang)
|
def __init__(self, code, name = None, urlname = None, linkname = None, incode = None): """Constructor. Normally called with two arguments: 1) The language code on which the page resides 2) The name of the page as suitable for a URL """ self._incode = incode self._code = code if linkname is None and urlname is None and name is not None: # Clean up the name, it can come from anywhere. name = name.strip() self._urlname = link2url(name, self._code, incode = self._incode) self._linkname = url2link(self._urlname, code = self._code, incode = self._incode) elif linkname is not None: # We do not trust a linkname either.... name = linkname.strip() self._urlname = link2url(name, self._code, incode=self._incode) self._linkname = url2link(self._urlname, code = self._code, incode = self._incode) elif urlname is not None: self._urlname = urlname self._linkname = url2link(urlname, code = self._code, incode = self._incode)
|
colors = colors or [None for char in text]
|
def output(self, text, colors = None, newline = True): """ If a character can't be displayed in the encoding used by the user's terminal, it will be replaced with a question mark or by a transliteration.
|
|
m = re.search("== *%s *==" % hn, self._contents)
|
m = re.search("=+ *%s *=+" % hn, self._contents)
|
def get(self, read_only = False, force = False, get_redirect=False, throttle = True): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code:
|
return self.lang in site.family.category_on_one_line
|
return self.lang in self.site().family.category_on_one_line
|
def category_on_one_line(self): return self.lang in site.family.category_on_one_line
|
if choice in ['a', 'A']: acceptall = True choice = 'y' if choice in ['y', 'Y']:
|
if choice in ['a', 'A']: acceptall = True if acceptall or choice in ['y', 'Y']:
|
def generator(source, replacements, exceptions, regex, textfilename = None, sqlfilename = None, pagenames = None): ''' Generator which will yield PageLinks for pages that might contain text to replace. These pages might be retrieved from a local SQL dump file or a text file, or as a list of pages entered by the user. Arguments: * source - where the bot should retrieve the page list from. can be 'sqldump', 'textfile' or 'userinput'. * replacements - a dictionary where keys are original texts and values are replacement texts. * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions * textfilename - the textfile's path, either absolute or relative, which will be used when source is 'textfile'. * sqlfilename - the dump's path, either absolute or relative, which will be used when source is 'sqldump'. * pagenames - a list of pages which will be used when source is 'userinput'. ''' if source == 'sqldump': for pl in read_pages_from_sql_dump(sqlfilename, replacements, exceptions, regex): yield pl elif source == 'textfile': for pl in read_pages_from_text_file(textfilename): yield pl elif source == 'userinput': for pagename in pagenames: yield wikipedia.PageLink(wikipedia.mylang, pagename)
|
print "Creating page %s"%title
|
def findpage(t): try: location = re.search(starttext+"([^\Z]*?)"+endtext,t) if include: page = location.group() else: page = location.group(1) except AttributeError: return try: title = re.search("'''(.*?)'''",page).group(1) pl = wikipedia.PageLink(mysite,title) if pl.exists(): print "Page %s already exists, not adding!"%title else: print "Creating page %s"%title pl.put(page, comment = commenttext, minorEdit = False) except AttributeError: print "No title found - skipping a page." findpage(t[location.end()+1:]) return
|
|
text='\n'.join(text)
|
text=''.join(text)
|
def findpage(t): try: location = re.search(starttext+"([^\Z]*?)"+endtext,t) if include: page = location.group() else: page = location.group(1) except AttributeError: return try: title = re.search("'''(.*?)'''",page).group(1) pl = wikipedia.PageLink(mysite,title) if pl.exists(): print "Page %s already exists, not adding!"%title else: print "Creating page %s"%title pl.put(page, comment = commenttext, minorEdit = False) except AttributeError: print "No title found - skipping a page." findpage(t[location.end()+1:]) return
|
return x, isWatched
|
return x
|
def getPage(site, name, get_edit_page = True, read_only = False, do_quote = True, get_redirect=False, throttle = True): """ Get the contents of page 'name' from the 'site' wiki Do not use this directly; for 99% of the possible ideas you can use the Page object instead. Arguments: site - the wiki site name - the page name get_edit_page - If true, gets the edit page, otherwise gets the normal page. read_only - If true, doesn't raise LockedPage exceptions. do_quote - ??? (TODO: what is this for?) get_redirect - Get the contents, even if it is a redirect page This routine returns a unicode string containing the wiki text if get_edit_page is True; otherwise it returns a unicode string containing the entire page's HTML code. """ isWatched = False host = site.hostname() name = re.sub(' ', '_', name) output(url2unicode(u'Getting page %s' % site.linkto(name), site = site)) # A heuristic to encode the URL into %XX for characters that are not # allowed in a URL. if not '%' in name and do_quote: # It should not have been done yet if name != urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) if get_edit_page: address = site.edit_address(name) else: address = site.get_address(name) # Make sure Brion doesn't get angry by waiting if the last time a page # was retrieved was not long enough ago. if throttle: get_throttle() # Try to retrieve the page until it was successfully loaded (just in case # the server is down or overloaded) # wait for retry_idle_time minutes (growing!) between retries. 
retry_idle_time = 1 while True: starttime = time.time() text, charset = getUrl(host, address, site) get_throttle.setDelay(time.time() - starttime)\ # Extract the actual text from the textedit field if charset is None: print "WARNING: No character set found" else: # Store character set for later reference site.checkCharset(charset) if get_edit_page: # Look for the edit token R = re.compile(r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"") tokenloc = R.search(text) if tokenloc: site.puttoken(tokenloc.group(1)) elif not site.getToken(getalways = False): site.puttoken('') # Look if the page is on our watchlist R = re.compile(r"\<input tabindex='[\d]+' type='checkbox' name='wpWatchthis' checked='checked'") matchWatching = R.search(text) if matchWatching: print 'Page is on watchlist.' print 'The bot doesn\'t know how to deal with this. The page won\'t be on the watchlist any longer after saving.' if not read_only: # check if we're logged in p=re.compile('userlogin') if p.search(text) != None: output(u'Warning: You\'re probably not logged in on %s:' % repr(site)) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[repr(site), link2url(name, site = site)] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[repr(site), link2url(name, site = site)] = m.group(1) else: edittime[repr(site), link2url(name, site = site)] = "0" try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %d minutes..." 
% (host, address, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(site, name) m = redirectRe(site).match(text[i1:i2]) if m and not get_redirect: output(u"DBG> %s is redirect to %s" % (url2unicode(name, site = site), unicode(m.group(1), site.encoding()))) raise IsRedirectPage(m.group(1)) if edittime[repr(site), link2url(name, site = site)] == "0" and not read_only: print "DBG> page may be locked?!" raise LockedPage() x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] else: x = text # If not editing # Convert to a unicode string. If there's invalid unicode data inside # the page, replace it with question marks. x = unicode(x, charset, errors = 'replace') return x, isWatched
|
if len(arg) == 7:
|
if len(arg) == 6:
|
def main(): quietMode = False # use -quiet to get less output # if the -file argument is used, page titles are stored in this array. # otherwise it will only contain one page. articles = [] # if -file is not used, this temporary array is used to read the page title. page_title = [] debug = False xmlfilename = None textfilename = None startpage = None for arg in wikipedia.handleArgs(): if arg.startswith('-file:'): if len(arg) == 5: textfilename = wikipedia.input(u'Please enter the textfile\'s name:') else: textfilename = arg[6:] gen = pagegenerators.TextfilePageGenerator(textfilename) elif arg.startswith('-start:'): if len(arg) == 7: startpage = wikipedia.input(u'Please enter the article to start then:') else: startpage = arg[8:] gen = pagegenerators.AllpagesPageGenerator(startpage) elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlfilename = arg[5:] gen = TableXmlDumpPageGenerator(xmlfilename) elif arg == '-sql': query = u"""
|
startpage = arg[8:]
|
startpage = arg[7:]
|
def main(): quietMode = False # use -quiet to get less output # if the -file argument is used, page titles are stored in this array. # otherwise it will only contain one page. articles = [] # if -file is not used, this temporary array is used to read the page title. page_title = [] debug = False xmlfilename = None textfilename = None startpage = None for arg in wikipedia.handleArgs(): if arg.startswith('-file:'): if len(arg) == 5: textfilename = wikipedia.input(u'Please enter the textfile\'s name:') else: textfilename = arg[6:] gen = pagegenerators.TextfilePageGenerator(textfilename) elif arg.startswith('-start:'): if len(arg) == 7: startpage = wikipedia.input(u'Please enter the article to start then:') else: startpage = arg[8:] gen = pagegenerators.AllpagesPageGenerator(startpage) elif arg.startswith('-xml'): if len(arg) == 4: xmlfilename = wikipedia.input(u'Please enter the XML dump\'s filename:') else: xmlfilename = arg[5:] gen = TableXmlDumpPageGenerator(xmlfilename) elif arg == '-sql': query = u"""
|
'pt': u'Discussão Portal',
|
'pt': u'Portal Discussão',
|
def __init__(self): family.Family.__init__(self) self.name = 'wikipedia'
|
start = int(arg[7:])
|
start = arg[7:]
|
def main(): start = '!' sqlfilename = None for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg, logname = 'weblinkchecker.log') if arg: if arg.startswith('-sql'): if len(arg) == 4: sqlfilename = wikipedia.input(u'Please enter the SQL dump\'s filename: ') else: sqlfilename = arg[5:] source = sqlfilename elif arg.startswith('-start:'): start = int(arg[7:]) else: print 'Unknown argument: %s' % arg if sqlfilename: gen = SqlPageGenerator(sqlfilename) else: gen = AllpagesPageGenerator(start) bot = WeblinkCheckerRobot(gen) bot.run()
|
newTable = re.sub("[\r\n]*?<(?i)(table) ([\w\W]*?)>([\w\W]*?)[\r\n ]*", r"\r\n{| \2\r\n\3", newTable) newTable = re.sub("[\r\n]*?<(TABLE|table)>([\w\W]*?)[\r\n ]*", r"\r\n{|\n\2\r\n", newTable) newTable = re.sub("[\r\n]*?<(TABLE|table) ([\w\W]*?)>[\r\n ]*", r"\r\n{| \2\r\n", newTable) newTable = re.sub("[\r\n]*?<(TABLE|table)>[\r\n ]*",
|
newTable = re.sub("(?i)[\r\n]*?<table (?P<attr>[\w\W]*?)>(?P<more>[\w\W]*?)[\r\n ]*", r"\r\n{| \g<attr>\r\n\g<more>", newTable) newTable = re.sub("(?i)[\r\n]*?<table>(?P<more>[\w\W]*?)[\r\n ]*", r"\r\n{|\n\g<more>\r\n", newTable) newTable = re.sub("(?i)[\r\n]*?<table (?P<attr>[\w\W]*?)>[\r\n ]*", r"\r\n{| \g<attr>\r\n", newTable) newTable = re.sub("(?i)[\r\n]*?<table>[\r\n ]*",
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable = re.sub("[\s]*<\/(TABLE|table)>", "\r\n|}", newTable) newTable = re.sub("<(CAPTION|caption) ([\w\W]*?)>([\w\W]*?)<\/caption>", r"\r\n|+\1 | \2", newTable) newTable = re.sub("<(CAPTION|caption)([\w\W]*?)<\/caption>", r"\r\n|+ \1", newTable)
|
newTable = re.sub("(?i)[\s]*<\/table>", "\r\n|}", newTable) newTable = re.sub("(?i)<caption (?P<attr>[\w\W]*?)>(?P<caption>[\w\W]*?)<\/caption>", r"\r\n|+\g<attr> | \g<caption>", newTable) newTable = re.sub("(?i)<caption>(?P<caption>[\w\W]*?)<\/caption>", r"\r\n|+ \g<caption>", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable = re.sub("[\r\n]+<(TH|th)([^>]*?)>([\w\W]*?)<\/(th|TH)>", r"\r\n!\2 | \3\r\n", newTable)
|
newTable = re.sub("(?i)[\r\n]+<th(?P<attr>[^>]*?)>(?P<header>[\w\W]*?)<\/th>", r"\r\n!\g<attr> | \g<header>\r\n", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable, n = re.subn("[\r\n]+<(th|TH)>([\w\W]*?)[\r\n]+", r"\r\n! \2\r\n", newTable)
|
newTable, n = re.subn("(?i)[\r\n]+<th>(?P<header>[\w\W]*?)[\r\n]+", r"\r\n! \g<header>\r\n", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable, n = re.subn("[\r\n]+<(th|TH)([^>]*?)>([\w\W]*?)[\r\n]+", r"\n!\2 | \3\r\n", newTable)
|
newTable, n = re.subn("(?i)[\r\n]+<th(?P<attr>[^>]*?)>(?P<header>[\w\W]*?)[\r\n]+", r"\n!\g<attr> | \g<header>\r\n", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
warning_messages.append(u'WARNING: found <th> without </th>. (%d occurences\n)' % n)
|
warning_messages.append(u'WARNING: found <th ...> without </th>. (%d occurences\n)' % n)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable = re.sub("[\r\n]*<(tr|TR)([^>]*?)>[\r\n]*", r"\r\n|-----\2\r\n", newTable) newTable = re.sub("[\r\n]*<(tr|TR)>[\r\n]*",
|
newTable = re.sub("(?i)[\r\n]*<tr(?P<attr>[^>]*?)>[\r\n]*", r"\r\n|-----\g<attr>\r\n", newTable) newTable = re.sub("(?i)[\r\n]*<tr>[\r\n]*",
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable = re.sub("[\r\n]+<(td|TD)>([\w\W]*?)<\/(TD|td)>", r"\r\n| \2\r\n", newTable)
|
newTable = re.sub("(?i)[\r\n]+<td>(?P<cell>[\w\W]*?)<\/td>", r"\r\n| \g<cell>\r\n", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable = re.sub("[\r\n]+<(td|TD)([^>]*?)>([\w\W]*?)<\/(TD|td)>", r"\r\n|\2 | \3", newTable)
|
newTable = re.sub("(?i)[\r\n]+<td(?P<attr>[^>]*?)>(?P<cell>[\w\W]*?)<\/td>", r"\r\n|\g<attr> | \g<cell>", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable, n = re.subn("[\r\n]+<(td|TD)>([^\r\n]*?)<(td|TD)>", r"\r\n| \2\r\n", newTable)
|
newTable, n = re.subn("(?i)[\r\n]+<td>(?P<cell>[^\r\n]*?)<td>", r"\r\n| \g<cell>\r\n", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
warning_messages.append(u'WARNING: (sorry, bot code unreadable (1). I don\'t know why this warning is given.) (%d occurences)\n' % n)
|
warning_messages.append(u'<td> used where </td> was expected. (%d occurences)\n' % n)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
warning_messages.append(u'WARNING: found <td><td></tr>, but no </td>. (%d occurences)\n' % n) warnings += n
|
warning_messages.append(u'WARNING: (sorry, bot code unreadable (1). I don\'t know why this warning is given.) (%d occurences)\n' % n)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable, n = re.subn("<(td|TD)>([^<]*?)[\r\n]+", r"\r\n| \2\r\n", newTable)
|
newTable, n = re.subn("(?i)<td>(?P<cell>[^<]*?)[\r\n]+", r"\r\n| \g<cell>\r\n", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
warning_messages.append(u'WARNING: found <td> without </td>. (%d occurences)\n' % n) warnings += n
|
warning_messages.append(u'NOTE: Found <td> without </td>. This shouldn\'t cause problems.\n')
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable, n = re.subn("[\r\n]*<(td|TD)([^>]*?)>([\w\W]*?)[\r\n]+", r"\r\n|\2 | \3\r\n", newTable)
|
newTable, n = re.subn("(?i)[\r\n]*<td(?P<attr>[^>]*?)>(?P<cell>[\w\W]*?)[\r\n]+", r"\r\n|\g<attr> | \g<cell>\r\n", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
newTable, n = re.subn("<(td|TD)>([\w\W]*?)[\r\n]+", r"\r\n| \2\r\n", newTable) if n>0: warning_messages.append(u'WARNING: (sorry, bot code unreadable (2). I don\'t know why this warning is given.) (%d occurences)\n' % n) warnings += n
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
|
newTable = re.sub("<td>[\r\n]*<\/tr>", "", newTable) newTable = re.sub("[\r\n]*<\/[Tt][rRdDhH]>", "", newTable)
|
newTable = re.sub("(?i)<td>[\r\n]*<\/tr>", "", newTable) newTable = re.sub("(?i)[\r\n]*<\/t[rdh]>", "", newTable)
|
def convertTable(self, table): ''' Converts an HTML table to wiki syntax. If the table already is a wiki table or contains a nested wiki table, tries to beautify it. Returns the converted table, the number of warnings that occured and a list containing these warnings.
|
def main(): if __name__ == "__main__":
|
if __name__ == "__main__": try:
|
def main(): if __name__ == "__main__": action = None sort_by_last_name = False for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg) if arg: if arg == 'add': action = 'add' elif arg == 'remove': action = 'remove' elif arg == 'rename': action = 'rename' elif arg == 'tidy': action = 'tidy' elif arg == 'tree': action = 'tree' elif arg == '-person': sort_by_last_name = True elif arg == '-restore': f = open('cattree.dump', 'r') databases = pickle.load(f) f.close() catContentDB = databases['catContentDB'] superclassDB = databases['superclassDB'] del databases # catlib needs to be imported at this position because its constructor uses # mylang which might have been changed by wikipedia.argHandler(). import catlib if action == 'add': add_category(sort_by_last_name) elif action == 'remove': cat_title = wikipedia.input(u'Please enter the name of the category that should be removed:') remove_category(cat_title) elif action == 'rename': old_cat_title = wikipedia.input(u'Please enter the old name of the category:') new_cat_title = wikipedia.input(u'Please enter the new name of the category:') rename_category(old_cat_title, new_cat_title) elif action == 'tidy': cat_title = wikipedia.input(u'Which category do you want to tidy up?') tidy_category(cat_title) elif action == 'tree': cat_title = wikipedia.input(u'For which category do you want to create a tree view?') try: print_treeview(cat_title) except: dump('cattree.dump') raise else: # show help wikipedia.output(__doc__, 'utf-8')
|
try: main() except: wikipedia.stopme() raise wikipedia.stopme()
|
finally: wikipedia.stopme()
|
def main(): if __name__ == "__main__": action = None sort_by_last_name = False for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg) if arg: if arg == 'add': action = 'add' elif arg == 'remove': action = 'remove' elif arg == 'rename': action = 'rename' elif arg == 'tidy': action = 'tidy' elif arg == 'tree': action = 'tree' elif arg == '-person': sort_by_last_name = True elif arg == '-restore': f = open('cattree.dump', 'r') databases = pickle.load(f) f.close() catContentDB = databases['catContentDB'] superclassDB = databases['superclassDB'] del databases # catlib needs to be imported at this position because its constructor uses # mylang which might have been changed by wikipedia.argHandler(). import catlib if action == 'add': add_category(sort_by_last_name) elif action == 'remove': cat_title = wikipedia.input(u'Please enter the name of the category that should be removed:') remove_category(cat_title) elif action == 'rename': old_cat_title = wikipedia.input(u'Please enter the old name of the category:') new_cat_title = wikipedia.input(u'Please enter the new name of the category:') rename_category(old_cat_title, new_cat_title) elif action == 'tidy': cat_title = wikipedia.input(u'Which category do you want to tidy up?') tidy_category(cat_title) elif action == 'tree': cat_title = wikipedia.input(u'For which category do you want to create a tree view?') try: print_treeview(cat_title) except: dump('cattree.dump') raise else: # show help wikipedia.output(__doc__, 'utf-8')
|
choice = wikipedia.inputChoice(u"File format is not %s but %s. Continue [y/N]? " % (allowed_formats, ext))
|
choice = wikipedia.inputChoice(u"File format is not one of [%s], but %s. Continue?" % (u' '.join(allowed_formats), ext), ['yes', 'no'], ['y', 'n'], 'N')
|
def upload_image(self, debug=False): """Gets the image at URL self.url, and uploads it to the target wiki. Returns the filename which was used to upload the image. If the upload fails, the user is asked whether to try again or not. If the user chooses not to retry, returns null. """ # Get file contents if '://' in self.url: uo = wikipedia.MyURLopener() file = uo.open(self.url,"rb") else: # Opening local files with MyURLopener would be possible, but we # don't do it because it only accepts ASCII characters in the # filename. file = open(self.url,"rb") wikipedia.output(u'Reading file %s' % self.url) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name filename = self.url if '/' in filename: filename = filename.split('/')[-1] if '\\' in filename: filename = filename.split('\\')[-1] if self.urlEncoding: filename = urllib.unquote(filename) filename = filename.decode(self.urlEncoding) if not self.keepFilename: wikipedia.output(u"The filename on the target wiki will default to: %s" % filename) # ask newfn until it's valid ok = False # FIXME: these 2 belong somewhere else, presumably in family forbidden = '/' # to be extended allowed_formats = (u'gif', u'jpg', u'jpeg', u'mid', u'midi', u'ogg', u'png', u'svg', u'xcf') while not ok: ok = True newfn = wikipedia.input(u'Enter a better name, or press enter to accept:') if newfn == "": newfn = filename ext = os.path.splitext(newfn)[1].lower().strip('.') for c in forbidden: if c in newfn: print "Invalid character: %s. Please try again" % c ok = False if ext not in allowed_formats and ok: choice = wikipedia.inputChoice(u"File format is not %s but %s. Continue [y/N]? " % (allowed_formats, ext)) if choice == 'n': ok = False if newfn != '': filename = newfn # MediaWiki doesn't allow spaces in the file name. 
# Replace them here to avoid an extra confirmation form filename = filename.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki encodedFilename = filename.encode(self.targetSite.encoding()) # A proper description for the submission. wikipedia.output(u"The suggested description is:") wikipedia.output(self.description) if self.verifyDescription: newDescription = u'' choice = wikipedia.inputChoice(u'Do you want to change this description?', ['Yes', 'No'], ['y', 'N'], 'n') if choice == 'y': import editarticle editor = editarticle.TextEditor() newDescription = editor.edit(self.description) # if user saved / didn't press Cancel if newDescription: self.description = newDescription formdata = {} formdata["wpUploadDescription"] = self.description
|
f=open('ALLNOTFOUND.dat','w') f.write(data) f.close() sys.exit(1)
|
def run(self): dt=15 while True: try: data = self.getData() except (socket.error, httplib.BadStatusLine, ServerError): # Print the traceback of the caught exception print ''.join(traceback.format_exception(*sys.exc_info())) output(u'DBG> got network error in GetAll.run. Sleeping for %d seconds'%dt) time.sleep(dt) if dt <= 60: dt += 15 elif dt < 360: dt += 60 else: break if not data: return handler = xmlreader.MediaWikiXmlHandler() handler.setCallback(self.oneDone) handler.setHeaderCallback(self.headerDone) try: xml.sax.parseString(data, handler) except xml.sax._exceptions.SAXParseException: f=open('sax_parse_bug.dat','w') f.write(data) f.close() print >>sys.stderr, "Dumped invalid XML to sax_parse_bug.dat" raise except PageNotFound: return # All of the ones that have not been found apparently do not exist for pl in self.pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): # the following 4 lines are just for debugging purposes and will # be removed later. --Daniel f=open('ALLNOTFOUND.dat','w') f.write(data) f.close() sys.exit(1) pl._getexception = NoPage
|
|
output("WARNING: Hashname does not exist: %s" % self)
|
output("WARNING: Hashname does not exist: %s" % self.linkname())
|
def get(self, read_only = False): """The wiki-text of the page. This will retrieve the page if it has not been retrieved yet. This can raise the following exceptions that should be caught by the calling code:
|
output(u""+mediawiki_messages.get('spamprotectiontitle', self.site()))
|
def putPage(self, text, comment = None, watchArticle = False, minorEdit = True, newPage = False, token = None, gettoken = False, sysop = False): """ Upload 'text' as new contents for this Page by filling out the edit page.
|
|
return dh_noConv( value, u'%d' )
|
return dh_noConvYear( value, u'%d' )
|
def dh_simpleInt( value ): """decoding helper for a single integer value representing a year with no extra symbols""" return dh_noConv( value, u'%d' )
|
'nap': lambda v: slh( v, [u"Januari", u"Februari", u"Mac", u"April", u"Mei", u"Jun", u"Julai", u"Ogos", u"September", u"Oktober", u"November", u"Disember"] ), 'nap': lambda v: slh( v, [u"Jennaro", u"Frevaro", u"Màrzo", u"Abbrile", u"Majo", u"Giùgno", u"Luglio", u"Aùsto", u"Settembre", u"Ottovre", u"Nuvembre", u"Dicembre"] ),
|
'nap': lambda v: slh( v, [u"Jennaro", u"Frevaro", u"Màrzo", u"Abbrile", u"Maggio", u"Giùgno", u"Luglio", u"Aùsto", u"Settembre", u"Ottovre", u"Nuvembre", u"Dicembre"] ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'af' : dh_simpleInt, 'ar' : dh_simpleInt, 'ast': dh_simpleInt, 'be' : dh_simpleInt, 'bg' : dh_simpleInt, 'bs' : dh_simpleInt, 'ca' : dh_simpleInt, 'cs' : dh_simpleInt, 'csb': dh_simpleInt, 'cv' : dh_simpleInt, 'cy' : dh_simpleInt, 'da' : dh_simpleInt, 'de' : dh_simpleInt, 'el' : dh_simpleInt, 'en' : dh_simpleInt, 'eo' : dh_simpleInt, 'es' : dh_simpleInt, 'et' : dh_simpleInt, 'eu' : dh_simpleInt, 'fi' : dh_simpleInt, 'fo' : dh_simpleInt, 'fr' : dh_simpleInt, 'fy' : dh_simpleInt, 'gl' : dh_simpleInt, 'he' : dh_simpleInt, 'hr' : dh_simpleInt, 'hu' : dh_simpleInt, 'ia' : dh_simpleInt, 'id' : dh_simpleInt, 'ie' : dh_simpleInt, 'io' : dh_simpleInt, 'is' : dh_simpleInt, 'it' : dh_simpleInt, 'ja' : lambda v: dh_noConv( v, u'%d年' ), 'ka' : dh_simpleInt,
|
'af' : dh_simpleYearAD, 'ar' : dh_simpleYearAD, 'ast': dh_simpleYearAD, 'be' : dh_simpleYearAD, 'bg' : dh_simpleYearAD, 'bs' : dh_simpleYearAD, 'ca' : dh_simpleYearAD, 'cs' : dh_simpleYearAD, 'csb': dh_simpleYearAD, 'cv' : dh_simpleYearAD, 'cy' : dh_simpleYearAD, 'da' : dh_simpleYearAD, 'de' : dh_simpleYearAD, 'el' : dh_simpleYearAD, 'en' : dh_simpleYearAD, 'eo' : dh_simpleYearAD, 'es' : dh_simpleYearAD, 'et' : dh_simpleYearAD, 'eu' : dh_simpleYearAD, 'fi' : dh_simpleYearAD, 'fo' : dh_simpleYearAD, 'fr' : dh_simpleYearAD, 'fy' : dh_simpleYearAD, 'gl' : dh_simpleYearAD, 'he' : dh_simpleYearAD, 'hr' : dh_simpleYearAD, 'hu' : dh_simpleYearAD, 'ia' : dh_simpleYearAD, 'id' : dh_simpleYearAD, 'ie' : dh_simpleYearAD, 'io' : dh_simpleYearAD, 'is' : dh_simpleYearAD, 'it' : dh_simpleYearAD, 'ja' : lambda v: dh_noConvYear( v, u'%d年' ), 'ka' : dh_simpleYearAD,
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'ko' : lambda v: dh_noConv( v, u'%d년' ), 'ku' : dh_simpleInt, 'kw' : dh_simpleInt, 'la' : dh_simpleInt, 'lb' : dh_simpleInt, 'li' : dh_simpleInt, 'lt' : dh_simpleInt, 'lv' : dh_simpleInt, 'mi' : dh_simpleInt, 'mk' : dh_simpleInt, 'ms' : dh_simpleInt, 'nap': dh_simpleInt, 'nds': dh_simpleInt, 'nl' : dh_simpleInt, 'nn' : dh_simpleInt, 'no' : dh_simpleInt, 'os' : dh_simpleInt, 'pl' : dh_simpleInt, 'pt' : dh_simpleInt, 'ro' : dh_simpleInt, 'ru' : dh_simpleInt, 'scn': dh_simpleInt, 'simple' : dh_simpleInt, 'sk' : dh_simpleInt, 'sl' : dh_simpleInt, 'sq' : dh_simpleInt, 'sr' : dh_simpleInt, 'sv' : dh_simpleInt, 'te' : dh_simpleInt,
|
'ko' : lambda v: dh_noConvYear( v, u'%d년' ), 'ku' : dh_simpleYearAD, 'kw' : dh_simpleYearAD, 'la' : dh_simpleYearAD, 'lb' : dh_simpleYearAD, 'li' : dh_simpleYearAD, 'lt' : dh_simpleYearAD, 'lv' : dh_simpleYearAD, 'mi' : dh_simpleYearAD, 'mk' : dh_simpleYearAD, 'ms' : dh_simpleYearAD, 'nap': dh_simpleYearAD, 'nds': dh_simpleYearAD, 'nl' : dh_simpleYearAD, 'nn' : dh_simpleYearAD, 'no' : dh_simpleYearAD, 'os' : dh_simpleYearAD, 'pl' : dh_simpleYearAD, 'pt' : dh_simpleYearAD, 'ro' : dh_simpleYearAD, 'ru' : dh_simpleYearAD, 'scn': dh_simpleYearAD, 'simple' : dh_simpleYearAD, 'sk' : dh_simpleYearAD, 'sl' : dh_simpleYearAD, 'sq' : dh_simpleYearAD, 'sr' : dh_simpleYearAD, 'sv' : dh_simpleYearAD, 'te' : dh_simpleYearAD,
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'tl' : dh_simpleInt, 'tr' : dh_simpleInt, 'tt' : dh_simpleInt, 'uk' : dh_simpleInt, 'ur' : lambda v: dh_noConv( v, u'%dسبم' ), 'vi' : dh_simpleInt, 'wa' : dh_simpleInt, 'zh' : lambda v: dh_noConv( v, u'%d年' ), 'zh-min-nan' : lambda v: dh_noConv( v, u'%d nî' ),
|
'tl' : dh_simpleYearAD, 'tr' : dh_simpleYearAD, 'tt' : dh_simpleYearAD, 'uk' : dh_simpleYearAD, 'ur' : lambda v: dh_noConvYear( v, u'%dسبم' ), 'vi' : dh_simpleYearAD, 'wa' : dh_simpleYearAD, 'zh' : lambda v: dh_noConvYear( v, u'%d年' ), 'zh-min-nan' : lambda v: dh_noConvYear( v, u'%d nî' ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'af' : lambda v: dh_noConv( v, u'%d v.C.' ), 'bg' : lambda v: dh_noConv( v, u'%d г. пр.н.е.' ), 'bs' : lambda v: dh_noConv( v, u'%d p.ne.' ), 'ca' : lambda v: dh_noConv( v, u'%d aC' ), 'da' : lambda v: dh_noConv( v, u'%d f.Kr.' ), 'de' : lambda v: dh_noConv( v, u'%d v. Chr.' ), 'en' : lambda v: dh_noConv( v, u'%d BC' ), 'eo' : lambda v: dh_noConv( v, u'-%d' ), 'es' : lambda v: dh_noConv( v, u'%d adC' ), 'et' : lambda v: dh_noConv( v, u'%d eKr' ), 'fi' : lambda v: dh_noConv( v, u'%d eaa' ), 'fo' : lambda v: dh_noConv( v, u'%d f. Kr.' ), 'fr' : lambda v: dh_noConv( v, u'-%d' ), 'gl' : lambda v: dh_noConv( v, u'-%d' ), 'he' : lambda v: dh_noConv( v, u'%d לפנה"ס' ), 'hr' : lambda v: dh_noConv( v, u'%d p.n.e.' ), 'id' : lambda v: dh_noConv( v, u'%d SM' ), 'io' : lambda v: dh_noConv( v, u'%d aK' ), 'is' : lambda v: dh_noConv( v, u'%d f. Kr.' ), 'it' : lambda v: dh_noConv( v, u'%d AC' ), 'ko' : lambda v: dh_noConv( v, u'기원전 %d년' ), 'la' : lambda v: dh_noConv( v, u'%d a.C.n.' ), 'lb' : lambda v: dh_noConv( v, u'-%d' ), 'ms' : lambda v: dh_noConv( v, u'%d SM' ), 'nap': lambda v: dh_noConv( v, u'%d AC' ), 'nds': lambda v: dh_noConv( v, u'%d v. Chr.' ), 'nl' : lambda v: dh_noConv( v, u'%d v. Chr.' ), 'nn' : lambda v: dh_noConv( v, u'-%d' ), 'no' : lambda v: dh_noConv( v, u'%d f.Kr.' ), 'pl' : lambda v: dh_noConv( v, u'%d p.n.e.' ), 'pt' : lambda v: dh_noConv( v, u'%d a.C.' ), 'ro' : lambda v: dh_noConv( v, u'%d î.Hr.' ), 'ru' : lambda v: dh_noConv( v, u'%d до н. э.' ), 'scn': lambda v: dh_noConv( v, u'%d a.C.' ), 'sl' : lambda v: dh_noConv( v, u'%d pr. n. št.' ), 'sr' : lambda v: dh_noConv( v, u'%d. пне.' ), 'sv' : lambda v: dh_noConv( v, u'%d f.Kr.' ), 'tt' : lambda v: dh_noConv( v, u'MA %d' ), 'uk' : lambda v: dh_noConv( v, u'%d до Р.Х.' ), 'zh' : lambda v: dh_noConv( v, u'前%d年' ),
|
'af' : lambda v: dh_noConvYear( v, u'%d v.C.' ), 'bg' : lambda v: dh_noConvYear( v, u'%d г. пр.н.е.' ), 'bs' : lambda v: dh_noConvYear( v, u'%d p.ne.' ), 'ca' : lambda v: dh_noConvYear( v, u'%d aC' ), 'da' : lambda v: dh_noConvYear( v, u'%d f.Kr.' ), 'de' : lambda v: dh_noConvYear( v, u'%d v. Chr.' ), 'en' : lambda v: dh_noConvYear( v, u'%d BC' ), 'eo' : lambda v: dh_noConvYear( v, u'-%d' ), 'es' : lambda v: dh_noConvYear( v, u'%d adC' ), 'et' : lambda v: dh_noConvYear( v, u'%d eKr' ), 'fi' : lambda v: dh_noConvYear( v, u'%d eaa' ), 'fo' : lambda v: dh_noConvYear( v, u'%d f. Kr.' ), 'fr' : lambda v: dh_noConvYear( v, u'-%d' ), 'gl' : lambda v: dh_noConvYear( v, u'-%d' ), 'he' : lambda v: dh_noConvYear( v, u'%d לפנה"ס' ), 'hr' : lambda v: dh_noConvYear( v, u'%d p.n.e.' ), 'id' : lambda v: dh_noConvYear( v, u'%d SM' ), 'io' : lambda v: dh_noConvYear( v, u'%d aK' ), 'is' : lambda v: dh_noConvYear( v, u'%d f. Kr.' ), 'it' : lambda v: dh_noConvYear( v, u'%d AC' ), 'ko' : lambda v: dh_noConvYear( v, u'기원전 %d년' ), 'la' : lambda v: dh_noConvYear( v, u'%d a.C.n.' ), 'lb' : lambda v: dh_noConvYear( v, u'-%d' ), 'ms' : lambda v: dh_noConvYear( v, u'%d SM' ), 'nap': lambda v: dh_noConvYear( v, u'%d AC' ), 'nds': lambda v: dh_noConvYear( v, u'%d v. Chr.' ), 'nl' : lambda v: dh_noConvYear( v, u'%d v. Chr.' ), 'nn' : lambda v: dh_noConvYear( v, u'-%d' ), 'no' : lambda v: dh_noConvYear( v, u'%d f.Kr.' ), 'pl' : lambda v: dh_noConvYear( v, u'%d p.n.e.' ), 'pt' : lambda v: dh_noConvYear( v, u'%d a.C.' ), 'ro' : lambda v: dh_noConvYear( v, u'%d î.Hr.' ), 'ru' : lambda v: dh_noConvYear( v, u'%d до н. э.' ), 'scn': lambda v: dh_noConvYear( v, u'%d a.C.' ), 'sl' : lambda v: dh_noConvYear( v, u'%d pr. n. št.' ), 'sr' : lambda v: dh_noConvYear( v, u'%d. пне.' ), 'sv' : lambda v: dh_noConvYear( v, u'%d f.Kr.' ), 'tt' : lambda v: dh_noConvYear( v, u'MA %d' ), 'uk' : lambda v: dh_noConvYear( v, u'%d до Р.Х.' ), 'zh' : lambda v: dh_noConvYear( v, u'前%d年' ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_noConv( x, u'%d-ві' ), lambda x: x == 0 or (x % 100 == 40)), (lambda x: dh_noConv( x, u'%d-ні' ), lambda x: x % 1000 == 0), (lambda x: dh_noConv( x, u'%d-ті' ), lambda x: True)]),
|
(lambda x: dh_dec( x, u'%d-ві' ), lambda x: x == 0 or (x % 100 == 40)), (lambda x: dh_dec( x, u'%d-ні' ), lambda x: x % 1000 == 0), (lambda x: dh_dec( x, u'%d-ті' ), lambda x: True)]),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_noConv( x, u'%d-ві до Р.Х.' ), lambda x: x == 0 or (x % 100 == 40)), (lambda x: dh_noConv( x, u'%d-ті до Р.Х.' ), lambda x: True)]),
|
(lambda x: dh_dec( x, u'%d-ві до Р.Х.' ), lambda x: x == 0 or (x % 100 == 40)), (lambda x: dh_dec( x, u'%d-ті до Р.Х.' ), lambda x: True)]), 'zh' : lambda v: dh_dec( v, u'前%d年代' ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'en' : lambda v: dh_noConv( v, u'%dth century' ),
|
'en' : lambda v: multi( v, [ (lambda x: dh_noConv( x, u'%dst century' ), lambda x: x == 1 or (x > 20 and x%10 == 1)), (lambda x: dh_noConv( x, u'%dnd century' ), lambda x: x == 2 or (x > 20 and x%10 == 2)), (lambda x: dh_noConv( x, u'%drd century' ), lambda x: x == 3 or (x > 20 and x%10 == 3)), (lambda x: dh_noConv( x, u'%dth century' ), lambda x: True)]),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
raise "bug, page not found in list"
|
print "BUG> bug, page not found in list"
|
def oneDone(self, title, timestamp, text): #print "DBG>", repr(title), timestamp, len(text) pl = PageLink(self.code, title) for pl2 in self.pages: #print "DBG>", pl, pl2, pl2.hashfreeLinkname() if PageLink(self.code, pl2.hashfreeLinkname()) == pl: if not hasattr(pl2,'_contents') and not hasattr(pl2,'_getexception'): break else: print repr(title) print repr(pl) print repr(self.pages) raise "bug, page not found in list" if self.debug: xtext = pl2.get() if text != xtext: print "################Text differs" import difflib for line in difflib.ndiff(xtext.split('\r\n'), text.split('\r\n')): if line[0] in ['+', '-']: print repr(line)[2:-1] if edittime[self.code, link2url(title, self.code)] != timestamp: print "################Timestamp differs" print "-",edittime[self.code, link2url(title, self.code)] print "+",timestamp else: m=Rredirect.match(text) if m: #print "DBG> ",pl2.asasciilink(),"is a redirect page" pl2._getexception = IsRedirectPage(m.group(1)) else: if len(text)<50: print "DBG> short text in",pl2.asasciilink() print repr(text) hn = pl2.hashname() if hn: m = re.search("== *%s *==" % hn, text) if not m: pl2._getexception = SubpageError("Hashname does not exist: %s" % self) else: # Store the content pl2._contents = text # Store the time stamp edittime[self.code, link2url(title, self.code)] = timestamp else: # Store the content pl2._contents = text # Store the time stamp edittime[self.code, link2url(title, self.code)] = timestamp
|
while 1:
|
while True:
|
def run(self): dt=15 while 1: try: data = self.getData() except socket.error: # Print the traceback of the caught exception print ''.join(traceback.format_exception(*sys.exc_info())) output(u'DBG> got socket error in GetAll.run. Sleeping for %d seconds'%dt) time.sleep(dt) dt *= 2 else: break handler = WikimediaXmlHandler() handler.setCallback(self.oneDone) try: xml.sax.parseString(data, handler) except xml.sax._exceptions.SAXParseException: f=open('sax_parse_bug.dat','w') f.write(data) f.close() print "Dumped invalid XML to sax_parse_bug.dat" raise # All of the ones that have not been found apparently do not exist for pl in self.pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): if self.site.lang == 'eo': if pl.hashfreeLinkname() != pl.hashfreeLinkname(doublex = True): # Maybe we have used x-convention when we should not? try: pl.get(force = True) except NoPage: pass except IsRedirectPage,arg: pass except LockedPage: pass except SectionError: pass else: pl._getexception = NoPage else: pl._getexception = NoPage if hasattr(pl,'_contents') and pl.site().lang=="eo": # Edit-pages on eo: use X-convention, XML export does not. # Double X-es where necessary so that we can submit a changed # page later. for c in 'C','G','H','J','S','U': for c2 in c,c.lower(): for x in 'X','x': pl._contents = pl._contents.replace(c2+x,c2+x+x)
|
dt *= 2
|
if dt <= 60: dt += 15 elif dt < 360: dt += 60
|
def run(self): dt=15 while 1: try: data = self.getData() except socket.error: # Print the traceback of the caught exception print ''.join(traceback.format_exception(*sys.exc_info())) output(u'DBG> got socket error in GetAll.run. Sleeping for %d seconds'%dt) time.sleep(dt) dt *= 2 else: break handler = WikimediaXmlHandler() handler.setCallback(self.oneDone) try: xml.sax.parseString(data, handler) except xml.sax._exceptions.SAXParseException: f=open('sax_parse_bug.dat','w') f.write(data) f.close() print "Dumped invalid XML to sax_parse_bug.dat" raise # All of the ones that have not been found apparently do not exist for pl in self.pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): if self.site.lang == 'eo': if pl.hashfreeLinkname() != pl.hashfreeLinkname(doublex = True): # Maybe we have used x-convention when we should not? try: pl.get(force = True) except NoPage: pass except IsRedirectPage,arg: pass except LockedPage: pass except SectionError: pass else: pl._getexception = NoPage else: pl._getexception = NoPage if hasattr(pl,'_contents') and pl.site().lang=="eo": # Edit-pages on eo: use X-convention, XML export does not. # Double X-es where necessary so that we can submit a changed # page later. for c in 'C','G','H','J','S','U': for c2 in c,c.lower(): for x in 'X','x': pl._contents = pl._contents.replace(c2+x,c2+x+x)
|
print "Dumped invalid XML to sax_parse_bug.dat"
|
print >>sys.stderr, "Dumped invalid XML to sax_parse_bug.dat"
|
def run(self): dt=15 while 1: try: data = self.getData() except socket.error: # Print the traceback of the caught exception print ''.join(traceback.format_exception(*sys.exc_info())) output(u'DBG> got socket error in GetAll.run. Sleeping for %d seconds'%dt) time.sleep(dt) dt *= 2 else: break handler = WikimediaXmlHandler() handler.setCallback(self.oneDone) try: xml.sax.parseString(data, handler) except xml.sax._exceptions.SAXParseException: f=open('sax_parse_bug.dat','w') f.write(data) f.close() print "Dumped invalid XML to sax_parse_bug.dat" raise # All of the ones that have not been found apparently do not exist for pl in self.pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): if self.site.lang == 'eo': if pl.hashfreeLinkname() != pl.hashfreeLinkname(doublex = True): # Maybe we have used x-convention when we should not? try: pl.get(force = True) except NoPage: pass except IsRedirectPage,arg: pass except LockedPage: pass except SectionError: pass else: pl._getexception = NoPage else: pl._getexception = NoPage if hasattr(pl,'_contents') and pl.site().lang=="eo": # Edit-pages on eo: use X-convention, XML export does not. # Double X-es where necessary so that we can submit a changed # page later. for c in 'C','G','H','J','S','U': for c2 in c,c.lower(): for x in 'X','x': pl._contents = pl._contents.replace(c2+x,c2+x+x)
|
except NoPage: pass except IsRedirectPage,arg: pass except LockedPage: pass except SectionError:
|
except (NoPage, IsRedirectPage, LockedPage, SectionError):
|
def run(self): dt=15 while 1: try: data = self.getData() except socket.error: # Print the traceback of the caught exception print ''.join(traceback.format_exception(*sys.exc_info())) output(u'DBG> got socket error in GetAll.run. Sleeping for %d seconds'%dt) time.sleep(dt) dt *= 2 else: break handler = WikimediaXmlHandler() handler.setCallback(self.oneDone) try: xml.sax.parseString(data, handler) except xml.sax._exceptions.SAXParseException: f=open('sax_parse_bug.dat','w') f.write(data) f.close() print "Dumped invalid XML to sax_parse_bug.dat" raise # All of the ones that have not been found apparently do not exist for pl in self.pages: if not hasattr(pl,'_contents') and not hasattr(pl,'_getexception'): if self.site.lang == 'eo': if pl.hashfreeLinkname() != pl.hashfreeLinkname(doublex = True): # Maybe we have used x-convention when we should not? try: pl.get(force = True) except NoPage: pass except IsRedirectPage,arg: pass except LockedPage: pass except SectionError: pass else: pl._getexception = NoPage else: pl._getexception = NoPage if hasattr(pl,'_contents') and pl.site().lang=="eo": # Edit-pages on eo: use X-convention, XML export does not. # Double X-es where necessary so that we can submit a changed # page later. for c in 'C','G','H','J','S','U': for c2 in c,c.lower(): for x in 'X','x': pl._contents = pl._contents.replace(c2+x,c2+x+x)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.