rem (string, length 0–322k) | add (string, length 0–2.05M) | context (string, length 8–228k)
---|---|---|
if "</noinclude>" in s2[firstafter:]: newtext = s2[:firstafter+1] + s + s2[firstafter+1:]
|
if "</noinclude>" in s2[firstafter:] and firstafter < 0: newtext = s2[:firstafter] + s + s2[firstafter:]
|
def replaceLanguageLinks(oldtext, new, site = None): """Replace the interwiki language links given in the wikitext given in oldtext by the new links given in new. 'new' should be a dictionary with the language names as keys, and Page objects as values. """ if site == None: site = getSite() s = interwikiFormat(new, insite = site) s2 = removeLanguageLinks(oldtext, site = site) if s: if site.language() in site.family.interwiki_attop: newtext = s + site.family.interwiki_text_separator + s2 else: # calculate what was after the language links on the page firstafter = 0 try: while s2[firstafter-1] == oldtext[firstafter-1]: firstafter -= 1 except IndexError: pass # Is there any text in the 'after' part that means we should keep it after? if "</noinclude>" in s2[firstafter:]: newtext = s2[:firstafter+1] + s + s2[firstafter+1:] elif site.language() in site.family.categories_last: cats = getCategoryLinks(s2, site = site) s2 = removeCategoryLinks(s2, site) + site.family.interwiki_text_separator + s newtext = replaceCategoryLinks(s2, cats, site=site) else: newtext = s2 + site.family.interwiki_text_separator + s else: newtext = s2 return newtext
|
return self.lang in self.site().family.category_on_one_line
|
return self.lang in self.family.category_on_one_line
|
def category_on_one_line(self): return self.lang in self.site().family.category_on_one_line
|
if regex:
|
if self.regex:
|
def read_pages_from_sql_dump(self): """ Generator which will yield PageLinks to pages that might contain text to replace. These pages will be retrieved from a local sql dump file (cur table). Arguments: * sqlfilename - the dump's path, either absolute or relative * replacements - a dictionary where old texts are keys and new texts are values * exceptions - a list of strings; pages which contain one of these won't be changed. * regex - if the entries of replacements and exceptions should be interpreted as regular expressions """ mysite = wikipedia.getSite() import sqldump dump = sqldump.SQLdump(self.sqlfilename, wikipedia.myencoding()) for entry in dump.entries(): skip_page = False if self.namespace != -1 and self.namespace != entry.namespace: continue else: for exception in self.exceptions: if regex: exception = re.compile(exception) if exception.search(entry.text): skip_page = True break else: if entry.text.find(exception) != -1: skip_page = True break if not skip_page: for old in self.replacements.iterkeys(): if self.regex: old = re.compile(old) if old.search(entry.text): yield wikipedia.PageLink(mysite, entry.full_title()) break else: if entry.text.find(old) != -1: yield wikipedia.PageLink(mysite, entry.full_title()) break
|
filename = 'interwiki-graphs/%s.png' % inpl.urlname() graph.write(filename, prog = 'dot', format = 'png') wikipedia.output(u'Graph saved as %s' % filename)
|
filename = 'interwiki-graphs/%s-%s-%s.png' % (inpl.site().family.name, inpl.site().language(), inpl.urlname()) if graph.write(filename, prog = 'dot', format = 'png'): wikipedia.output(u'Graph saved as %s' % filename) else: wikipedia.output(u'Graph could not be saved as %s' % filename)
|
def createGraph(self): import pydot # create empty graph graph = pydot.Dot() graph.add_node(pydot.Node('start', shape = 'point')) for page in self.foundin.iterkeys(): # a node for each found page node = pydot.Node('"%s:%s"' % (page.site().language(), wikipedia.unicode2html(page.title(), 'ascii')), shape = 'rectangle') if not page.exists(): node.set_style('filled') node.set_fillcolor('red') elif page.isRedirectPage(): node.set_style('filled') node.set_fillcolor('blue') elif page.isDisambig(): node.set_style('filled') node.set_fillcolor('orange') graph.add_node(node) # mark start node firstLabel = '"%s:%s"' % (inpl.site().language(), wikipedia.unicode2html(inpl.title(), 'ascii')) graph.add_edge(pydot.Edge('start', firstLabel)) for page, referrers in self.foundin.iteritems(): for refPage in referrers: sourceLabel = '"%s:%s"' % (refPage.site().language(), wikipedia.unicode2html(refPage.title(), 'ascii')) targetLabel = '"%s:%s"' % (page.site().language(), wikipedia.unicode2html(page.title(), 'ascii')) edge = pydot.Edge(sourceLabel, targetLabel) oppositeEdge = graph.get_edge(targetLabel, sourceLabel) if oppositeEdge: oppositeEdge.set_arrowtail('normal') else: # add edge if refPage.site() == page.site(): edge.set_color('blue') elif not page.exists(): # mark dead links edge.set_color('red') elif refPage.isDisambig() != page.isDisambig(): # mark links between disambiguation and non-disambiguation # pages edge.set_color('orange') graph.add_edge(edge) filename = 'interwiki-graphs/%s.png' % inpl.urlname() graph.write(filename, prog = 'dot', format = 'png') wikipedia.output(u'Graph saved as %s' % filename)
|
wikipedia.output(u"WARNING: Page %s does no longer exist?!"%page)
|
wikipedia.output(u"WARNING: Page %s does no longer exist?!" % page.title())
|
def reportBacklinks(self, new, updatedSites): """ Report missing back links. This will be called from finish() if needed.
|
for i in range(len(line)):
|
for i in range(min(len(line), len(lastline))):
|
def showDiff(oldtext, newtext): """ Prints a string showing the differences between oldtext and newtext. The differences are highlighted (only on Unix systems) to show which changes were made. """ # For information on difflib, see http://pydoc.org/2.3/difflib.html color = { '+': 10, # green '-': 12 # red } diff = '' colors = [] # This will store the last line beginning with + or -. lastline = None # For testing purposes only: show original, uncolored diff # for line in difflib.ndiff(oldtext.splitlines(), newtext.splitlines()): # print line for line in difflib.ndiff(oldtext.splitlines(), newtext.splitlines()): if line.startswith('?'): # initialize color vector with None, which means default color lastcolors = [None for c in lastline] # colorize the + or - sign lastcolors[0] = color[lastline[0]] # colorize changed parts in red or green for i in range(len(line)): if line[i] != ' ': lastcolors[i] = color[lastline[0]] diff += lastline + '\n' # append one None (default color) for the newline character colors += lastcolors + [None] elif lastline: diff += lastline + '\n' # colorize the + or - sign only lastcolors = [None for c in lastline] lastcolors[0] = color[lastline[0]] colors += lastcolors + [None] lastline = None if line[0] in ('+', '-'): lastline = line # there might be one + or - line left that wasn't followed by a ? line. if lastline: diff += lastline + '\n' # colorize the + or - sign only lastcolors = [None for c in lastline] lastcolors[0] = color[lastline[0]] colors += lastcolors + [None] output(diff, colors = colors)
|
del new[zh]
|
del new['zh']
|
def assemble(self): new = {} nerr = 0 for pl in self.done.keys(): code = pl.code() if code == wikipedia.mylang and pl.exists() and not pl.isRedirectPage() and not pl.isEmpty(): if pl != self.inpl: self.problem("Found link to %s"%pl.asasciilink()) for pl2 in self.foundin[pl]: print " ",pl2.asasciilink() nerr += 1 elif pl.exists() and not pl.isRedirectPage(): if code in new: new[code].append(pl) else: new[code] = [pl] if 'zh-cn' in new or 'zh-tw' in new: if 'zh' in new: del new[zh] print "Ignoring links to zh in presence of zh-cn or zh-tw" result = {} for k,v in new.items(): if len(v) > 1: nerr += 1 self.problem("Found more than one link for %s"%k) for k,v in new.items(): if len(v) > 1 or nerr > 0: print "Links to %s"%k i = 0 for pl2 in v: i += 1 print " (%d) Found link to %s in:"%(i,pl2.asasciilink()) for pl3 in self.foundin[pl2]: print " %s"%pl3.asasciilink() if not globalvar.autonomous: answer = raw_input("Which variant should be used [type number or (n)one, (g)ive up] :") if answer in 'gG': return None elif answer in 'nN': # None acceptable pass else: answer = int(answer) result[k] = v[answer-1] else: result[k] = v[0] if globalvar.autonomous and nerr>0: return None return result
|
print "DBG> successfulupload contains %s" % get('successfulupload')
|
print "DBG> successfulupload contains %s" % get('successfulupload') print "DBG> deletedtext contains %s" % get('deletedtext')
|
def main(): debug = False refresh_all = False for arg in sys.argv[1:]: arg = wikipedia.argHandler(arg) if arg: if arg == '-debug': debug = True elif arg == '-all': refresh_all = True if refresh_all: refresh_all_messages() else: refresh_messages(wikipedia.getSite()) if debug: print "DBG> successfulupload contains %s" % get('successfulupload')
|
print data
|
def putPage(site, name, text, comment = None, watchArticle = False, minorEdit = True, newPage = False, anon=False, token = None): """Upload 'text' on page 'name' to the 'site' wiki. Use of this routine can normally be avoided; use PageLink.put instead. """ # Check whether we are not too quickly after the previous putPage, and # wait a bit until the interval is acceptable put_throttle() # Which web-site host are we submitting to? host = site.hostname() # Get the address of the page on that host. address = site.put_address(space2underline(name)) # If no comment is given for the change, use the default if comment is None: comment=action # Prefix the comment with the user name if the user is not logged in. if not site.loggedin() and not anon: comment = username + ' - ' + comment # Use the proper encoding for the comment comment = comment.encode(site.encoding()) try: # Encode the text into the right encoding for the wiki text = forSite(text, site) predata = [ ('wpSave', '1'), ('wpPreview', '0'), ('wpSummary', comment), ('wpTextbox1', text), ('wpSection', '')] # Except if the page is new, we need to supply the time of the # previous version to the wiki to prevent edit collisions if newPage and newPage != '0': predata.append(('wpEdittime', '')) else: predata.append(('wpEdittime', edittime[repr(site), link2url(name, site = site)])) # Pass the minorEdit and watchArticle arguments to the Wiki. if minorEdit and minorEdit != '0': predata.append(('wpMinoredit', '1')) else: predata.append(('wpMinoredit', '0')) if watchArticle and watchArticle != '0': predata.append(('wpWatchthis', '1')) else: predata.append(('wpWatchthis', '0')) # Give the token, but only if one is supplied. if token: predata.append(('wpEditToken', token)) # Encode all of this into a HTTP request data = urlencode(tuple(predata)) print data except KeyError: print edittime raise if newPage and newPage!= '0': output(url2unicode("Creating page %s"%site.linkto(name), site = site)) else: output(url2unicode("Changing page %s"%site.linkto(name), site = site)) # Submit the prepared information conn = httplib.HTTPConnection(host) conn.putrequest("POST", address) conn.putheader('Content-Length', str(len(data))) conn.putheader("Content-type", "application/x-www-form-urlencoded") conn.putheader("User-agent", "RobHooftWikiRobot/1.0") if not anon and site.cookies(): conn.putheader('Cookie',site.cookies()) conn.endheaders() conn.send(data) # Prepare the return values response = conn.getresponse() data = response.read() conn.close() if data != '': if "<title>Edit conflict" in data: # FIXME: multi-lingual raise EditConflict() else: output(data, decoder = myencoding()) return response.status, response.reason, data
|
|
elif not site.gettoken:
|
elif not site.gettoken():
|
def getPage(site, name, get_edit_page = True, read_only = False, do_quote = True, get_redirect=False): """ Get the contents of page 'name' from the 'site' wiki Do not use this directly; for 99% of the possible ideas you can use the PageLink object instead. Arguments: site - the wiki site name - the page name get_edit_page - If true, gets the edit page, otherwise gets the normal page. read_only - If true, doesn't raise LockedPage exceptions. do_quote - ??? (TODO: what is this for?) get_redirect - Get the contents, even if it is a redirect page This routine returns a unicode string containing the wiki text if get_edit_page is True; otherwise it returns a unicode string containing the entire page's HTML code. """ host = site.hostname() name = re.sub(' ', '_', name) output(url2unicode(u'Getting page %s' % site.linkto(name), site = site)) # A heuristic to encode the URL into %XX for characters that are not # allowed in a URL. if not '%' in name and do_quote: # It should not have been done yet if name != urllib.quote(name): print "DBG> quoting",name name = urllib.quote(name) address = site.get_address(name) if get_edit_page: address += '&action=edit&printable=yes' # Make sure Brion doesn't get angry by waiting if the last time a page # was retrieved was not long enough ago. get_throttle() # Try to retrieve the page until it was successfully loaded (just in case # the server is down or overloaded) # wait for retry_idle_time minutes (growing!) between retries. retry_idle_time = 1 while True: text, charset = getUrl(host, address, site) # Extract the actual text from the textedit field if get_edit_page: if charset is None: print "WARNING: No character set found" else: # Store character set for later reference site.checkCharset(charset) if not read_only: # check if we're logged in p=re.compile('userlogin') if p.search(text) != None: output(u'Warning: You\'re probably not logged in on %s:' % repr(site)) m = re.search('value="(\d+)" name=\'wpEdittime\'',text) if m: edittime[repr(site), link2url(name, site = site)] = m.group(1) else: m = re.search('value="(\d+)" name="wpEdittime"',text) if m: edittime[repr(site), link2url(name, site = site)] = m.group(1) else: edittime[repr(site), link2url(name, site = site)] = "0" try: i1 = re.search('<textarea[^>]*>', text).end() except AttributeError: # We assume that the server is down. Wait some time, then try again. print "WARNING: No text area found on %s%s. Maybe the server is down. Retrying in %d minutes..." % (host, address, retry_idle_time) time.sleep(retry_idle_time * 60) # Next time wait longer, but not longer than half an hour retry_idle_time *= 2 if retry_idle_time > 30: retry_idle_time = 30 continue i2 = re.search('</textarea>', text).start() if i2-i1 < 2: raise NoPage(site, name) m = redirectRe(site).match(text[i1:i2]) if m and not get_redirect: output(u"DBG> %s is redirect to %s" % (url2unicode(name, site = site), unicode(m.group(1), site.encoding()))) raise IsRedirectPage(m.group(1)) if edittime[repr(site), link2url(name, site = site)] == "0" and not read_only: print "DBG> page may be locked?!" raise LockedPage() x = text[i1:i2] x = unescape(x) while x and x[-1] in '\n ': x = x[:-1] else: x = text # If not editing # Convert to a unicode string. If there's invalid unicode data inside # the page, replace it with question marks. 
x = unicode(x, charset, errors = 'replace') # Looking for the token R = re.compile(r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"") tokenloc = R.search(text) if tokenloc: site.puttoken(tokenloc.group(1)) elif not site.gettoken: site.puttoken('') return x
|
titleLength = len(titleWithSection) titleWithSection = titleWithSection.rstrip() hadTrailingSpaces = (len(titleWithSection) != titleLength)
|
if not trailingChars or label: titleLength = len(titleWithSection) titleWithSection = titleWithSection.rstrip() hadTrailingSpaces = (len(titleWithSection) != titleLength)
|
def cleanUpLinks(self, text): trailR = re.compile(self.site.linktrail()) # The regular expression which finds links. Results consist of four groups: # group title is the target page title, that is, everything before | or ]. # group section is the page section. It'll include the # to make life easier for us. # group label is the alternative link title, that's everything between | and ]. # group linktrail is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. self.linkR = re.compile(r'\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' + self.site.linktrail() + ')') curpos = 0 # This loop will run until we have finished the current page while True: m = self.linkR.search(text, pos = curpos) if not m: break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 titleWithSection = m.group('titleWithSection') if not self.site.isInterwikiLink(titleWithSection): # The link looks like this: # [[page_title|link_text]]trailing_chars # We only work on namespace 0 because pipes and linktrails work # differently for images and categories. page = wikipedia.Page(self.site, titleWithSection) if page.namespace() == 0: # Replace underlines by spaces, also multiple underlines titleWithSection = re.sub('_+', ' ', titleWithSection) # Remove double spaces titleWithSection = re.sub(' +', ' ', titleWithSection) # Remove unnecessary leading spaces from title, # but remember if we did this because we eventually want # to re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.lstrip() hadLeadingSpaces = (len(titleWithSection) != titleLength) # Remove unnecessary trailing spaces from title, # but remember if we did this because it may affect # the linktrail and because we eventually want to # re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.rstrip() hadTrailingSpaces = (len(titleWithSection) != titleLength)
|
label = m.group('label')
|
def cleanUpLinks(self, text): trailR = re.compile(self.site.linktrail()) # The regular expression which finds links. Results consist of four groups: # group title is the target page title, that is, everything before | or ]. # group section is the page section. It'll include the # to make life easier for us. # group label is the alternative link title, that's everything between | and ]. # group linktrail is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. self.linkR = re.compile(r'\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' + self.site.linktrail() + ')') curpos = 0 # This loop will run until we have finished the current page while True: m = self.linkR.search(text, pos = curpos) if not m: break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 titleWithSection = m.group('titleWithSection') if not self.site.isInterwikiLink(titleWithSection): # The link looks like this: # [[page_title|link_text]]trailing_chars # We only work on namespace 0 because pipes and linktrails work # differently for images and categories. page = wikipedia.Page(self.site, titleWithSection) if page.namespace() == 0: # Replace underlines by spaces, also multiple underlines titleWithSection = re.sub('_+', ' ', titleWithSection) # Remove double spaces titleWithSection = re.sub(' +', ' ', titleWithSection) # Remove unnecessary leading spaces from title, # but remember if we did this because we eventually want # to re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.lstrip() hadLeadingSpaces = (len(titleWithSection) != titleLength) # Remove unnecessary trailing spaces from title, # but remember if we did this because it may affect # the linktrail and because we eventually want to # re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.rstrip() hadTrailingSpaces = (len(titleWithSection) != titleLength)
|
|
labelLength = len(label) label = label.rstrip() hadTrailingSpaces = (len(label) != label)
|
if not trailingChars: labelLength = len(label) label = label.rstrip() hadTrailingSpaces = (len(label) != labelLength)
|
def cleanUpLinks(self, text): trailR = re.compile(self.site.linktrail()) # The regular expression which finds links. Results consist of four groups: # group title is the target page title, that is, everything before | or ]. # group section is the page section. It'll include the # to make life easier for us. # group label is the alternative link title, that's everything between | and ]. # group linktrail is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. self.linkR = re.compile(r'\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' + self.site.linktrail() + ')') curpos = 0 # This loop will run until we have finished the current page while True: m = self.linkR.search(text, pos = curpos) if not m: break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 titleWithSection = m.group('titleWithSection') if not self.site.isInterwikiLink(titleWithSection): # The link looks like this: # [[page_title|link_text]]trailing_chars # We only work on namespace 0 because pipes and linktrails work # differently for images and categories. page = wikipedia.Page(self.site, titleWithSection) if page.namespace() == 0: # Replace underlines by spaces, also multiple underlines titleWithSection = re.sub('_+', ' ', titleWithSection) # Remove double spaces titleWithSection = re.sub(' +', ' ', titleWithSection) # Remove unnecessary leading spaces from title, # but remember if we did this because we eventually want # to re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.lstrip() hadLeadingSpaces = (len(titleWithSection) != titleLength) # Remove unnecessary trailing spaces from title, # but remember if we did this because it may affect # the linktrail and because we eventually want to # re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.rstrip() hadTrailingSpaces = (len(titleWithSection) != titleLength)
|
label = titleWithSection trailingChars = m.group('linktrail') if trailingChars and not hadTrailingSpaces:
|
label = titleWithSection if trailingChars:
|
def cleanUpLinks(self, text): trailR = re.compile(self.site.linktrail()) # The regular expression which finds links. Results consist of four groups: # group title is the target page title, that is, everything before | or ]. # group section is the page section. It'll include the # to make life easier for us. # group label is the alternative link title, that's everything between | and ]. # group linktrail is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. self.linkR = re.compile(r'\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' + self.site.linktrail() + ')') curpos = 0 # This loop will run until we have finished the current page while True: m = self.linkR.search(text, pos = curpos) if not m: break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 titleWithSection = m.group('titleWithSection') if not self.site.isInterwikiLink(titleWithSection): # The link looks like this: # [[page_title|link_text]]trailing_chars # We only work on namespace 0 because pipes and linktrails work # differently for images and categories. page = wikipedia.Page(self.site, titleWithSection) if page.namespace() == 0: # Replace underlines by spaces, also multiple underlines titleWithSection = re.sub('_+', ' ', titleWithSection) # Remove double spaces titleWithSection = re.sub(' +', ' ', titleWithSection) # Remove unnecessary leading spaces from title, # but remember if we did this because we eventually want # to re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.lstrip() hadLeadingSpaces = (len(titleWithSection) != titleLength) # Remove unnecessary trailing spaces from title, # but remember if we did this because it may affect # the linktrail and because we eventually want to # re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.rstrip() hadTrailingSpaces = (len(titleWithSection) != titleLength)
|
if trailingChars: newLink += trailingChars
|
def cleanUpLinks(self, text): trailR = re.compile(self.site.linktrail()) # The regular expression which finds links. Results consist of four groups: # group title is the target page title, that is, everything before | or ]. # group section is the page section. It'll include the # to make life easier for us. # group label is the alternative link title, that's everything between | and ]. # group linktrail is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. self.linkR = re.compile(r'\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' + self.site.linktrail() + ')') curpos = 0 # This loop will run until we have finished the current page while True: m = self.linkR.search(text, pos = curpos) if not m: break # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 titleWithSection = m.group('titleWithSection') if not self.site.isInterwikiLink(titleWithSection): # The link looks like this: # [[page_title|link_text]]trailing_chars # We only work on namespace 0 because pipes and linktrails work # differently for images and categories. page = wikipedia.Page(self.site, titleWithSection) if page.namespace() == 0: # Replace underlines by spaces, also multiple underlines titleWithSection = re.sub('_+', ' ', titleWithSection) # Remove double spaces titleWithSection = re.sub(' +', ' ', titleWithSection) # Remove unnecessary leading spaces from title, # but remember if we did this because we eventually want # to re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.lstrip() hadLeadingSpaces = (len(titleWithSection) != titleLength) # Remove unnecessary trailing spaces from title, # but remember if we did this because it may affect # the linktrail and because we eventually want to # re-add it outside of the link later. titleLength = len(titleWithSection) titleWithSection = titleWithSection.rstrip() hadTrailingSpaces = (len(titleWithSection) != titleLength)
|
|
second is a predicate function with an integer parameter that returns true or false if the 1st function applies or not.
|
second is a predicate function with an integer parameter that returns true or false. When the 2nd function evaluates to true, the 1st function is used.
|
def multi( value, tuplst ): """This method is used when different patterns are used for the same entry. Example: 1st century, 2nd century, etc. The tuplst is a list of tupples. Each tupple must contain two functions: first to encode/decode a single value (e.g. simpleInt), second is a predicate function with an integer parameter that returns true or false if the 1st function applies or not. """ if type(value) is int: # Find a predicate that gives true for this int value, and run a function for func, pred in tuplst: if pred(value): return func(value) else: # Try all functions, and test result against predicates for func, pred in tuplst: try: res = func(value) if pred(res): return res except: pass raise ValueError("could not find a matching function")
|
return dh_constVal( value, 0, match ) def dh_constVal( value, ind, match ): """This function helps with matching a single value to a constant. formats['CurrEvents']['en'](ind) => u'Current Events' formats['CurrEvents']['en'](u'Current Events') => ind"""
|
def dh_singVal( value, match ): """This function helps with matching a single value. formats['CurrEvents']['en'](0) => u'Current Events' formats['CurrEvents']['en'](u'Current Events') => 0""" if type(value) is int: if value == 0: return match else: raise ValueError("unknown value %d" % value) else: if value == match: return 0 else: raise ValueError()
|
|
if value == 0:
|
if value == ind:
|
def dh_singVal( value, match ): """This function helps with matching a single value. formats['CurrEvents']['en'](0) => u'Current Events' formats['CurrEvents']['en'](u'Current Events') => 0""" if type(value) is int: if value == 0: return match else: raise ValueError("unknown value %d" % value) else: if value == match: return 0 else: raise ValueError()
|
return 0
|
return ind
|
def dh_singVal( value, match ): """This function helps with matching a single value. formats['CurrEvents']['en'](0) => u'Current Events' formats['CurrEvents']['en'](u'Current Events') => 0""" if type(value) is int: if value == 0: return match else: raise ValueError("unknown value %d" % value) else: if value == match: return 0 else: raise ValueError()
|
return unicode(value).translate(_knDigitsToLocal)
|
return unicode(value).translate(digitsToLocal)
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
tmp = value.translate(_knDigitsToLocal)
|
tmp = value.translate(digitsToLocal)
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
tmp = value.translate(_knLocalToDigits)
|
tmp = value.translate(localToDigits)
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'ca' : lambda v: dh_dec( v, u'Dècada del %d' ),
|
'ca' : lambda v: multi( v, [ (lambda x: dh_dec( x, u'Dècada de %d' ), lambda p: p == 1970), (lambda x: dh_dec( x, u'Dècada del %d' ), alwaysTrue)]),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'fi' : lambda v: dh_dec( v, u'%d-luku' ),
|
'fi' : lambda v: multi( v, [ (lambda x: dh_dec( x, u'%d-luku' ), lambda p: (p % 100 != 0)), (lambda x: dh_dec( x, u'%d-vuosikymmen' ), alwaysTrue)]),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'hr' : lambda v: dh( v, u'%d-%d', lambda i: (encDec0(i),encDec0(i)+9), lambda v: v[0] ), 'hu' : lambda v: dh_dec( v, u'%d-as évek' ),
|
'hr' : lambda v: dh( v, u'%d-%d', lambda i: (encDec0(i),encDec0(i)+9), lambda ii: ii[0] ), 'hu' : lambda v: multi( v, [ (lambda x: dh_dec( x, u'%d-ás évek' ), lambda p: p == 0), (lambda x: dh_dec( x, u'%d-as évek' ), lambda p: (p % 100 / 10) in [0,2,3,6,8]), (lambda x: dh_dec( x, u'%d-es évek' ), alwaysTrue)]),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'is' : lambda v: dh( v, u'%d–%d', lambda i: (encDec1(i),encDec1(i)+9), lambda v: v[0]-1 ),
|
'is' : lambda v: dh( v, u'%d–%d', lambda i: (encDec1(i),encDec1(i)+9), lambda ii: ii[0]-1 ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_dec( v, u'%d-talet (decennium)' ), lambda x: (x % 100 == 0)), (lambda x: dh_dec( v, u'%d-talet' ), alwaysTrue)]),
|
(lambda x: dh_dec( x, u'%d-talet (decennium)' ), lambda p: (p % 100 == 0)), (lambda x: dh_dec( x, u'%d-talet' ), alwaysTrue)]),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_dec( x, u'%d-ві' ), lambda x: x == 0 or (x % 100 == 40)), (lambda x: dh_dec( x, u'%d-ні' ), lambda x: x % 1000 == 0),
|
(lambda x: dh_dec( x, u'%d-ві' ), lambda p: p == 0 or (p % 100 == 40)), (lambda x: dh_dec( x, u'%d-ні' ), lambda p: p % 1000 == 0),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_dec( v, u'%d-talet f.Kr. (decennium)' ), lambda x: (x % 100 == 0)), (lambda x: dh_dec( v, u'%d-talet f.Kr.' ), alwaysTrue)]),
|
(lambda x: dh_dec( x, u'%d-talet f.Kr. (decennium)' ), lambda p: (p % 100 == 0)), (lambda x: dh_dec( x, u'%d-talet f.Kr.' ), alwaysTrue)]),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_dec( x, u'%d-ві до Р.Х.' ), lambda x: x == 0 or (x % 100 == 40)),
|
(lambda x: dh_dec( x, u'%d-ві до Р.Х.' ), lambda p: p == 0 or (p % 100 == 40)),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_noConv( v, u'%dste eeu' ), lambda x: x in [1,8] or (x >= 20)), (lambda x: dh_noConv( x, u'%dde eeu' ), alwaysTrue)]),
|
(lambda x: dh_noConv( x, u'%dste eeu' ), lambda p: p in [1,8] or (p >= 20)), (lambda x: dh_noConv( x, u'%dde eeu' ), alwaysTrue)]), 'als': lambda v: dh_noConv( v, u'%d. Jahrhundert' ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_noConv( x, u'%dst century' ), lambda x: x == 1 or (x > 20 and x%10 == 1)), (lambda x: dh_noConv( x, u'%dnd century' ), lambda x: x == 2 or (x > 20 and x%10 == 2)), (lambda x: dh_noConv( x, u'%drd century' ), lambda x: x == 3 or (x > 20 and x%10 == 3)),
|
(lambda x: dh_noConv( x, u'%dst century' ), lambda p: p == 1 or (p > 20 and p%10 == 1)), (lambda x: dh_noConv( x, u'%dnd century' ), lambda p: p == 2 or (p > 20 and p%10 == 2)), (lambda x: dh_noConv( x, u'%drd century' ), lambda p: p == 3 or (p > 20 and p%10 == 3)),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'fi' : lambda v: dh( v, u'%d00-luku', lambda i: i-1, lambda v: v[0]+1 ),
|
'en' : lambda v: multi( v, [ (lambda x: dh_constVal( x, 20, u'سده ۲۰ (میلادی)'), lambda p: p == 20)]), 'fi' : lambda v: dh( v, u'%d00-luku', lambda i: i-1, lambda ii: ii[0]+1 ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_noConv( v, u'%dsa kansblydhen' ), lambda x: x <= 3), (lambda x: dh_noConv( v, u'%da kansblydhen' ), lambda x: x == 4), (lambda x: dh_noConv( v, u'%des kansblydhen' ), lambda x: x == 5), (lambda x: dh_noConv( v, u'%dns kansblydhen' ), lambda x: x >= 20), (lambda x: dh_noConv( v, u'%dves kansblydhen' ), alwaysTrue)]),
|
(lambda x: dh_noConv( x, u'%dsa kansblydhen' ), lambda p: p <= 3), (lambda x: dh_noConv( x, u'%da kansblydhen' ), lambda p: p == 4), (lambda x: dh_noConv( x, u'%des kansblydhen' ), lambda p: p == 5), (lambda x: dh_noConv( x, u'%dns kansblydhen' ), lambda p: p >= 20), (lambda x: dh_noConv( x, u'%dves kansblydhen' ), alwaysTrue)]),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'li' : lambda v: slh( v, [u'Ierste iew', u'Twiede iew', u'Derde iew', u'Veerde iew', u'Viefde iew', u'Zesde iew', u'Zevende iew', u'Achste iew', u'Negende iew', u'Tiende iew', u'Elfde iew', u'Twelfde iew', u'Dertiende iew', u'Veertiende iew', u'Vieftiende iew', u'Zestiende iew', u'Zeventiende iew', u'Achtiende iew', u'Negentiende iew', u'Twintegste iew', u'Einentwintegste iew', u'Twieëntwintegste iew',] ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
|
'nn' : lambda v: dh( v, u'%d00-talet', lambda i: i-1, lambda v: v[0]+1 ),
|
'nn' : lambda v: dh( v, u'%d00-talet', lambda i: i-1, lambda ii: ii[0]+1 ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'sv' : lambda v: dh( v, u'%d00-talet', lambda i: i-1, lambda v: v[0]+1 ),
|
'sv' : lambda v: dh( v, u'%d00-talet', lambda i: i-1, lambda ii: ii[0]+1 ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'sv' : lambda v: dh( v, u'%d00-talet f.Kr.', lambda i: i-1, lambda v: v[0]+1 ),
|
'sv' : lambda v: dh( v, u'%d00-talet f.Kr.', lambda i: i-1, lambda ii: ii[0]+1 ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_noConv( x, u'%dst millennium' ), lambda x: x == 1 or (x > 20 and x%10 == 1)), (lambda x: dh_noConv( x, u'%dnd millennium' ), lambda x: x == 2 or (x > 20 and x%10 == 2)), (lambda x: dh_noConv( x, u'%drd millennium' ), lambda x: x == 3 or (x > 20 and x%10 == 3)),
|
(lambda x: dh_noConv( x, u'%dst millennium' ), lambda p: p == 1 or (p > 20 and p%10 == 1)), (lambda x: dh_noConv( x, u'%dnd millennium' ), lambda p: p == 2 or (p > 20 and p%10 == 2)), (lambda x: dh_noConv( x, u'%drd millennium' ), lambda p: p == 3 or (p > 20 and p%10 == 3)),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
(lambda x: dh_roman( x, u'%ser millénaire' ), lambda x: x == 1), (lambda x: dh_roman( v, u'%se millénaire' ), alwaysTrue)]),
|
(lambda x: dh_roman( x, u'%ser millénaire' ), lambda p: p == 1), (lambda x: dh_roman( x, u'%se millénaire' ), alwaysTrue)]),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'ro' : lambda v: dh_roman( v, u'Mileniul %s' ),
|
'pt' : lambda v: slh( v, [u'Primeiro milénio d.C.', u'Segundo milénio d.C.', u'Terceiro milénio d.C.', u'Quarto milénio d.C.'] ), 'ro' : lambda v: slh( v, [u'Mileniul I', u'Mileniul al II-lea'] ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'sv' : lambda v: dh( v, u'%d000-talet (millennium)', lambda i: i-1, lambda v: v[0]+1 ),
|
'sv' : lambda v: dh( v, u'%d000-talet (millennium)', lambda i: i-1, lambda ii: ii[0]+1 ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
'pt' : lambda v: slh( v, [u'Primeiro milénio a.C.', u'Segundo milénio a.C.', u'Terceiro milénio a.C.', u'Quarto milénio a.C.'] ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
|
'sv' : lambda v: dh( v, u'%d000-talet f.Kr. (millennium)', lambda i: i-1, lambda v: v[0]+1 ),
|
'sv' : lambda v: dh( v, u'%d000-talet f.Kr. (millennium)', lambda i: i-1, lambda ii: ii[0]+1 ),
|
def dh_knYearConverter( value ): if type(value) is int: # Encode an integer value into a textual form. return unicode(value).translate(_knDigitsToLocal) else: # First make sure there are no real digits in the string tmp = value.translate(_knDigitsToLocal) # Test if tmp == value: tmp = value.translate(_knLocalToDigits) # Convert return dh_noConv( tmp, u'%d' ) else: raise ValueError("string contains regular digits")
|
u'(lambda x: dh_noConv( x, u"%%dî d\' %s" ), lambda x: x == 1),' + u'(lambda x: dh_noConv( x, u"%%d d\' %s" ), lambda x: x in [2,3,20,22,23]),' + u'(lambda x: dh_noConv( x, u"%%d di %s" ), alwaysTrue)])') % (waMonthNames[i],waMonthNames[i],waMonthNames[i]))
|
u'(lambda x: dh_noConv( x, u"%%dî d\' %s" ), lambda p: p == 1),' + u'(lambda x: dh_noConv( x, u"%%d d\' %s" ), lambda p: p in [2,3,20,22,23]),' + u'(lambda x: dh_noConv( x, u"%%d di %s" ), alwaysTrue)])') % (waMonthNames[i],waMonthNames[i],waMonthNames[i]))
|
def makeMonthNamedList( lang, pattern, makeUpperCase = None ): """Creates a list of 12 elements based on the name of the month. The language-dependent month name is used as a formating argument to the pattern. The pattern must be have one %s that will be replaced by the localized month name. Use %%d for any other parameters that should be preserved. """ if makeUpperCase == None: f = lambda s: s elif makeUpperCase == True: f = lambda s: s[0].upper() + s[1:] elif makeUpperCase == False: f = lambda s: s[0].lower() + s[1:] return [ pattern % f(monthName(lang, m)) for m in range(1,13) ]
|
u'(lambda x: dh_noConv( x, u"%%dî d\' %s" ), lambda x: x == 1),' + u'(lambda x: dh_noConv( x, u"%%d d\' %s" ), alwaysTrue)])') % (waMonthNames[i],waMonthNames[i]))
|
u'(lambda x: dh_noConv( x, u"%%dî d\' %s" ), lambda p: p == 1),' + u'(lambda x: dh_noConv( x, u"%%d d\' %s" ), alwaysTrue)])') % (waMonthNames[i],waMonthNames[i]))
|
def makeMonthNamedList( lang, pattern, makeUpperCase = None ): """Creates a list of 12 elements based on the name of the month. The language-dependent month name is used as a formating argument to the pattern. The pattern must be have one %s that will be replaced by the localized month name. Use %%d for any other parameters that should be preserved. """ if makeUpperCase == None: f = lambda s: s elif makeUpperCase == True: f = lambda s: s[0].upper() + s[1:] elif makeUpperCase == False: f = lambda s: s[0].lower() + s[1:] return [ pattern % f(monthName(lang, m)) for m in range(1,13) ]
|
wikipedia.output(u'Page [[%s]] does not seem to exist?! Skipping.') % refpl.linkname()
|
wikipedia.output(u'Page [[%s]] does not seem to exist?! Skipping.' % refpl.linkname())
|
def treat(self, refpl, disambPl): """ Parameters: disambPl - The disambiguation page or redirect we don't want anything to link on refpl - A page linking to disambPl Returns False if the user pressed q to completely quit the program. Otherwise, returns True. """ if self.mylang in link_trail: linktrail=link_trail[self.mylang] else: linktrail='[a-z]*' trailR=re.compile(linktrail) # The regular expression which finds links. Results consist of three groups: # group(1) is the target page title, that is, everything before | or ]. # group(2) is the alternative link title, that's everything between | and ]. # group(3) is the link trail, that's letters after ]] which are part of the word. # note that the definition of 'letter' varies from language to language. linkR=re.compile(r'\[\[([^\]\|]*)(?:\|([^\]]*))?\]\](' + linktrail + ')')
|
if response.status == 302:
|
if response.status in [200, 302]:
|
def upload_image(self, debug=False): """Gets the image at URL self.url, and uploads it to the target wiki. Returns the filename which was used to upload the image. If the upload fails, the user is asked whether to try again or not. If the user chooses not to retry, returns null. """ # Get file contents if '://' in self.url: uo = wikipedia.MyURLopener() file = uo.open(self.url) else: # Opening local files with MyURLopener would be possible, but we # don't do it because it only accepts ASCII characters in the # filename. file = open(self.url) wikipedia.output('Reading file %s' % self.url) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name filename = self.url if '/' in filename: filename = filename.split('/')[-1] if '\\' in filename: filename = filename.split('\\')[-1] if self.urlEncoding: filename = urllib.unquote(filename) filename = filename.decode(self.urlEncoding) if not self.keepFilename: wikipedia.output(u"The filename on the target wiki will default to: %s" % filename) # ask newfn until it's valid ok = False # FIXME: these 2 belong somewhere else, presumably in family forbidden = '/' # to be extended allowed_formats = (u'jpg', u'jpeg', u'png', u'gif', u'svg', u'ogg') while not ok: ok = True newfn = wikipedia.input(u'Enter a better name, or press enter to accept:') if newfn == "": newfn = filename ext = os.path.splitext(newfn)[1].lower().strip('.') for c in forbidden: if c in newfn: print "Invalid character: %s. Please try again" % c ok = False if ext not in allowed_formats and ok: ans = wikipedia.input(u"File format is not %s but %s. Continue [y/N]? " % (allowed_formats, ext)) if not ans.lower().startswith('y'): ok = False if newfn != '': filename = newfn # MediaWiki doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form filename = filename.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki encodedFilename = filename.encode(self.targetSite.encoding()) # A proper description for the submission. wikipedia.output(u"The suggested description is:") wikipedia.output(self.description) choice = wikipedia.inputChoice(u'Do you want to change this description?', ['Yes', 'No'], ['Y', 'n'], 'y') if choice not in ['n', 'N']: newDescription = wikipedia.ui.editText(self.description) # if user didn't press Cancel: if newDescription: self.description = newDescription formdata = {} formdata["wpUploadDescription"] = self.description
|
wikipedia.output('%i %s' % (response.status, response.reason))
|
wikipedia.output(u'%i %s' % (response.status, response.reason))
|
def upload_image(self, debug=False): """Gets the image at URL self.url, and uploads it to the target wiki. Returns the filename which was used to upload the image. If the upload fails, the user is asked whether to try again or not. If the user chooses not to retry, returns null. """ # Get file contents if '://' in self.url: uo = wikipedia.MyURLopener() file = uo.open(self.url) else: # Opening local files with MyURLopener would be possible, but we # don't do it because it only accepts ASCII characters in the # filename. file = open(self.url) wikipedia.output('Reading file %s' % self.url) contents = file.read() if contents.find("The requested URL was not found on this server.") != -1: print "Couldn't download the image." return file.close() # Isolate the pure name filename = self.url if '/' in filename: filename = filename.split('/')[-1] if '\\' in filename: filename = filename.split('\\')[-1] if self.urlEncoding: filename = urllib.unquote(filename) filename = filename.decode(self.urlEncoding) if not self.keepFilename: wikipedia.output(u"The filename on the target wiki will default to: %s" % filename) # ask newfn until it's valid ok = False # FIXME: these 2 belong somewhere else, presumably in family forbidden = '/' # to be extended allowed_formats = (u'jpg', u'jpeg', u'png', u'gif', u'svg', u'ogg') while not ok: ok = True newfn = wikipedia.input(u'Enter a better name, or press enter to accept:') if newfn == "": newfn = filename ext = os.path.splitext(newfn)[1].lower().strip('.') for c in forbidden: if c in newfn: print "Invalid character: %s. Please try again" % c ok = False if ext not in allowed_formats and ok: ans = wikipedia.input(u"File format is not %s but %s. Continue [y/N]? " % (allowed_formats, ext)) if not ans.lower().startswith('y'): ok = False if newfn != '': filename = newfn # MediaWiki doesn't allow spaces in the file name. # Replace them here to avoid an extra confirmation form filename = filename.replace(' ', '_') # Convert the filename (currently Unicode) to the encoding used on the # target wiki encodedFilename = filename.encode(self.targetSite.encoding()) # A proper description for the submission. wikipedia.output(u"The suggested description is:") wikipedia.output(self.description) choice = wikipedia.inputChoice(u'Do you want to change this description?', ['Yes', 'No'], ['Y', 'n'], 'y') if choice not in ['n', 'N']: newDescription = wikipedia.ui.editText(self.description) # if user didn't press Cancel: if newDescription: self.description = newDescription formdata = {} formdata["wpUploadDescription"] = self.description
|
return "*" + pl.aslink()
|
return "*" + pl.aslink(othersite = None)
|
def formatPl(self, pl): if hasattr(pl, '_contents'): #TODO: UGLY! need pl.isLoaded() if pl.isDisambig(): return "*" + pl.aslink() else: return pl.aslink() else: return "?" + pl.aslink()
|
return pl.aslink()
|
return pl.aslink(othersite = None)
|
def formatPl(self, pl): if hasattr(pl, '_contents'): #TODO: UGLY! need pl.isLoaded() if pl.isDisambig(): return "*" + pl.aslink() else: return pl.aslink() else: return "?" + pl.aslink()
|
return "?" + pl.aslink()
|
return "?" + pl.aslink(othersite = None)
|
def formatPl(self, pl): if hasattr(pl, '_contents'): #TODO: UGLY! need pl.isLoaded() if pl.isDisambig(): return "*" + pl.aslink() else: return pl.aslink() else: return "?" + pl.aslink()
|
R = re.compile(r'\[\[\s*(?P<namespace>%s)\s*:(?P<catName>.+?)(?:\|(?P<sortKey>.+?)\]\])' % catNamespace)
|
R = re.compile(r'\[\[\s*(?P<namespace>%s)\s*:(?P<catName>.+?)(?:\|(?P<sortKey>.+?))?\]\]' % catNamespace)
|
def getCategoryLinks(text, site): import catlib """Returns a list of category links. in the form {code:pagename}. Do not call this routine directly, use Page objects instead""" result = [] # Ignore interwiki links within nowiki tags and HTML comments nowikiOrHtmlCommentR = re.compile(r'<nowiki>.*?</nowiki>|<!--.*?-->', re.IGNORECASE | re.DOTALL) match = nowikiOrHtmlCommentR.search(text) while match: text = text[:match.start()] + text[match.end():] match = nowikiOrHtmlCommentR.search(text) catNamespace = '|'.join(site.category_namespaces()) R = re.compile(r'\[\[\s*(?P<namespace>%s)\s*:(?P<catName>.+?)(?:\|(?P<sortKey>.+?)\]\])' % catNamespace) for match in R.finditer(text): cat = catlib.Category(site, '%s:%s' % (match.group('namespace'), match.group('catName')), sortKey = match.group('sortKey')) result.append(cat) return result
|
if file_age > 7 * 24 * 60 * 60:
|
if file_age > 30 * 24 * 60 * 60:
|
def get(key): try: # find out how old our saved dump is (in seconds) file_age = time.time() - os.path.getmtime('mediawiki-messages/mediawiki-messages-%s.dat' % wikipedia.mylang) # if it's older than 7 days, reload it if file_age > 7 * 24 * 60 * 60: print 'Current MediaWiki message dump is outdated, reloading' refresh_messages() except OSError: # no saved dumped exists yet refresh_messages() # TODO: It's quite inefficient to reload the file every time this function # is used. Maybe we can save its content the first time the function is # called. f = open('mediawiki-messages/mediawiki-messages-%s.dat' % wikipedia.mylang, 'r') dictionary = pickle.load(f) f.close() key = key[0].lower() + key[1:] if dictionary.has_key(key): return dictionary[key] else: # TODO: Throw exception instead? print 'ERROR: MediaWiki Key %s not found' % key
|
+ "<a href=.+? title=.+?>.+?<\/a><\/p>\n"
|
+ ".*?<a href=.+? title=.+?>.+?<\/a><\/p>\n"
|
def refresh_messages(): host = wikipedia.family.hostname(wikipedia.mylang) # broken redirect maintenance page's URL url = wikipedia.family.allmessages_address(wikipedia.mylang) print 'Retrieving MediaWiki messages' allmessages, charset = wikipedia.getUrl(host,url) #f=open('/home/daniel/allmessages.html', 'r') #allmessages = f.read() print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. itemR = re.compile("<tr bgcolor=\"#F0F0FF\">\n" + "<td>\n" + "<p><a href=\"\/wiki/MediaWiki:.+?\" title=\"MediaWiki:.+?\">(.+?)<\/a><br \/>\n" + "<a href=.+? title=.+?>.+?<\/a><\/p>\n" + "</td>\n" + "<td>\n" + "<p>.+?</p>\n" + "</td>\n" + "<td>\n" + "<p>(.+?)</p>\n" + "<\/td>\n" + "<\/tr>", re.DOTALL) items = itemR.findall(allmessages) # we will save the found key:value pairs here dictionary = {} for item in items: # Key strings only contain ASCII characters, so we can use them as dictionary keys dictionary[item[0]] = unicode(item[1], wikipedia.myencoding()) # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. Create if necessary. f = open(makepath('mediawiki-messages/mediawiki-messages-%s.dat' % wikipedia.mylang), 'w') pickle.dump(dictionary, f) f.close()
|
get('about')
|
debug = True
|
def refresh_messages(): host = wikipedia.family.hostname(wikipedia.mylang) # broken redirect maintenance page's URL url = wikipedia.family.allmessages_address(wikipedia.mylang) print 'Retrieving MediaWiki messages' allmessages, charset = wikipedia.getUrl(host,url) #f=open('/home/daniel/allmessages.html', 'r') #allmessages = f.read() print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. itemR = re.compile("<tr bgcolor=\"#F0F0FF\">\n" + "<td>\n" + "<p><a href=\"\/wiki/MediaWiki:.+?\" title=\"MediaWiki:.+?\">(.+?)<\/a><br \/>\n" + "<a href=.+? title=.+?>.+?<\/a><\/p>\n" + "</td>\n" + "<td>\n" + "<p>.+?</p>\n" + "</td>\n" + "<td>\n" + "<p>(.+?)</p>\n" + "<\/td>\n" + "<\/tr>", re.DOTALL) items = itemR.findall(allmessages) # we will save the found key:value pairs here dictionary = {} for item in items: # Key strings only contain ASCII characters, so we can use them as dictionary keys dictionary[item[0]] = unicode(item[1], wikipedia.myencoding()) # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. Create if necessary. f = open(makepath('mediawiki-messages/mediawiki-messages-%s.dat' % wikipedia.mylang), 'w') pickle.dump(dictionary, f) f.close()
|
if debug: print get('successfulupload')
|
def refresh_messages(): host = wikipedia.family.hostname(wikipedia.mylang) # broken redirect maintenance page's URL url = wikipedia.family.allmessages_address(wikipedia.mylang) print 'Retrieving MediaWiki messages' allmessages, charset = wikipedia.getUrl(host,url) #f=open('/home/daniel/allmessages.html', 'r') #allmessages = f.read() print 'Parsing MediaWiki messages' # First group is MediaWiki key string. Second group is the current value string. itemR = re.compile("<tr bgcolor=\"#F0F0FF\">\n" + "<td>\n" + "<p><a href=\"\/wiki/MediaWiki:.+?\" title=\"MediaWiki:.+?\">(.+?)<\/a><br \/>\n" + "<a href=.+? title=.+?>.+?<\/a><\/p>\n" + "</td>\n" + "<td>\n" + "<p>.+?</p>\n" + "</td>\n" + "<td>\n" + "<p>(.+?)</p>\n" + "<\/td>\n" + "<\/tr>", re.DOTALL) items = itemR.findall(allmessages) # we will save the found key:value pairs here dictionary = {} for item in items: # Key strings only contain ASCII characters, so we can use them as dictionary keys dictionary[item[0]] = unicode(item[1], wikipedia.myencoding()) # Save the dictionary to disk # The file is stored in the mediawiki_messages subdir. Create if necessary. f = open(makepath('mediawiki-messages/mediawiki-messages-%s.dat' % wikipedia.mylang), 'w') pickle.dump(dictionary, f) f.close()
|
|
splitTitle = title.split(':')
|
title = title.split(':')
|
def __init__(self, site, title = None, insite = None, tosite = None): """ Constructor. Normally called with two arguments: Parameters: 1) The wikimedia site on which the page resides 2) The title of the page as a unicode string The argument insite can be specified to help decode the name; it is the wikimedia site where this link was found. """ self._site = site if tosite: self._tosite = tosite else: self._tosite = getSite() # Default to home wiki # Clean up the name, it can come from anywhere. title = title.strip() if title[0]==':': title = title[1:] splitTitle = title.split(':') # translate a default namespace name into the local namespace name if len(splitTitle) > 1: for ns in site.family.namespaces.keys(): if splitTitle[0] == site.family.namespace('_default', ns): splitTitle[0] = site.namespace(ns) self._urlname = link2url(title, site = self._site, insite = insite) self._linkname = url2link(self._urlname, site = self._site, insite = self._tosite)
|
if len(splitTitle) > 1:
|
if len(title) > 1:
|
def __init__(self, site, title = None, insite = None, tosite = None): """ Constructor. Normally called with two arguments: Parameters: 1) The wikimedia site on which the page resides 2) The title of the page as a unicode string The argument insite can be specified to help decode the name; it is the wikimedia site where this link was found. """ self._site = site if tosite: self._tosite = tosite else: self._tosite = getSite() # Default to home wiki # Clean up the name, it can come from anywhere. title = title.strip() if title[0]==':': title = title[1:] splitTitle = title.split(':') # translate a default namespace name into the local namespace name if len(splitTitle) > 1: for ns in site.family.namespaces.keys(): if splitTitle[0] == site.family.namespace('_default', ns): splitTitle[0] = site.namespace(ns) self._urlname = link2url(title, site = self._site, insite = insite) self._linkname = url2link(self._urlname, site = self._site, insite = self._tosite)
|
if splitTitle[0] == site.family.namespace('_default', ns): splitTitle[0] = site.namespace(ns)
|
if title[0] == site.family.namespace('_default', ns): title[0] = site.namespace(ns)
|
def __init__(self, site, title = None, insite = None, tosite = None): """ Constructor. Normally called with two arguments: Parameters: 1) The wikimedia site on which the page resides 2) The title of the page as a unicode string The argument insite can be specified to help decode the name; it is the wikimedia site where this link was found. """ self._site = site if tosite: self._tosite = tosite else: self._tosite = getSite() # Default to home wiki # Clean up the name, it can come from anywhere. title = title.strip() if title[0]==':': title = title[1:] splitTitle = title.split(':') # translate a default namespace name into the local namespace name if len(splitTitle) > 1: for ns in site.family.namespaces.keys(): if splitTitle[0] == site.family.namespace('_default', ns): splitTitle[0] = site.namespace(ns) self._urlname = link2url(title, site = self._site, insite = insite) self._linkname = url2link(self._urlname, site = self._site, insite = self._tosite)
|
print title print pl print self.pages
|
print repr(title) print repr(pl) print repr(self.pages)
|
def oneDone(self, title, timestamp, text): #print "DBG>", repr(title), timestamp, len(text) pl = PageLink(self.code, title) for pl2 in self.pages: #print "DBG>", pl, pl2, pl2.hashfreeLinkname() if PageLink(self.code, pl2.hashfreeLinkname()) == pl: if not hasattr(pl2,'_contents') and not hasattr(pl2,'_getexception'): break else: print title print pl print self.pages raise "bug, page not found in list" if self.debug: xtext = pl2.get() if text != xtext: print "################Text differs" import difflib for line in difflib.ndiff(xtext.split('\r\n'), text.split('\r\n')): if line[0] in ['+', '-']: print repr(line)[2:-1] if edittime[self.code, link2url(title, self.code)] != timestamp: print "################Timestamp differs" print "-",edittime[self.code, link2url(title, self.code)] print "+",timestamp else: m=Rredirect.match(text) if m: #print "DBG> ",pl2.asasciilink(),"is a redirect page" pl2._getexception = IsRedirectPage(m.group(1)) else: if len(text)<50: print "DBG> short text in",pl2.asasciilink() print repr(text) hn = pl2.hashname() if hn: m = re.search("== *%s *==" % hn, text) if not m: pl2._getexception = SubpageError("Hashname does not exist: %s" % self) else: # Store the content pl2._contents = text # Store the time stamp edittime[self.code, link2url(title, self.code)] = timestamp else: # Store the content pl2._contents = text # Store the time stamp edittime[self.code, link2url(title, self.code)] = timestamp
|
addr = self.addr%special[self.code]
|
try: addr = self.addr%special[self.code] except KeyError: print "BUG: Can not find name of Special in %s:" % self.code raise
|
def getData(self): import httplib addr = self.addr%special[self.code] pagenames = u'\r\n'.join([x.hashfreeLinkname() for x in self.pages]) data = urlencode(( ('action', 'submit'), ('pages', pagenames), ('curonly', 'True'), )) headers = {"Content-type": "application/x-www-form-urlencoded", "User-agent": "RobHooftWikiRobot/1.0"} # Slow ourselves down get_throttle(requestsize = len(self.pages)) # Now make the actual request to the server conn = httplib.HTTPConnection(langs[self.code]) conn.request("POST", addr, data, headers) response = conn.getresponse() data = response.read() conn.close() return data
|
def __init__(self,lang,term,relatedwords=[],label=''):
|
def __init__(self,lang,term,relatedwords=[]):
|
def __init__(self,lang,term,relatedwords=[],label=''): """ Constructor Generally called with two parameters: - The language of the term - The term (string)
|
self.label=label
|
def __init__(self,lang,term,relatedwords=[],label=''): """ Constructor Generally called with two parameters: - The language of the term - The term (string)
|
|
def setLabel(self,label): self.label=label.replace('<!--','').replace('-->','') def getLabel(self): if self.label: return '<!--' + self.label + '-->'
|
def setLabel(self,label): self.label=label.replace('<!--','').replace('-->','')
|
|
and read it into our object structure'''
|
and read it into our object structure. It returns a list of dictionaries. Each dictionary contains a header object and the textual content found under that header. Only relevant content is stored. Empty lines and lines to create tables for presentation to the user are taken out.'''
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
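A purely illustrative sketch of the list-of-dictionaries structure the revised docstring describes; the entry values below are made up, but later code in this file indexes each entry as contentblock['header'] and contentblock['text'].

# Hypothetical shape of parseWikiPage()'s splitcontent for a simple noun entry;
# the header values stand in for the header objects the docstring mentions.
splitcontent = [
    {'header': '==Nederlands==',          # stand-in for a header object
     'text':   ["'''fiets''' ''m''",
                "# rijwiel met twee wielen"]},
    {'header': '====Vertalingen====',
     'text':   ["*en: [[bicycle]]"]},
]
for contentblock in splitcontent:
    print(contentblock['header'])
    print(len(contentblock['text']))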
|
if len(line) <2: continue
|
if len(line) <2: templist.append(line) continue
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
print "tempdictstructure: ", tempdictstructure print "splitcontent: ",splitcontent
|
print "splitcontent: ",splitcontent,"\n\n"
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
print line
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
|
print "templist: ", templist raw_input("") """ if header.type==u'pos': if line[:2] == "'''":
|
for contentblock in splitcontent: print "contentblock:",contentblock print contentblock['header'] if contentblock['header'].type==u'pos': flag=False for line in contentblock['text']: print line print line[:3] if line[:3] == "'''":
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
if part[:2] == "'''":
|
print part[:3], "Flag:", flag if flag==False and part[:3] == "'''":
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
if part[:1].lower()=='pl':
|
if part.replace("'",'')[:2].lower()=='pl':
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
if part[:2].lower()=='dim':
|
if part.replace("'",'')[:3].lower()=='dim':
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
plural=part.replace(',','').strip()
|
plural=part.replace(',','').replace("'",'').strip() print 'Plural: ',plural
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
diminutive=part.replace(',','').strip()
|
diminutive=part.replace(',','').replace("'",'').strip() print 'Diminutive: ',diminutive
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
if line[:1] == "{{":
|
if line[:2] == "{{":
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
if line[:0] == "
|
if line[:1] == "
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
label=line[pos+4,pos2]
|
label=line[pos+4:pos2]
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
print label
|
print 'label:',label
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
if line[:1] == "
|
if line[:2] == "
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
"""
|
raw_input("")
|
def parseWikiPage(ofn,wikilang,pagetopic): '''This function will parse the content of a Wiktionary page and read it into our object structure''' apage = WiktionaryPage(wikilang,pagetopic) templist = [] context = {} content = open(ofn).readlines() splitcontent=[] for line in content: # Let's get rid of line breaks and extraneous white space line=line.replace('\n','').strip() # Let's start by looking for general stuff, that provides information which is # interesting to store at the page level if line.lower().find('{wikipedia}')!=-1: apage.addLink('wikipedia') continue if line.find('[[Category:')!=-1: category=line.split(':')[1].replace(']','') apage.addCategory(category) continue if line.find('|')==-1: bracketspos=line.find('[[') colonpos=line.find(':') if bracketspos!=-1 and colonpos!=-1 and bracketspos < colonpos: # This seems to be an interwikilink # If there is a pipe in it, it's not a simple interwikilink linkparts=line.replace(']','').replace('[','').split(':') lang=linkparts[0] linkto=linkparts[1] if len(lang)>1 and len(lang)<4: apage.addLink(lang+':'+linkto) continue # nothing to do on empty lines if len(line) <2: continue
|
return html2unicode(s, language = incode)
|
return unicodeName(s, language = incode)
|
def interwikiFormat(links, incode): """Create a suitable string to start a wikipedia page consisting of interwikilinks given as a dictionary of code:pagename in the argument. """ s = [] ar = links.keys() ar.sort() if mylang in config.interwiki_englishfirst: if 'en' in ar: del ar[ar.index('en')] ar[:0]=['en'] for code in ar: try: s.append(links[code].aslink()) except AttributeError: s.append('[[%s:%s]]' % (code, links[code])) s=config.interwiki_langs_separator.join(s) + '\r\n' return html2unicode(s, language = incode)
|
True if the page except for language links and category links has less than 4 characters, False otherwise. Can raise the same exceptions as get()
|
True if the page has less than 4 characters, except for language links and category links, False otherwise. Can raise the same exceptions as get()
|
def isEmpty(self): """ True if the page except for language links and category links has less than 4 characters, False otherwise. Can raise the same exceptions as get() """ txt = self.get() txt = removeLanguageLinks(txt) txt = removeCategoryLinks(txt, site = self.site()) if len(txt) < 4: return True else: return False
|
def getReferences(self, follow_redirects = True, offset = 0): """ Returns a list of pages that link to the page.
|
def getReferences(self, follow_redirects=True, offset=0): """ Return a list of pages that link to the page.
|
def isDisambig(self): defdis = self.site().family.disambig( "_default" ) locdis = self.site().family.disambig( self._site.lang )
|
txt = site.getUrl(path) txt = txt.replace('<a', 'a') txt = txt.replace('</a', '/a') txt = txt.replace('<li', 'li') txt = txt.replace('</li', 'li') if not follow_redirects: cascadedListR = re.compile(r"(.*<ul>[^<]*)<ul>[^<]*<\/ul>([^<]*</\ul>.*)") pos = 0 while cascadedListR.search(txt): m = cascadedListR.search(txt) txt = m.group(1) + m.group(2) Rref = re.compile('li>a href.*="([^"]*)"') refTitles = Rref.findall(txt) if self.site().lang == 'eo': refTistles = map(resolveEsperantoXConvention,refTitles[:])
|
delay = 1 while True: txt = site.getUrl(path) startmarker = u"<!-- start content -->" endmarker = u"<!-- end content -->" try: start = txt.index(startmarker) + len(startmarker) end = txt.index(endmarker) except ValueError: output( u"Invalid page received from server.... Retrying in %i minutes." % delay) time.sleep(delay * 60.) delay *= 2 if delay > 30: delay = 30 continue txt = txt[start:end] break try: start = txt.index(u"<ul>") end = txt.rindex(u"</ul>") except ValueError: return [] txt = txt[start:end+5] txtlines = txt.split(u"\n") listitempattern = re.compile(r"<li><a href=.*>(.*)</a></li>") redirectpattern = re.compile(r"<li><a href=.*>(.*)</a> \(.*\)") redirect = 0 refTitles = set() redirTitles = set() for num, line in enumerate(txtlines): if line == u"</ul>": continue if not redirect: item = listitempattern.search(line) if item: refTitles.add(item.group(1)) continue item = redirectpattern.search(line) if item: refTitles.add(item.group(1)) redirTitles.add(item.group(1)) redirect += 1 else: output(u"DBG> Unparsed line:") output(u"(%i) %s" % (num, line)) else: if line == u"</li>": redirect -= 1 continue item = listitempattern.search(line) if item: if follow_redirects: refTitles.add(item.group(1)) continue item = redirectpattern.search(line) if item: output(u"WARNING: [[%s]] is a double-redirect.") if follow_redirects: refTitles.add(item.group(1)) redirTitles.add(item.group(1)) redirect += 1 else: output(u"DBG> Unparsed line:") output(u"(%i) %s" % (num, line)) refTitles = list(refTitles)
|
def getReferences(self, follow_redirects = True, offset = 0): """ Returns a list of pages that link to the page. If follow_redirects is True, also returns pages that link to a redirect pointing to the page. If offset is non-zero, skips that many references before loading. """ site = self.site() path = site.references_address(self.urlname()) if offset: path = path + "&offset=%i" % offset output(u'Getting references to %s' % self.aslink()) txt = site.getUrl(path) # remove brackets which would disturb the regular expression cascadedListR # TODO: rewrite regex txt = txt.replace('<a', 'a') txt = txt.replace('</a', '/a') txt = txt.replace('<li', 'li') txt = txt.replace('</li', 'li') if not follow_redirects: # remove these links from HTML which are in an unordered # list at level > 1. cascadedListR = re.compile(r"(.*<ul>[^<]*)<ul>[^<]*<\/ul>([^<]*</\ul>.*)") # current index in txt string pos = 0 while cascadedListR.search(txt): m = cascadedListR.search(txt) txt = m.group(1) + m.group(2) Rref = re.compile('li>a href.*="([^"]*)"') refTitles = Rref.findall(txt) if self.site().lang == 'eo': refTistles = map(resolveEsperantoXConvention,refTitles[:]) refTitles.sort() refPages = [] # create list of Page objects, removing duplicates for refTitle in refTitles: page = Page(site, refTitle) if page not in refPages: refPages.append(page) return refPages
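To make the parsing change easier to follow, a small standalone check (with made-up sample lines) of how the two regular expressions introduced above classify entries of the "What links here" listing: plain references close their <li> on the same line, redirect entries carry a parenthesised note instead.

import re

listitempattern = re.compile(r"<li><a href=.*>(.*)</a></li>")
redirectpattern = re.compile(r"<li><a href=.*>(.*)</a> \(.*\)")

plain = u'<li><a href="/wiki/Foo" title="Foo">Foo</a></li>'
redir = u'<li><a href="/wiki/Bar" title="Bar">Bar</a> (redirect page)'

print(listitempattern.search(plain).group(1))    # Foo
print(listitempattern.search(redir))             # None -- no closing </li> on this line
print(redirectpattern.search(redir).group(1))    # Bar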
|
if page not in refPages: refPages.append(page)
|
if refTitle in redirTitles: page._redirarg = self.title() refPages.append(page)
|
def getReferences(self, follow_redirects = True, offset = 0): """ Returns a list of pages that link to the page. If follow_redirects is True, also returns pages that link to a redirect pointing to the page. If offset is non-zero, skips that many references before loading. """ site = self.site() path = site.references_address(self.urlname()) if offset: path = path + "&offset=%i" % offset output(u'Getting references to %s' % self.aslink()) txt = site.getUrl(path) # remove brackets which would disturb the regular expression cascadedListR # TODO: rewrite regex txt = txt.replace('<a', 'a') txt = txt.replace('</a', '/a') txt = txt.replace('<li', 'li') txt = txt.replace('</li', 'li') if not follow_redirects: # remove these links from HTML which are in an unordered # list at level > 1. cascadedListR = re.compile(r"(.*<ul>[^<]*)<ul>[^<]*<\/ul>([^<]*</\ul>.*)") # current index in txt string pos = 0 while cascadedListR.search(txt): m = cascadedListR.search(txt) txt = m.group(1) + m.group(2) Rref = re.compile('li>a href.*="([^"]*)"') refTitles = Rref.findall(txt) if self.site().lang == 'eo': refTistles = map(resolveEsperantoXConvention,refTitles[:]) refTitles.sort() refPages = [] # create list of Page objects, removing duplicates for refTitle in refTitles: page = Page(site, refTitle) if page not in refPages: refPages.append(page) return refPages
|
def exceptionApplies(self, original_text): """ Returns True iff one of the exceptions applies for the given text.
|
def checkExceptions(self, original_text): """ If one of the exceptions applies for the given text, returns the substring. which matches the exception. Otherwise it returns None.
|
def exceptionApplies(self, original_text): """ Returns True iff one of the exceptions applies for the given text. """ for exception in self.exceptions: if self.regex: exception = re.compile(exception) hit = exception.search(original_text) if hit: wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), hit.group(0))) return True else: hit = original_text.find(exception) if hit != -1: wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), original_text[hit:hit + len(exception)])) return True return False
|
wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), hit.group(0))) return True
|
return hit.group(0)
|
def exceptionApplies(self, original_text): """ Returns True iff one of the exceptions applies for the given text. """ for exception in self.exceptions: if self.regex: exception = re.compile(exception) hit = exception.search(original_text) if hit: wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), hit.group(0))) return True else: hit = original_text.find(exception) if hit != -1: wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), original_text[hit:hit + len(exception)])) return True return False
|
wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), original_text[hit:hit + len(exception)])) return True return False
|
return original_text[hit:hit + len(exception)] return None
|
def exceptionApplies(self, original_text): """ Returns True iff one of the exceptions applies for the given text. """ for exception in self.exceptions: if self.regex: exception = re.compile(exception) hit = exception.search(original_text) if hit: wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), hit.group(0))) return True else: hit = original_text.find(exception) if hit != -1: wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), original_text[hit:hit + len(exception)])) return True return False
|
match = self.checkExceptions(original_text)
|
def run(self): """ Starts the robot. """ # Run the generator which will yield PageLinks to pages which might need to be # changed. for pl in self.generator.generate(): print '' try: # Load the page's text from the wiki original_text = pl.get() except wikipedia.NoPage: wikipedia.output(u'Page %s not found' % pl.linkname()) continue except wikipedia.LockedPage: wikipedia.output(u'Skipping locked page %s' % pl.linkname()) continue except wikipedia.IsRedirectPage: continue # skip all pages that contain certain texts if not self.exceptionApplies(original_text): new_text = self.doReplacements(original_text) if new_text == original_text: wikipedia.output('No changes were necessary in %s' % pl.linkname()) else: wikipedia.showColorDiff(original_text, new_text) if not self.acceptall: choice = wikipedia.input(u'Do you want to accept these changes? [y|n|a(ll)]') if choice in ['a', 'A']: self.acceptall = True if self.acceptall or choice in ['y', 'Y']: pl.put(new_text)
|
|
if not self.exceptionApplies(original_text):
|
if match: wikipedia.output(u'Skipping %s because it contains %s' % (pl.linkname(), match)) else:
|
def run(self): """ Starts the robot. """ # Run the generator which will yield PageLinks to pages which might need to be # changed. for pl in self.generator.generate(): print '' try: # Load the page's text from the wiki original_text = pl.get() except wikipedia.NoPage: wikipedia.output(u'Page %s not found' % pl.linkname()) continue except wikipedia.LockedPage: wikipedia.output(u'Skipping locked page %s' % pl.linkname()) continue except wikipedia.IsRedirectPage: continue # skip all pages that contain certain texts if not self.exceptionApplies(original_text): new_text = self.doReplacements(original_text) if new_text == original_text: wikipedia.output('No changes were necessary in %s' % pl.linkname()) else: wikipedia.showColorDiff(original_text, new_text) if not self.acceptall: choice = wikipedia.input(u'Do you want to accept these changes? [y|n|a(ll)]') if choice in ['a', 'A']: self.acceptall = True if self.acceptall or choice in ['y', 'Y']: pl.put(new_text)
|
result[code] = m.group(1)
|
if m.group(1): result[code] = m.group(1) else: print "ERROR: empty link to %s:"%(code)
|
def getLanguageLinks(text): """Returns a dictionary of other language links mentioned in the text in the form {code:pagename}""" result = {} for code in langs: m=re.search(r'\[\['+code+':([^\]]*)\]\]', text) if m: result[code] = m.group(1) return result
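An illustrative check (sample text made up) of the case the added guard handles: an empty interwiki link such as [[de:]] still matches the pattern, but with an empty group, which the new code reports instead of storing.

import re

text = u'Article text with an empty link [[de:]] in it.'
m = re.search(r'\[\[' + 'de' + r':([^\]]*)\]\]', text)
print(repr(m.group(1)))   # an empty string -- so the new code prints "ERROR: empty link to de:"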
|
title = title.split(':')
|
splitTitle = title.split(':')
|
def __init__(self, site, title = None, insite = None, tosite = None): """ Constructor. Normally called with two arguments: Parameters: 1) The wikimedia site on which the page resides 2) The title of the page as a unicode string The argument insite can be specified to help decode the name; it is the wikimedia site where this link was found. """ self._site = site if tosite: self._tosite = tosite else: self._tosite = getSite() # Default to home wiki # Clean up the name, it can come from anywhere. title = title.strip() if title[0]==':': title = title[1:] title = title.split(':') # translate a default namespace name into the local namespace name if len(title) > 1: for ns in site.family.namespaces.keys(): if title[0] == site.family.namespace('_default',ns): title[0] = site.namespace(ns) title = ':'.join(title) self._urlname = link2url(title, site = self._site, insite = insite) self._linkname = url2link(self._urlname, site = self._site, insite = self._tosite)
|
if len(title) > 1:
|
if len(splitTitle) > 1:
|
def __init__(self, site, title = None, insite = None, tosite = None): """ Constructor. Normally called with two arguments: Parameters: 1) The wikimedia site on which the page resides 2) The title of the page as a unicode string The argument insite can be specified to help decode the name; it is the wikimedia site where this link was found. """ self._site = site if tosite: self._tosite = tosite else: self._tosite = getSite() # Default to home wiki # Clean up the name, it can come from anywhere. title = title.strip() if title[0]==':': title = title[1:] title = title.split(':') # translate a default namespace name into the local namespace name if len(title) > 1: for ns in site.family.namespaces.keys(): if title[0] == site.family.namespace('_default',ns): title[0] = site.namespace(ns) title = ':'.join(title) self._urlname = link2url(title, site = self._site, insite = insite) self._linkname = url2link(self._urlname, site = self._site, insite = self._tosite)
|
if title[0] == site.family.namespace('_default',ns): title[0] = site.namespace(ns) title = ':'.join(title)
|
if splitTitle[0] == site.family.namespace('_default', ns): splitTitle[0] = site.namespace(ns)
|
def __init__(self, site, title = None, insite = None, tosite = None): """ Constructor. Normally called with two arguments: Parameters: 1) The wikimedia site on which the page resides 2) The title of the page as a unicode string The argument insite can be specified to help decode the name; it is the wikimedia site where this link was found. """ self._site = site if tosite: self._tosite = tosite else: self._tosite = getSite() # Default to home wiki # Clean up the name, it can come from anywhere. title = title.strip() if title[0]==':': title = title[1:] title = title.split(':') # translate a default namespace name into the local namespace name if len(title) > 1: for ns in site.family.namespaces.keys(): if title[0] == site.family.namespace('_default',ns): title[0] = site.namespace(ns) title = ':'.join(title) self._urlname = link2url(title, site = self._site, insite = insite) self._linkname = url2link(self._urlname, site = self._site, insite = self._tosite)
|
print e
|
wikipedia.output(u'%s' % e)
|
def run(self): commons = wikipedia.Site('commons', 'commons') comment = wikipedia.translate(self.site, nowCommonsMessage) for page in self.getPageGenerator(): wikipedia.output(u'\n\n>> %s <<\n' % page.title()) try: localImagePage = wikipedia.ImagePage(self.site, page.title()) if localImagePage.fileIsOnCommons(): wikipedia.output(u'File is already on Commons.') continue md5 = localImagePage.getFileMd5Sum() localText = localImagePage.get() match = self.nowCommonsR.search(localText) if not match: wikipedia.output(u'NowCommons template not found.') continue filename = match.group('filename') or localImagePage.titleWithoutNamespace() commonsImagePage = wikipedia.ImagePage(commons, 'Image:%s' % filename) if len(localImagePage.getFileVersionHistory()) > 1: wikipedia.output(u'This image has a version history. Please manually delete it after making sure that the old versions aren\'t worth keeping.') continue commonsText = commonsImagePage.get() if md5 == commonsImagePage.getFileMd5Sum(): wikipedia.output(u'The image is identical to the one on Commons.') wikipedia.output(u'\n\n>>>>>>> Description on %s <<<<<<\n\n' % repr(self.site)) wikipedia.output(localText) wikipedia.output(u'\n\n>>>>>> Description on Commons <<<<<<\n\n') wikipedia.output(commonsText) choice = wikipedia.inputChoice(u'Does the description on Commons contain all required source and license information?', ['yes', 'no'], ['y', 'N'], 'N') if choice == 'y': localImagePage.delete(comment, prompt = False) else: wikipedia.output(u'The image is not identical to the one on Commons!') except (wikipedia.NoPage, wikipedia.IsRedirectPage), e: print e continue
|
if search: index = text.lower().index(search.lower()) line = text[:index].count('\n') column = index - (text[:index].rfind('\n') + 1) else: line = column = 0
|
def edit(self, text, search = None): """ Calls the editor and thus allows the user to change the text. Returns the modified text. Halts the thread's operation until the editor is closed. Returns None if the user didn't save the text file in his text editor. """ if config.editor: tempFilename = '%s.%s' % (tempfile.mktemp(), config.editor_filename_extension) tempFile = open(tempFilename, 'w') tempFile.write(text.encode(config.editor_encoding)) tempFile.close() creationDate = os.stat(tempFilename).st_atime command = "%s %s" % (config.editor, tempFilename) # Some editors make it possible to mark occurences of substrings, or # to jump to the line of the first occurence. # TODO: Find a better solution than hardcoding these, e.g. a config # option. if config.editor == 'kate': lineOfFirstOccurence = text[:text.index(search)].count('\n') command += " -l %i" % lineOfFirstOccurence elif config.editor == 'jedit': lineOfFirstOccurence = text[:text.index(search)].count('\n') + 1 command += " +line:%i" % lineOfFirstOccurence #print command os.system(command) lastChangeDate = os.stat(tempFilename).st_atime if lastChangeDate == creationDate: # Nothing changed return None else: newcontent = open(tempFilename).read().decode(config.editor_encoding) os.unlink(tempFilename) return newcontent else: return wikipedia.ui.editText(text, search = search)
|
|
lineOfFirstOccurence = text[:text.index(search)].count('\n') command += " -l %i" % lineOfFirstOccurence
|
command += " -l %i -c %i" % (line, column)
|
def edit(self, text, search = None): """ Calls the editor and thus allows the user to change the text. Returns the modified text. Halts the thread's operation until the editor is closed. Returns None if the user didn't save the text file in his text editor. """ if config.editor: tempFilename = '%s.%s' % (tempfile.mktemp(), config.editor_filename_extension) tempFile = open(tempFilename, 'w') tempFile.write(text.encode(config.editor_encoding)) tempFile.close() creationDate = os.stat(tempFilename).st_atime command = "%s %s" % (config.editor, tempFilename) # Some editors make it possible to mark occurences of substrings, or # to jump to the line of the first occurence. # TODO: Find a better solution than hardcoding these, e.g. a config # option. if config.editor == 'kate': lineOfFirstOccurence = text[:text.index(search)].count('\n') command += " -l %i" % lineOfFirstOccurence elif config.editor == 'jedit': lineOfFirstOccurence = text[:text.index(search)].count('\n') + 1 command += " +line:%i" % lineOfFirstOccurence #print command os.system(command) lastChangeDate = os.stat(tempFilename).st_atime if lastChangeDate == creationDate: # Nothing changed return None else: newcontent = open(tempFilename).read().decode(config.editor_encoding) os.unlink(tempFilename) return newcontent else: return wikipedia.ui.editText(text, search = search)
|
lineOfFirstOccurence = text[:text.index(search)].count('\n') + 1 command += " +line:%i" % lineOfFirstOccurence
|
lineOfFirstOccurence += 1 command += " +line:%i" % line
|
def edit(self, text, search = None): """ Calls the editor and thus allows the user to change the text. Returns the modified text. Halts the thread's operation until the editor is closed. Returns None if the user didn't save the text file in his text editor. """ if config.editor: tempFilename = '%s.%s' % (tempfile.mktemp(), config.editor_filename_extension) tempFile = open(tempFilename, 'w') tempFile.write(text.encode(config.editor_encoding)) tempFile.close() creationDate = os.stat(tempFilename).st_atime command = "%s %s" % (config.editor, tempFilename) # Some editors make it possible to mark occurences of substrings, or # to jump to the line of the first occurence. # TODO: Find a better solution than hardcoding these, e.g. a config # option. if config.editor == 'kate': lineOfFirstOccurence = text[:text.index(search)].count('\n') command += " -l %i" % lineOfFirstOccurence elif config.editor == 'jedit': lineOfFirstOccurence = text[:text.index(search)].count('\n') + 1 command += " +line:%i" % lineOfFirstOccurence #print command os.system(command) lastChangeDate = os.stat(tempFilename).st_atime if lastChangeDate == creationDate: # Nothing changed return None else: newcontent = open(tempFilename).read().decode(config.editor_encoding) os.unlink(tempFilename) return newcontent else: return wikipedia.ui.editText(text, search = search)
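A standalone sketch (sample text made up) of the line/column arithmetic the new code hands to the editor: the line is the number of newlines before the case-insensitive hit, the column is the offset from the last newline before it.

text = u"First line\nSecond line with the Target word\nThird line"
search = u"target"

index = text.lower().index(search.lower())
line = text[:index].count('\n')                    # 0-based line of the first occurrence
column = index - (text[:index].rfind('\n') + 1)    # 0-based column within that line

print("%d %d" % (line, column))   # 1 21
# e.g. for kate the command would then get " -l 1 -c 21" appended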
|
def initialise_data(self): """Set editor, page and pagelink attributes""" self.setpage()
|
def __init__(self, args): """Takes one argument, usually this is sys.argv[1:]""" self.all_args = args self.set_options() self.site = wikipedia.getSite()
|