Columns: rem (string, lengths 0–322k), add (string, lengths 0–2.05M), context (string, lengths 8–228k)
if (not 'lon' in nc.variables.keys()) ^ ( not 'lat' in nc.variables.keys()):
if (not 'lon' in nc.variables.keys()) or ( not 'lat' in nc.variables.keys()): print("No lat/lon coordinates found, creating it")
def get_projection_from_file(nc): ## First, check if we have a global attribute 'projection' ## which contains a Proj4 string: try: p = Proj(nc.projection) print('Found projection information in global attribute, using it') except: try: ## go through variables and look for 'grid_mapping' attribute for var in nc.variables.keys(): if hasattr(nc.variables[var],'grid_mapping'): mappingvarname = nc.variables[var].grid_mapping exit print('Found projection information in variable %s, using it' % mappingvarname) var_mapping = nc.variables[mappingvarname] p = Proj(proj = "stere", ellps = var_mapping.ellipsoid, datum = var_mapping.ellipsoid, units = "m", lat_ts = var_mapping.standard_parallel, lat_0 = var_mapping.latitude_of_projection_origin, lon_0 = var_mapping.straight_vertical_longitude_from_pole, x_0 = var_mapping.false_easting, y_0 = var_mapping.false_northing) except: print('No mapping information found, exiting.') exit(1) return p
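The '^' in the removed line is a bitwise XOR, which is False when *both* 'lon' and 'lat' are missing, so exactly the case that needs the fallback slips through; 'or' fires whenever at least one coordinate is absent. A minimal sketch of the difference (variable names are illustrative):

    # Why XOR was the wrong connective for the lat/lon check above.
    has_lon, has_lat = False, False          # neither coordinate present
    print((not has_lon) ^ (not has_lat))     # False: XOR misses the both-missing case
    print((not has_lon) or (not has_lat))    # True: the fallback branch is taken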
Hresult = integrate.odeint(SMsurface,[h_f],x_grid,atol=1.e-9,rtol=1.e-9)
Hresult = odeint(SMsurface,[h_f],x_grid,atol=1.e-9,rtol=1.e-9)
def SMsurface(h,x): b_x = SMcold_bedslope(x) s = a * abs(x) # ??? return b_x - (C / rho_g) * s**m / h**(m+1)
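The change above drops the 'integrate.' prefix, which assumes the module now does 'from scipy.integrate import odeint'. A minimal sketch of that call pattern (the slope function and constants are placeholders, not the original SMsurface):

    import numpy as np
    from scipy.integrate import odeint       # import style the new call assumes

    def surface_slope(h, x):
        # placeholder for SMsurface: returns dh/dx for thickness h at position x
        return [-1.0 / (h[0] + 1.0)]

    h_f = 100.0                              # boundary thickness at the first grid point
    x_grid = np.linspace(0.0, 1000.0, 50)
    result = odeint(surface_slope, [h_f], x_grid, atol=1.e-9, rtol=1.e-9)
    print(result.shape)                      # (50, 1): one row per grid point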
mbobj = json.loads(unicode(mb, 'utf-8'))
if isinstance(mb, str): mb = unicode(mb, 'utf-8') mbobj = json.loads(mb)
def make_collection_id(data): sio = StringIO.StringIO() for key in ( _version.version, 'base_url', 'script_extension', 'template_blacklist', 'template_exclusion_category', 'print_template_prefix', 'print_template_pattern', 'login_credentials', ): sio.write(repr(data.get(key))) mb = data.get('metabook') if mb: mbobj = json.loads(unicode(mb, 'utf-8')) sio.write(calc_checksum(mbobj)) num_articles = len(list(mbobj.articles())) sys.stdout.write("new-collection %s\t%r\t%r\n" % (num_articles, data.get("base_url"), data.get("writer"))) return md5(sio.getvalue()).hexdigest()[:16]
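The guard added above decodes exactly once: under Python 2, unicode(mb, 'utf-8') raises TypeError when mb is already a unicode object, so the isinstance check makes the call safe for both str and unicode input. A reduced sketch (Python 2 only, since it uses the unicode type):

    import json

    def loads_utf8(mb):
        # bytes in -> decode once; already-decoded unicode passes through
        if isinstance(mb, str):
            mb = unicode(mb, 'utf-8')
        return json.loads(mb)

    obj = loads_utf8('{"k": "caf\xc3\xa9"}')
    print(type(obj[u'k']))                   # <type 'unicode'>, decoded exactly once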
ext = ext.replace(' ', '')
def normalize_and_get_image_path(self, name): assert isinstance(name, basestring) name = unicode(name) ns, partial, fqname = self.nshandler.splitname(name, defaultns=6) if ns != 6: return
for p in "pnb ckb mwl mhr ace".split():
for p in "pnb ckb mwl mhr ace krc pcd".split():
def fix_wikipedia_siteinfo(siteinfo): # --- http://code.pediapress.com/wiki/ticket/754 if u'\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd\ufffd' in [x.get("prefix", u"")[2:] for x in siteinfo.get("interwikimap", [])]: print "WARNING: interwikimap contains garbage" from mwlib import siteinfo as simod en = simod.get_siteinfo("en") siteinfo['interwikimap'] = list(en["interwikimap"]) prefixes = [x['prefix'] for x in siteinfo['interwikimap']] for p in "pnb ckb mwl mhr ace".split(): if p in prefixes: return siteinfo['interwikimap'].append({ 'prefix': p, 'language': p, 'url': 'http://%s.wikipedia.org/wiki/$1' % (p, ), 'local': '', })
namespaces = self.source.namespaces or {}
try: namespaces = self.siteinfo["namespaces"] except (AttributeError, KeyError): from mwlib import siteinfo namespaces = siteinfo.get_siteinfo("en")["namespaces"]
def NS(self, args): """Returns the name of a given namespace number.""" namespaces = self.source.namespaces or {} ns = args[0] try: retval = namespaces[ns]['*'] except KeyError: retval = ''
if getattr(img_node, 'align', None) != 'center':
if getattr(img_node, 'align', None) not in ['center', 'none']:
def getImageSize(self, img_node, img_path=None, max_print_width=None, max_print_height=None, fullsize_thumbs=False, img_size=None): max_w = getattr(img_node, 'width', None) max_h = getattr(img_node, 'height', None) if img_path: img = Image.open(img_path) px_w, px_h = img.size else: px_w, px_h = img_size ar = px_w/px_h if max_h and max_w: if max_h*ar > max_w: max_h = max_w/ar elif max_w/ar > max_h: max_w = max_h*ar if max_h and not max_w: max_w = max_h*ar
self.cache_dir = utils.ensure_dir(cache_dir)
self.cache_dir = cache_dir
def __init__(self, cache_dir="cache", default_writer='rl', report_from_mail=None, report_recipients=None): self.cache_dir = utils.ensure_dir(cache_dir) self.mwrender_cmd = "mw-render" self.mwrender_logfile = "render-logfile" self.mwzip_cmd = "mw-zip" self.mwzip_logfile = "zip-logfile" self.mwpost_cmd = "mw-post" self.mwpost_logfile = "post-logfile" self.default_writer = default_writer self.report_from_mail = report_from_mail self.report_recipients = report_recipients
for i in range(0x100, 0x200): p = os.path.join(self.cache_dir, hex(i)[3:]) if not os.path.isdir(p): os.mkdir(p)
def __init__(self, cache_dir="cache", default_writer='rl', report_from_mail=None, report_recipients=None): self.cache_dir = utils.ensure_dir(cache_dir) self.mwrender_cmd = "mw-render" self.mwrender_logfile = "render-logfile" self.mwzip_cmd = "mw-zip" self.mwzip_logfile = "zip-logfile" self.mwpost_cmd = "mw-post" self.mwpost_logfile = "post-logfile" self.default_writer = default_writer self.report_from_mail = report_from_mail self.report_recipients = report_recipients
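The added loop pre-creates 256 shard subdirectories named '00' through 'ff': for i in range(0x100, 0x200), hex(i) is '0x100'..'0x1ff', and slicing off the first three characters leaves a zero-padded two-digit hex name. A quick check of the trick:

    names = [hex(i)[3:] for i in range(0x100, 0x200)]
    print(names[:3])     # ['00', '01', '02']
    print(names[-1])     # 'ff'
    print(len(names))    # 256 shard directories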
application = Application()
cachedir = "cache" cachedir = utils.ensure_dir(cachedir) for i in range(0x100, 0x200): p = os.path.join(cachedir, hex(i)[3:]) if not os.path.isdir(p): os.mkdir(p) def app(*args, **kwargs): return Application(cachedir)(*args, **kwargs)
def main(): from gevent.wsgi import WSGIServer application = Application() address = "0.0.0.0", 8899 server = WSGIServer(address, application) try: print "listening on %s:%d" % address server.serve_forever() except KeyboardInterrupt: server.stop() print "bye."
server = WSGIServer(address, application)
server = WSGIServer(address, app)
def main(): from gevent.wsgi import WSGIServer application = Application() address = "0.0.0.0", 8899 server = WSGIServer(address, application) try: print "listening on %s:%d" % address server.serve_forever() except KeyboardInterrupt: server.stop() print "bye."
targetpath = os.path.normpath(os.path.join(dstdir, member.filename))
fn = member.filename if isinstance(fn, str): fn = unicode(fn, 'utf-8') targetpath = os.path.normpath(os.path.join(dstdir, fn))
def extract_member(zipfile, member, dstdir): """Copied and adjusted from Python 2.6 stdlib zipfile.py module. Extract the ZipInfo object 'member' to a physical file on the path targetpath. """ assert dstdir.endswith(os.path.sep), "/ missing at end" targetpath = os.path.normpath(os.path.join(dstdir, member.filename)) if not targetpath.startswith(dstdir): raise RuntimeError("bad filename in zipfile %r" % (targetpath, )) # Create all upper directories if necessary. if member.filename.endswith("/"): upperdirs = targetpath else: upperdirs = os.path.dirname(targetpath) if not os.path.isdir(upperdirs): os.makedirs(upperdirs) if not member.filename.endswith("/"): open(targetpath, 'wb').write(zipfile.read(member.filename))
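The fix above decodes member.filename to unicode before joining, presumably so non-ASCII archive entries are handled consistently under Python 2. The surrounding context also shows a zip-slip guard worth noting: normpath plus a startswith test, which is only sound because dstdir is asserted to end with the path separator (otherwise a sibling like 'cache2' would pass for dstdir 'cache'). A reduced sketch of that guard (POSIX paths assumed):

    import os

    def safe_target(dstdir, member_name):
        assert dstdir.endswith(os.path.sep), "/ missing at end"
        target = os.path.normpath(os.path.join(dstdir, member_name))
        if not target.startswith(dstdir):
            raise RuntimeError("bad filename in zipfile %r" % (target,))
        return target

    print(safe_target("/tmp/out/", "a/b.txt"))        # /tmp/out/a/b.txt
    # safe_target("/tmp/out/", "../../etc/passwd")    # raises RuntimeError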
raise RuntimeError("%s: [fetching %s]" % (error.get("info", ""), self._build_url(**kwargs)))
raise RuntimeError("%r: [fetching %r]" % (error.get("info", ""), self._build_url(**kwargs)))
def got_result(data): try: error = data.get("error") except: print "ERROR:", data, kwargs raise if error: raise RuntimeError("%s: [fetching %s]" % (error.get("info", ""), self._build_url(**kwargs))) merge_data(retval, data["query"]) qc = data.get("query-continue", {}).values() if qc and query_continue: kw = kwargs.copy() for d in qc: for k,v in d.items(): # dict of len(1) kw[str(k)] = v
self.qpagename = pagename.replace(' ', '_')
def __init__(self, pagename='', server="http://en.wikipedia.org", revisionid=0): self.pagename = pagename self.qpagename = pagename.replace(' ', '_') self.server = server self.revisionid = revisionid self.niceurl = urlparse.urljoin(self.server, 'wiki')
def PAGENAME(self, args):
def _wrap_pagename(f): @wraps(f) def wrapper(self, args): pagename = self.pagename if args.args: pagename = args.args[0] return f(self, pagename) return wrapper def _quoted(f): @wraps(f) def wrapper(*args, **kwargs): return urlquote(f(*args, **kwargs).replace(' ', '_')) return wrapper @_wrap_pagename def PAGENAME(self, pagename):
def __init__(self, pagename='', server="http://en.wikipedia.org", revisionid=0): self.pagename = pagename self.qpagename = pagename.replace(' ', '_') self.server = server self.revisionid = revisionid self.niceurl = urlparse.urljoin(self.server, 'wiki')
return self.pagename def PAGENAMEE(self, args): """same as PAGENAME but More URL-friendly percent encoded special characters (To use an articlename in an external link). """ return urlquote(self.qpagename) def FULLPAGENAME(self, args): return self.pagename def FULLPAGENAMEE(self, args): return urlquote(self.qpagename) def SUBPAGENAME(self, args):
return self.nshandler.splitname(pagename)[1] """same as PAGENAME but More URL-friendly percent encoded special characters (To use an articlename in an external link). """ PAGENAMEE = _quoted(PAGENAME) @_wrap_pagename def FULLPAGENAME(self, pagename): return pagename FULLPAGENAMEE = _quoted(FULLPAGENAME) @_wrap_pagename def SUBPAGENAME(self, pagename):
def PAGENAME(self, args): """Returns the name of the current page, including all levels (Title/Subtitle/Sub-subtitle)""" return self.pagename
return self.pagename.split('/')[-1] def SUBPAGENAMEE(self, args): return urlquote(self.qpagename.split('/')[-1]) def BASEPAGENAME(self, args):
return pagename.split('/')[-1] SUBPAGENAMEE = _quoted(SUBPAGENAME) @_wrap_pagename def BASEPAGENAME(self, pagename):
def SUBPAGENAME(self, args): """[MW1.6+] Returns the name of the current page, excluding parent pages ('Title/Subtitle' becomes 'Subtitle'). """ return self.pagename.split('/')[-1]
return self.pagename.rsplit('/', 1)[0] def BASEPAGENAMEE(self, args): """[MW1.7+] The basename of a subpage ('Title/Subtitle' becomes 'Title') """ return urlquote(self.qpagename.rsplit('/', 1)[0]) def NAMESPACE(self, args):
return pagename.rsplit('/', 1)[0] BASEPAGENAMEE = _quoted(BASEPAGENAME) @_wrap_pagename def NAMESPACE(self, pagename):
def BASEPAGENAME(self, args): """[MW1.7+] The basename of a subpage ('Title/Subtitle' becomes 'Title') """ return self.pagename.rsplit('/', 1)[0]
ns, partial, full = self.nshandler.splitname(self.pagename)
ns, partial, full = self.nshandler.splitname(pagename)
def NAMESPACE(self, args): """Returns the name of the namespace the current page resides in.""" ns, partial, full = self.nshandler.splitname(self.pagename) return full[:-len(partial)-1]
def NAMESPACEE(self, args): """Returns the name of the namespace the current page resides in. (quoted)""" return urlquote(self.NAMESPACE(args))
NAMESPACEE = _quoted(NAMESPACE)
def NAMESPACEE(self, args): """Returns the name of the namespace the current page resides in. (quoted)""" return urlquote(self.NAMESPACE(args))
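The PAGENAME/FULLPAGENAME/SUBPAGENAME/BASEPAGENAME/NAMESPACE rows above are all one refactor: a _wrap_pagename decorator centralizes "use the argument if given, else self.pagename", and a _quoted decorator derives each ...E variant by underscoring and URL-quoting the plain one instead of duplicating its body. A reduced model of the pattern (args is simplified to a plain list here, not the original Args object):

    from functools import wraps
    from urllib import quote as urlquote     # Python 2; urllib.parse.quote on Python 3

    def _wrap_pagename(f):
        @wraps(f)
        def wrapper(self, args):
            pagename = args[0] if args else self.pagename
            return f(self, pagename)
        return wrapper

    def _quoted(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            return urlquote(f(*args, **kwargs).replace(' ', '_'))
        return wrapper

    class MagicWords(object):
        def __init__(self, pagename):
            self.pagename = pagename

        @_wrap_pagename
        def SUBPAGENAME(self, pagename):
            return pagename.split('/')[-1]

        SUBPAGENAMEE = _quoted(SUBPAGENAME)  # the quoted variant falls out for free

    m = MagicWords('Title/Sub title')
    print(m.SUBPAGENAME([]))    # Sub title
    print(m.SUBPAGENAMEE([]))   # Sub_title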
if 'general' in siteinfo and siteinfo['general'].get('server').endswith(".wikipedia.org") and 'interwikimap' in siteinfo:
if 'general' in siteinfo and siteinfo['general'].get('server', '').endswith(".wikipedia.org") and 'interwikimap' in siteinfo:
def __init__(self, siteinfo): assert siteinfo is not None
if node.children and not _any([ parent.__class__ in self.removeNodesAllChildren.get(node.__class__) for parent in node.parents]):
if node.children and not _any([ parent.__class__ in self.removeNodesAllChildren.get(node.__class__, []) for parent in node.parents]):
def removeBrokenChildren(self, node): """Remove Nodes (while keeping their children) which can't be nested with their parents.""" if node.__class__ in self.removeNodes.keys(): if _any([parent.__class__ in self.removeNodes[node.__class__] for parent in node.parents]): if node.children and not _any([ parent.__class__ in self.removeNodesAllChildren.get(node.__class__) for parent in node.parents]): children = node.children self.report('replaced child', node, children) node.parent.replaceChild(node, newchildren=children) else: self.report('removed child', node) node.parent.removeChild(node) #return
assert False, 'bla' assert cell.colspan == 2
assert cell.colspan == 1
def test_fixTableColspans2(): '''http://es.wikipedia.org/w/index.php?title=Rep%C3%BAblica_Dominicana&oldid=36394218''' raw = r'''
while sections and level<sections[-1].level:
while sections and level<=sections[-1].level:
def create(): if current.start is None or current.endtitle is None: return False l1 = tokens[current.start].text.count("=") l2 = tokens[current.endtitle].text.count("=") level = min (l1, l2)
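Changing < to <= in the loop above fixes heading nesting: with the strict comparison, a heading at the same level as the open section nests inside it instead of closing it first. A toy model of the section stack:

    sections = []   # stack of open heading levels

    def open_heading(level):
        while sections and level <= sections[-1]:   # the fixed comparison
            sections.pop()                          # close same-or-deeper sections first
        sections.append(level)

    for lvl in [2, 3, 3, 2]:
        open_heading(lvl)
        print(sections)
    # prints [2], [2, 3], [2, 3], [2]: the second level-3 heading replaces
    # the first instead of nesting inside it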
if sections and level>sections[-1].level:
if sections:
def create(): if current.start is None or current.endtitle is None: return False l1 = tokens[current.start].text.count("=") l2 = tokens[current.endtitle].text.count("=") level = min (l1, l2)
def login(self, username, password, domain=None):
def login(self, username, password, domain=None, lgtoken=None):
def login(self, username, password, domain=None): args = dict(action="login", lgname=username.encode("utf-8"), lgpassword=password.encode("utf-8"), format="json", ) if domain is not None: args['lgdomain'] = domain.encode('utf-8') headers = {"Content-Type": "application/x-www-form-urlencoded"} postdata = urllib.urlencode(args) def got_page(res): res = loads(res) if res["login"]["result"]=="Success": return self raise RuntimeError("login failed: %r" % (res, )) return client.getPage(self.baseurl, method="POST", postdata=postdata, headers=headers, cookies=self.cookies).addCallback(got_page)
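The extra lgtoken parameter matches the two-step handshake of the older MediaWiki action=login API: the first POST answers NeedToken plus a token, and the client repeats the request with lgtoken set. A hedged sketch of the flow ('post' stands in for the actual twisted client call above; field names per the API docs, not taken from this codebase):

    def login(post, username, password, lgtoken=None):
        args = {'action': 'login', 'lgname': username,
                'lgpassword': password, 'format': 'json'}
        if lgtoken is not None:
            args['lgtoken'] = lgtoken
        res = post(args)
        if res['login']['result'] == 'NeedToken':
            # second round-trip, echoing the token from the first response
            return login(post, username, password, res['login']['token'])
        if res['login']['result'] != 'Success':
            raise RuntimeError('login failed: %r' % (res,))
        return res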
if 'general' in siteinfo and siteinfo['general'].get('sitename') == 'Wikipedia' and 'interwikimap' in siteinfo: fix_wikipedia_siteinfo(siteinfo)
def set_redirect_matcher(self, siteinfo): if 'general' in siteinfo and siteinfo['general'].get('sitename') == 'Wikipedia' and 'interwikimap' in siteinfo: fix_wikipedia_siteinfo(siteinfo) self.redirect_matcher = get_redirect_matcher(siteinfo, self)
self.contentWithoutTextClasses = [Gallery, ImageLink]
self.contentWithoutTextClasses = [Gallery, ImageLink, ReferenceList]
def __init__(self, tree, save_reports=False, nesting_strictness='loose', status_cb=None): """Init with parsetree.
if miscutils.hasInfoboxAttrs(table):
if miscutils.hasInfoboxAttrs(table) and article_ns != 100:
def markInfoboxes(self, node): if node.__class__ == Article: tables = node.getChildNodesByClass(Table) found_infobox = False for table in tables: if miscutils.hasInfoboxAttrs(table): table.isInfobox = found_infobox = True if found_infobox or not tables: return if miscutils.articleStartsWithTable(node, max_text_until_infobox=200): tables[0].isInfobox = True return
if miscutils.articleStartsWithTable(node, max_text_until_infobox=200):
if miscutils.articleStartsWithTable(node, max_text_until_infobox=200) and article_ns != 100:
def markInfoboxes(self, node): if node.__class__ == Article: tables = node.getChildNodesByClass(Table) found_infobox = False for table in tables: if miscutils.hasInfoboxAttrs(table): table.isInfobox = found_infobox = True if found_infobox or not tables: return if miscutils.articleStartsWithTable(node, max_text_until_infobox=200): tables[0].isInfobox = True return
self.contentWithoutTextClasses = [Gallery, ImageLink, ReferenceList]
self.contentWithoutTextClasses = [Gallery, ImageLink]
def __init__(self, tree, save_reports=False, nesting_strictness='loose', status_cb=None): """Init with parsetree.
if node.children and not _any([ parent.__class__ in self.removeNodesAllChildren[node.__class__] for parent in node.parents]):
if node.children and not _any([ parent.__class__ in self.removeNodesAllChildren.get(node.__class__) for parent in node.parents]):
def removeBrokenChildren(self, node): """Remove Nodes (while keeping their children) which can't be nested with their parents.""" if node.__class__ in self.removeNodes.keys(): if _any([parent.__class__ in self.removeNodes[node.__class__] for parent in node.parents]): if node.children and not _any([ parent.__class__ in self.removeNodesAllChildren[node.__class__] for parent in node.parents]): children = node.children self.report('replaced child', node, children) node.parent.replaceChild(node, newchildren=children) else: self.report('removed child', node) node.parent.removeChild(node) #return
if ref_name != ref_name.strip('"'):
if ref_name and ref_name != ref_name.strip('"'):
def fixReferenceNodes(self, node): ref_nodes = node.getChildNodesByClass(Reference) name2children = {} for ref_node in ref_nodes: ref_name = ref_node.attributes.get('name') if ref_name != ref_name.strip('"'): ref_name = ref_name.strip('"') ref_node.vlist['name'] = ref_name if ref_name and ref_node.children and not name2children.has_key(ref_name): name2children[ref_name] = ref_node.children
Status(status_path)(status='job queued', progres=0)
Status(status_path)(status='job queued', progress=0)
def do_render(self, collection_id, post_data, is_new=False): metabook_data = post_data.get('metabook') if is_new and not metabook_data: return self.error_response('POST argument metabook or collection_id required') if not is_new and metabook_data: return self.error_response('Specify either metabook or collection_id, not both') try: writer = post_data.get('writer', self.default_writer) except KeyError, exc: return self.error_response('POST argument required: %s' % exc) base_url = post_data.get('base_url') if base_url and not self.is_good_baseurl(base_url): log.bad("bad base_url: %r" % (base_url, )) return self.error_response("bad base_url %r. check your $wgServer and $wgScriptPath variables" % (base_url, )) writer_options = post_data.get('writer_options', '') template_blacklist = post_data.get('template_blacklist', '') template_exclusion_category = post_data.get('template_exclusion_category', '') print_template_prefix = post_data.get('print_template_prefix', '') print_template_pattern = post_data.get('print_template_pattern', '') login_credentials = post_data.get('login_credentials', '') force_render = bool(post_data.get('force_render')) script_extension = post_data.get('script_extension', '') language = post_data.get('language', '') log.info('render %s %s' % (collection_id, writer)) response = { 'collection_id': collection_id, 'writer': writer, 'is_cached': False, } pid_path = self.get_path(collection_id, self.pid_filename, writer) if os.path.exists(pid_path): log.info('mw-render already running for collection %r' % collection_id) return response output_path = self.get_path(collection_id, self.output_filename, writer) if os.path.exists(output_path): if force_render: log.info('removing rendered file %r (forced rendering)' % output_path) utils.safe_unlink(output_path) else: log.info('re-using rendered file %r' % output_path) response['is_cached'] = True return response error_path = self.get_path(collection_id, self.error_filename, writer) if os.path.exists(error_path): log.info('removing error file %r' % error_path) utils.safe_unlink(error_path) mail_sent = self.get_path(collection_id, "mail-sent") if os.path.exists(mail_sent): utils.safe_unlink(mail_sent) force_render = True status_path = self.get_path(collection_id, self.status_filename, writer) if os.path.exists(status_path): if force_render: log.info('removing status file %r (forced rendering)' % status_path) utils.safe_unlink(status_path) else: log.info('status file exists %r' % status_path) return response if self.mwrender_logfile: logfile = self.mwrender_logfile else: logfile = self.get_path(collection_id, self.mwrenderlog_filename, writer) args = [ self.mwrender_cmd, '--logfile', logfile, '--error-file', error_path, '--status-file', status_path, '--writer', writer, '--output', output_path, '--pid-file', pid_path, ] zip_path = self.get_path(collection_id, self.zip_filename) if not force_render and os.path.exists(zip_path): log.info('using existing ZIP file to render %r' % output_path) args.extend(['--config', zip_path]) if writer_options: args.extend(['--writer-options', writer_options]) if template_blacklist: args.extend(['--template-blacklist', template_blacklist]) if template_exclusion_category: args.extend(['--template-exclusion-category', template_exclusion_category]) if print_template_prefix: args.extend(['--print-template-prefix', print_template_prefix]) if print_template_pattern: args.extend(['--print-template-pattern', print_template_pattern]) if language: args.extend(['--language', language]) else: log.info('rendering %r' % output_path) metabook_path = self.get_path(collection_id, self.metabook_filename) if metabook_data: f = open(metabook_path, 'wb') f.write(metabook_data) f.close() args.extend([ '--metabook', metabook_path, '--keep-zip', zip_path, ]) if base_url: args.extend(['--config', base_url]) if writer_options: args.extend(['--writer-options', writer_options]) if template_blacklist: args.extend(['--template-blacklist', template_blacklist]) if template_exclusion_category: args.extend(['--template-exclusion-category', template_exclusion_category]) if print_template_prefix: args.extend(['--print-template-prefix', print_template_prefix]) if print_template_pattern: args.extend(['--print-template-pattern', print_template_pattern]) if login_credentials: login = login_credentials.split(":", 2) if len(login)==2: user, password = login domain=None elif len(login)==3: user, password, domain = login else: raise RuntimeError("bad login_credentials argument") args.extend(["--username", user, "--password", password]) if domain: args.extend(["--domain", domain]) if script_extension: args.extend(['--script-extension', script_extension]) if language: args.extend(['--language', language]) Status(status_path)(status='job queued', progres=0) self.queue_render_job('render', collection_id, args) return response
class handler(WSGIHandler): def alog_request(self, *args, **kwargs): pass
def do_zip_post(self, collection_id, post_data, is_new=False): try: metabook_data = post_data['metabook'] except KeyError, exc: return self.error_response('POST argument required: %s' % exc) base_url = post_data.get('base_url') template_blacklist = post_data.get('template_blacklist', '') template_exclusion_category = post_data.get('template_exclusion_category', '') print_template_prefix = post_data.get('print_template_prefix', '') print_template_pattern = post_data.get('print_template_pattern', '') login_credentials = post_data.get('login_credentials', '') script_extension = post_data.get('script_extension', '') pod_api_url = post_data.get('pod_api_url', '') if pod_api_url: result = json.loads(unicode(urllib2.urlopen(pod_api_url, data="any").read(), 'utf-8')) post_url = result['post_url'].encode('utf-8') response = { 'state': 'ok', 'redirect_url': result['redirect_url'].encode('utf-8'), } else: try: post_url = post_data['post_url'] except KeyError: return self.error_response('POST argument required: post_url') response = {'state': 'ok'} log.info('zip_post %s %s' % (collection_id, pod_api_url))
server = WSGIServer(address, application, handler_class=handler)
server = WSGIServer(address, application)
def main(): application = Application() address = "localhost", 8899 server = WSGIServer(address, application, handler_class=handler) try: print "listening on %s:%d" % address server.serve_forever() except KeyboardInterrupt: server.stop() print "Bye bye"
if last_cell.colspan != node.numcols:
if last_cell.__class__ != Cell or last_cell.colspan != node.numcols:
def unNestEndingCellContent(self, node): '''http://de.wikipedia.org/w/index.php?title=Bahnstrecke_Berlin%E2%80%93Dresden&oldid=72891289''' if node.__class__ == Table and not node.getParentNodesByClass(Table): last_row = node.children[-1] if not last_row or len(last_row.children) != 1: return last_cell = last_row.children[0] if last_cell.colspan != node.numcols: return if self._unNestCond(last_cell): d = Div() d.border = 1 d.vlist = last_cell.vlist for item in last_cell.children: d.appendChild(item) last_cell.children = [] d.moveto(node) self.report('moved content behind table', d)
del children[0]
if children[0].children: del children[0].children[0] else: del children[0]
def create_ref(self, name, vlist, inner, xopts): expander = xopts.expander if expander is not None and inner: inner = expander.parseAndExpand(inner, True)
section_title = node.children[0].children[0].caption
try: section_title = node.children[0].children[0].caption except IndexError: section_title = ''
def removeSeeAlso(self, node): try: seealso_section = _('See also') except NameError: seealso_section = 'See also'
lines = [" " + x for x in stdout[4096:].split("\n")]
lines = [" " + x for x in stdout[-4096:].split("\n")]
def system(args, timeout=None): stime=time.time() retcode, stdout = proc.run_cmd(args, timeout=timeout) d = time.time()-stime pub_args = garble_password(args) msg = [] a = msg.append a("%s %s %r\n" % (retcode, d, pub_args)) writemsg = lambda: sys.stderr.write("".join(msg)) if retcode != 0: a(stdout) a("\n====================\n") writemsg() lines = [" " + x for x in stdout[4096:].split("\n")] raise RuntimeError("command failed with returncode %s: %r\nLast Output:\n%s" % (retcode, pub_args, "\n".join(lines))) writemsg()
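The slice fix above is easy to misread: stdout[4096:] drops the first 4 KB and keeps the unbounded remainder, while stdout[-4096:] keeps the last 4 KB, which is what a "Last Output" excerpt wants. In miniature:

    s = 'x' * 10000
    print(len(s[4096:]))     # 5904: everything after the first 4096 characters
    print(len(s[-4096:]))    # 4096: just the tail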
Table:[DefinitionList, DefinitionDescription]
Table:[DefinitionList, DefinitionDescription],
def __init__(self, tree, save_reports=False, nesting_strictness='loose', status_cb=None): """Init with parsetree.
self.namedargs[name] = val
do_strip = False if do_strip and isinstance(val, unicode): val = val.strip() self.namedargs[name] = (do_strip, val)
def get(self, n, default): self.count += 1 if isinstance(n, (int, long)): try: a=self.args[n] except IndexError: return default if isinstance(a, unicode): return a.strip() tmp = [] flatten(a, self.expander, self.variables, tmp) _insert_implicit_newlines(tmp) tmp = u"".join(tmp).strip() if len(tmp)>256*1024: raise MemoryLimitError("template argument too long: %s bytes" % len(tmp)) # FIXME: cache value ??? return tmp
val = self.namedargs[n]
do_strip, val = self.namedargs[n]
def get(self, n, default): self.count += 1 if isinstance(n, (int, long)): try: a=self.args[n] except IndexError: return default if isinstance(a, unicode): return a.strip() tmp = [] flatten(a, self.expander, self.variables, tmp) _insert_implicit_newlines(tmp) tmp = u"".join(tmp).strip() if len(tmp)>256*1024: raise MemoryLimitError("template argument too long: %s bytes" % len(tmp)) # FIXME: cache value ??? return tmp
tmp=u"".join(tmp).strip() self.namedargs[n] = tmp
tmp=u"".join(tmp) if do_strip: tmp = tmp.strip() self.namedargs[n] = (do_strip, tmp)
def get(self, n, default): self.count += 1 if isinstance(n, (int, long)): try: a=self.args[n] except IndexError: return default if isinstance(a, unicode): return a.strip() tmp = [] flatten(a, self.expander, self.variables, tmp) _insert_implicit_newlines(tmp) tmp = u"".join(tmp).strip() if len(tmp)>256*1024: raise MemoryLimitError("template argument too long: %s bytes" % len(tmp)) # FIXME: cache value ??? return tmp
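The namedargs rows above change the cache entry from a bare string to a (do_strip, value) pair, so the strip semantics in force when a value was cached travel with it. A reduced model of the idea (the mismatch handling here is illustrative, not a copy of the original):

    cache = {}

    def get_arg(name, raw, do_strip):
        if name in cache:
            cached_strip, val = cache[name]
            if cached_strip == do_strip:
                return val                       # cached under the same semantics
        val = raw.strip() if do_strip else raw
        cache[name] = (do_strip, val)
        return val

    print(repr(get_arg('x', '  padded  ', True)))    # 'padded'
    print(repr(get_arg('x', '  padded  ', False)))   # '  padded  '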
if chunk.find('<div id="sign_in_box">') != -1:
if chunk.find('<div class="signin">') != -1:
def download_file(filename,url): global options global checksums global filesizes resume_start = 0 if os.path.exists(filename): if filename in checksums: print 'Checking existing ' + filename file_size = os.stat(filename).st_size if file_size == filesizes[filename]: file_checksum = md5_checksum(filename) if file_checksum == checksums[filename]: if options.progress: print '- OK ' + filename return True elif file_size < filesizes[filename]: if options.progress: print '- %s is too short' % (filename) if options.debug: print '- %s is %d bytes, should be %d bytes' % (filename, file_size, filesizes[filename]) if options.resume: resume_start = file_size if options.dryrun and not re.match(r"release_metadata", filename): global download_list download_info = "download %s %s" % (filename, url) download_list.append(download_info) return True print 'Downloading ' + filename global headers request_headers = headers.copy() # want a fresh copy for each request if resume_start > 0: request_headers['Range'] = "bytes=%d-%d" % (resume_start, filesizes[filename]) req = urllib2.Request(url, None, request_headers) CHUNK = 128 * 1024 size = 0 filesize = -1 start_time = time.time() last_time = start_time last_size = size try: response = urllib2.urlopen(req) chunk = response.read(CHUNK) if chunk.find('<div id="sign_in_box">') != -1: # our urllib2 cookies have gone awol - login again login(False) req = urllib2.Request(url, None, request_headers) response = urllib2.urlopen(req) chunk = response.read(CHUNK) if chunk.find('<div id="sign_in_box">') != -1: # still broken - give up on this one print "*** ERROR trying to download %s" % (filename) return False info = response.info() if 'Content-Length' in info: filesize = resume_start + int(info['Content-Length']) # NB. length of the requested content, taking into account the range if resume_start > 0 and 'Content-Range' not in info: # server doesn't believe in our range filesize = int(info['Content-Length']) if options.debug: print "Server reports filesize as %d, ignoring our range request (%d-%d)" % (filesize, resume_start, filesizes[filename]) resume_start = 0; # will have to download from scratch if filename in filesizes: if filesize != filesizes[filename]: print "WARNING: %s size %d does not match release_metadata.xml (%d)" % ( filename, filesize, filesizes[filename]) else: match = re.search('>([^>]+Licen[^<]+)<', chunk, re.IGNORECASE) if match: license = match.group(1).replace('&amp;','&') print "*** %s is subject to the %s which you have not yet accepted\n" % (filename,license) return False print "*** HTTP response did not contain 'Content-Length' when expected" if options.debug: print info print chunk return False except urllib2.URLError, e: print '- ERROR: Failed to start downloading ' + filename if hasattr(e, 'reason'): print 'Reason: ', e.reason elif hasattr(e, 'code'): print 'Error code: ', e.code return False # we are now up and running, and chunk contains the start of the download if options.debug: print "\nReading %s from effective URL %s" % (filename, response.geturl()) try: if resume_start > 0: fp = open(filename, 'a+b') # append to existing content if options.progress: print " - Resuming at offset %d" % (resume_start) size = resume_start last_size = size else: fp = open(filename, 'wb') # write new file md5 = hashlib.md5() while True: fp.write(chunk) md5.update(chunk) size += len(chunk) now = time.time() if options.progress and now-last_time > 20: rate = (size-last_size)/(now-last_time) estimate = "" if filesize > 0 and rate > 0: remaining_seconds = (filesize-size)/rate if remaining_seconds > 110: remaining = "%d minutes" % (remaining_seconds/60) else: remaining = "%d seconds" % remaining_seconds estimate = "- %d%% est. %s" % ((100*size/filesize), remaining) print "- %d Kb (%d Kb/s) %s" % (size/1024, (rate/1024)+0.5, estimate) last_time = now last_size = size chunk = response.read(CHUNK) if not chunk: break fp.close() #handle errors except urllib2.URLError, e: print '- ERROR: Failed while downloading ' + filename if hasattr(e, 'reason'): print 'Reason: ', e.reason elif hasattr(e, 'code'): print 'Error code: ', e.code return False if options.debug: info = response.info() print "Info from final response of transfer:" print response.info() if filesize > 0 and size != filesize: print "Incomplete transfer - only received %d bytes of the expected %d byte file" % (size, filesize) return False if options.progress: now = time.time() print "- Completed %s - %d Kb in %d seconds" % (filename, (filesize/1024)+0.5, now-start_time) if filename in checksums: download_checksum = md5.hexdigest().upper() if resume_start > 0: # did a partial download, so need to checksum the whole file download_checksum = md5_checksum(filename) if download_checksum != checksums[filename]: if options.debug: print '- Checksum for %s was %s, expected %s' % (filename, download_checksum, checksums[filename]) print '- ERROR: %s checksum does not match' % filename return False return True
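The context above implements resumable downloads: a Range request header asks for the missing bytes, the file is opened for appending, and, because the streaming digest only covers the resumed tail, the md5 of the whole file is recomputed after a partial download. A reduced sketch of that logic (Python 2 urllib2, placeholder arguments, no error paths):

    import hashlib, os, urllib2

    def resume_download(url, filename, expected_size):
        start = os.path.getsize(filename) if os.path.exists(filename) else 0
        req = urllib2.Request(url)
        if start > 0:
            req.add_header('Range', 'bytes=%d-%d' % (start, expected_size - 1))
        resp = urllib2.urlopen(req)
        with open(filename, 'ab' if start > 0 else 'wb') as fp:
            while True:
                chunk = resp.read(128 * 1024)
                if not chunk:
                    break
                fp.write(chunk)
        md5 = hashlib.md5()                  # hash the complete file, not just the tail
        with open(filename, 'rb') as fp:
            for chunk in iter(lambda: fp.read(128 * 1024), ''):
                md5.update(chunk)
        return md5.hexdigest()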
print "HTTP Error:",e.code , downloadurl
print "HTTP Error:",e.code , url
def download_file(filename,url): global options if options.dryrun : global download_list download_info = "download %s %s" % (filename, url) download_list.append(download_info) return True print 'Downloading ' + filename global headers req = urllib2.Request(url, None, headers) try: response = urllib2.urlopen(req) CHUNK = 128 * 1024 size = 0 filesize = -1 last_time = time.time() last_size = size fp = open(filename, 'wb') while True: chunk = response.read(CHUNK) if not chunk: break if size == 0 and chunk.find('<div id="sign_in_box">') != -1: # our urllib2 cookies have gone awol - login again login(False) req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) chunk = response.read(CHUNK) if chunk.find('<div id="sign_in_box">') != -1: # still broken - give up on this one print "*** ERROR trying to download %s" % (filename) break; if size == 0: info = response.info() if 'Content-Length' in info: filesize = int(info['Content-Length']) else: print "*** HTTP response did not contain 'Content-Length' when expected" print info break fp.write(chunk) size += len(chunk) now = time.time() if options.progress and now-last_time > 20: rate = (size-last_size)/(now-last_time) estimate = "" if filesize > 0 and rate > 0: remaining_seconds = (filesize-size)/rate if remaining_seconds > 110: remaining = "%d minutes" % (remaining_seconds/60) else: remaining = "%d seconds" % remaining_seconds estimate = "- %d%% est. %s" % ((100*size/filesize), remaining) print "- %d Kb (%d Kb/s) %s" % (size/1024, (rate/1024)+0.5, estimate) last_time = now last_size = size fp.close() if options.progress: now = time.time() print "- Completed %s - %d Kb in %d seconds" % (filename, (filesize/1024)+0.5, now-last_time) #handle errors except urllib2.HTTPError, e: print "HTTP Error:",e.code , downloadurl return False except urllib2.URLError, e: print "URL Error:",e.reason , downloadurl return False return True
print "URL Error:",e.reason , downloadurl
print "URL Error:",e.reason , url
def download_file(filename,url): global options if options.dryrun : global download_list download_info = "download %s %s" % (filename, url) download_list.append(download_info) return True print 'Downloading ' + filename global headers req = urllib2.Request(url, None, headers) try: response = urllib2.urlopen(req) CHUNK = 128 * 1024 size = 0 filesize = -1 last_time = time.time() last_size = size fp = open(filename, 'wb') while True: chunk = response.read(CHUNK) if not chunk: break if size == 0 and chunk.find('<div id="sign_in_box">') != -1: # our urllib2 cookies have gone awol - login again login(False) req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) chunk = response.read(CHUNK) if chunk.find('<div id="sign_in_box">') != -1: # still broken - give up on this one print "*** ERROR trying to download %s" % (filename) break; if size == 0: info = response.info() if 'Content-Length' in info: filesize = int(info['Content-Length']) else: print "*** HTTP response did not contain 'Content-Length' when expected" print info break fp.write(chunk) size += len(chunk) now = time.time() if options.progress and now-last_time > 20: rate = (size-last_size)/(now-last_time) estimate = "" if filesize > 0 and rate > 0: remaining_seconds = (filesize-size)/rate if remaining_seconds > 110: remaining = "%d minutes" % (remaining_seconds/60) else: remaining = "%d seconds" % remaining_seconds estimate = "- %d%% est. %s" % ((100*size/filesize), remaining) print "- %d Kb (%d Kb/s) %s" % (size/1024, (rate/1024)+0.5, estimate) last_time = now last_size = size fp.close() if options.progress: now = time.time() print "- Completed %s - %d Kb in %d seconds" % (filename, (filesize/1024)+0.5, now-last_time) #handle errors except urllib2.HTTPError, e: print "HTTP Error:",e.code , downloadurl return False except urllib2.URLError, e: print "URL Error:",e.reason , downloadurl return False return True
parser = OptionParser(version="%prog 0.6", usage="Usage: %prog [options] version")
parser = OptionParser(version="%prog 0.6.1", usage="Usage: %prog [options] version")
def downloadkit(version): global headers global options urlbase = 'http://developer.symbian.org/main/tools_and_kits/downloads/' viewid = 5 # default to Symbian^3 if version[0] == 2: viewid= 1 # Symbian^2 if version[0] == 3: viewid= 5 # Symbian^3 url = urlbase + ('view.php?id=%d'% viewid) + 'vId=' + version req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) doc=response.read() # BeatifulSoup chokes on some javascript, so we cut away everything before the <body> try: bodystart=doc.find('<body>') doc = doc[bodystart:] except: pass threadlist = [] # let's hope the HTML format never changes... # <a href='download.php?id=27&cid=60&iid=270' title='src_oss_mw.zip'> ...</a> soup=BeautifulSoup(doc) results=soup.findAll('a', href=re.compile("^download"), title=re.compile("\.(zip|xml)$")) results.sort(orderResults) for result in results: downloadurl = urlbase + result['href'] filename = result['title'] if options.nosrc and re.match(r"(src_sfl|src_oss)", filename) : continue # no snapshots of Mercurial source thanks... if download_file(filename, downloadurl) != True : continue # download failed # unzip the file (if desired) if re.match(r"patch", filename): complete_outstanding_unzips() # ensure that the thing we are patching is completed first if re.match(r"(bin|tools).*\.zip", filename): schedule_unzip(filename, 1, 0) # unzip once, don't delete elif re.match(r"src_.*\.zip", filename): schedule_unzip(filename, 1, 1) # zip of zips, delete top level elif re.match(r"build_BOM.zip", filename): schedule_unzip(filename, 1, 1) # unpack then delete zip as it's not needed again # wait for the unzipping threads to complete complete_outstanding_unzips() return 1
parser = OptionParser(version="%prog 0.8", usage="Usage: %prog [options] version")
parser = OptionParser(version="%prog 0.9", usage="Usage: %prog [options] version")
def downloadkit(version): global headers global options urlbase = 'http://developer.symbian.org/main/tools_and_kits/downloads/' viewid = 5 # default to Symbian^3 if version[0] == 2: viewid= 1 # Symbian^2 if version[0] == 3: viewid= 5 # Symbian^3 url = urlbase + ('view.php?id=%d'% viewid) + 'vId=' + version req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) doc=response.read() # BeatifulSoup chokes on some javascript, so we cut away everything before the <body> try: bodystart=doc.find('<body>') doc = doc[bodystart:] except: pass threadlist = [] # let's hope the HTML format never changes... # <a href='download.php?id=27&cid=60&iid=270' title='src_oss_mw.zip'> ...</a> soup=BeautifulSoup(doc) results=soup.findAll('a', href=re.compile("^download"), title=re.compile("\.(zip|xml)$")) results.sort(orderResults) for result in results: downloadurl = urlbase + result['href'] filename = result['title'] if options.nosrc and re.match(r"(src_sfl|src_oss)", filename) : continue # no snapshots of Mercurial source thanks... if download_file(filename, downloadurl) != True : continue # download failed # unzip the file (if desired) if re.match(r"patch", filename): complete_outstanding_unzips() # ensure that the thing we are patching is completed first if re.match(r"release_metadata", filename): parse_release_metadata(filename) # read the md5 checksums etc elif re.match(r"(bin|tools).*\.zip", filename): schedule_unzip(filename, 1, 0) # unzip once, don't delete elif re.match(r"src_.*\.zip", filename): schedule_unzip(filename, 1, 1) # zip of zips, delete top level elif re.match(r"build_BOM.zip", filename): schedule_unzip(filename, 1, 1) # unpack then delete zip as it's not needed again # wait for the unzipping threads to complete complete_outstanding_unzips() return 1
password=''
password='', debug=False
def downloadkit(version): global headers global options urlbase = 'http://developer.symbian.org/main/tools_and_kits/downloads/' viewid = 5 # default to Symbian^3 if version[0] == 2: viewid= 1 # Symbian^2 if version[0] == 3: viewid= 5 # Symbian^3 url = urlbase + ('view.php?id=%d'% viewid) + 'vId=' + version req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) doc=response.read() # BeatifulSoup chokes on some javascript, so we cut away everything before the <body> try: bodystart=doc.find('<body>') doc = doc[bodystart:] except: pass threadlist = [] # let's hope the HTML format never changes... # <a href='download.php?id=27&cid=60&iid=270' title='src_oss_mw.zip'> ...</a> soup=BeautifulSoup(doc) results=soup.findAll('a', href=re.compile("^download"), title=re.compile("\.(zip|xml)$")) results.sort(orderResults) for result in results: downloadurl = urlbase + result['href'] filename = result['title'] if options.nosrc and re.match(r"(src_sfl|src_oss)", filename) : continue # no snapshots of Mercurial source thanks... if download_file(filename, downloadurl) != True : continue # download failed # unzip the file (if desired) if re.match(r"patch", filename): complete_outstanding_unzips() # ensure that the thing we are patching is completed first if re.match(r"release_metadata", filename): parse_release_metadata(filename) # read the md5 checksums etc elif re.match(r"(bin|tools).*\.zip", filename): schedule_unzip(filename, 1, 0) # unzip once, don't delete elif re.match(r"src_.*\.zip", filename): schedule_unzip(filename, 1, 1) # zip of zips, delete top level elif re.match(r"build_BOM.zip", filename): schedule_unzip(filename, 1, 1) # unpack then delete zip as it's not needed again # wait for the unzipping threads to complete complete_outstanding_unzips() return 1
unzipthread = unzipfile(filename, unziplevels, deletelevels)
unzipthread = unzipfile(filename, unziplevel, deletelevel)
def schedule_unzip(filename, unziplevel, deletelevel): global options if options.nounzip : return if options.dryrun : global unzip_list if unziplevel > 0: unzip_list.append("7z x -y %s" % filename) if unziplevel > 1: unzip_list.append("# unzip recursively %d more times" % unziplevel-1) if deletelevel > 0: unzip_list.append("# delete %s" % filename) if deletelevel > 1: unzip_list.append("# delete zip files recursively %d more times" % deletelevel-1) return unzipthread = unzipfile(filename, unziplevels, deletelevels) global threadlist threadlist.append(unzipthread) unzipthread.start()
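The rename above is a plain NameError repair: the function's parameters are unziplevel and deletelevel (singular), so the plural names in the call referenced nothing in scope and every non-dry-run unzip would crash. A latent precedence bug also survives in the dry-run branch: '"%d more times" % unziplevel-1' formats first and then subtracts 1 from the resulting string, which raises TypeError; the intended expression is '% (unziplevel-1)'.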
first_chunk = True
size = 0 last_time = time.time() last_size = size
def download_file(filename,url): global options if options.dryrun : global download_list download_info = "download %s %s" % (filename, url) download_list.append(download_info) return True print 'Downloading ' + filename global headers req = urllib2.Request(url, None, headers) try: response = urllib2.urlopen(req) CHUNK = 128 * 1024 first_chunk = True fp = open(filename, 'wb') while True: chunk = response.read(CHUNK) if not chunk: break if first_chunk and chunk.find('<div id="sign_in_box">') != -1: # our urllib2 cookies have gone awol - login again login(False) req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) chunk = response.read(CHUNK) fp.write(chunk) first_chunk = False fp.close() #handle errors except urllib2.HTTPError, e: print "HTTP Error:",e.code , downloadurl return False except urllib2.URLError, e: print "URL Error:",e.reason , downloadurl return False return True
if first_chunk and chunk.find('<div id="sign_in_box">') != -1:
if size == 0 and chunk.find('<div id="sign_in_box">') != -1:
def download_file(filename,url): global options if options.dryrun : global download_list download_info = "download %s %s" % (filename, url) download_list.append(download_info) return True print 'Downloading ' + filename global headers req = urllib2.Request(url, None, headers) try: response = urllib2.urlopen(req) CHUNK = 128 * 1024 first_chunk = True fp = open(filename, 'wb') while True: chunk = response.read(CHUNK) if not chunk: break if first_chunk and chunk.find('<div id="sign_in_box">') != -1: # our urllib2 cookies have gone awol - login again login(False) req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) chunk = response.read(CHUNK) fp.write(chunk) first_chunk = False fp.close() #handle errors except urllib2.HTTPError, e: print "HTTP Error:",e.code , downloadurl return False except urllib2.URLError, e: print "URL Error:",e.reason , downloadurl return False return True
first_chunk = False
size += len(chunk) now = time.time() if options.progress and now-last_time > 20: print "- %d Kb (%d Kb/s)" % (size/1024, ((size-last_size)/1024/(now-last_time))+0.5) last_time = now last_size = size
def download_file(filename,url): global options if options.dryrun : global download_list download_info = "download %s %s" % (filename, url) download_list.append(download_info) return True print 'Downloading ' + filename global headers req = urllib2.Request(url, None, headers) try: response = urllib2.urlopen(req) CHUNK = 128 * 1024 first_chunk = True fp = open(filename, 'wb') while True: chunk = response.read(CHUNK) if not chunk: break if first_chunk and chunk.find('<div id="sign_in_box">') != -1: # our urllib2 cookies have gone awol - login again login(False) req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) chunk = response.read(CHUNK) fp.write(chunk) first_chunk = False fp.close() #handle errors except urllib2.HTTPError, e: print "HTTP Error:",e.code , downloadurl return False except urllib2.URLError, e: print "URL Error:",e.reason , downloadurl return False return True
parser = OptionParser(usage="Usage: %prog [options] version", version="%prog 0.4")
parser = OptionParser(version="%prog 0.5", usage="Usage: %prog [options] version")
def downloadkit(version): global headers global options urlbase = 'http://developer.symbian.org/main/tools_and_kits/downloads/' viewid = 5 # default to Symbian^3 if version[0] == 2: viewid= 1 # Symbian^2 if version[0] == 3: viewid= 5 # Symbian^3 url = urlbase + ('view.php?id=%d'% viewid) + 'vId=' + version req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) doc=response.read() # BeatifulSoup chokes on some javascript, so we cut away everything before the <body> try: bodystart=doc.find('<body>') doc = doc[bodystart:] except: pass threadlist = [] # let's hope the HTML format never changes... # <a href='download.php?id=27&cid=60&iid=270' title='src_oss_mw.zip'> ...</a> soup=BeautifulSoup(doc) results=soup.findAll('a', href=re.compile("^download"), title=re.compile("\.(zip|xml)$")) results.sort(orderResults) for result in results: downloadurl = urlbase + result['href'] filename = result['title'] if options.nosrc and re.match(r"(src_sfl|src_oss)", filename) : continue # no snapshots of Mercurial source thanks... if download_file(filename, downloadurl) != True : continue # download failed # unzip the file (if desired) if re.match(r"patch", filename): complete_outstanding_unzips() # ensure that the thing we are patching is completed first if re.match(r"(bin|tools).*\.zip", filename): schedule_unzip(filename, 1, 0) # unzip once, don't delete elif re.match(r"src_.*\.zip", filename): schedule_unzip(filename, 1, 1) # zip of zips, delete top level elif re.match(r"build_BOM.zip", filename): schedule_unzip(filename, 1, 1) # unpack then delete zip as it's not needed again # wait for the unzipping threads to complete complete_outstanding_unzips() return 1
parser.set_defaults(dryrun=False, nosrc=False, nounzip=False)
parser.add_option("--progress", action="store_true", dest="progress", help="Report download progress") parser.set_defaults(dryrun=False, nosrc=False, nounzip=False, progress=False)
def downloadkit(version): global headers global options urlbase = 'http://developer.symbian.org/main/tools_and_kits/downloads/' viewid = 5 # default to Symbian^3 if version[0] == 2: viewid= 1 # Symbian^2 if version[0] == 3: viewid= 5 # Symbian^3 url = urlbase + ('view.php?id=%d'% viewid) + 'vId=' + version req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) doc=response.read() # BeatifulSoup chokes on some javascript, so we cut away everything before the <body> try: bodystart=doc.find('<body>') doc = doc[bodystart:] except: pass threadlist = [] # let's hope the HTML format never changes... # <a href='download.php?id=27&cid=60&iid=270' title='src_oss_mw.zip'> ...</a> soup=BeautifulSoup(doc) results=soup.findAll('a', href=re.compile("^download"), title=re.compile("\.(zip|xml)$")) results.sort(orderResults) for result in results: downloadurl = urlbase + result['href'] filename = result['title'] if options.nosrc and re.match(r"(src_sfl|src_oss)", filename) : continue # no snapshots of Mercurial source thanks... if download_file(filename, downloadurl) != True : continue # download failed # unzip the file (if desired) if re.match(r"patch", filename): complete_outstanding_unzips() # ensure that the thing we are patching is completed first if re.match(r"(bin|tools).*\.zip", filename): schedule_unzip(filename, 1, 0) # unzip once, don't delete elif re.match(r"src_.*\.zip", filename): schedule_unzip(filename, 1, 1) # zip of zips, delete top level elif re.match(r"build_BOM.zip", filename): schedule_unzip(filename, 1, 1) # unpack then delete zip as it's not needed again # wait for the unzipping threads to complete complete_outstanding_unzips() return 1
reporting_url = "http://developer.symbian.org/downloadkit_report/%s/%s/args=" % (version, what)
reporting_url = "http://developer-secure.symbian.org/downloadkit_report/%s/%s/args=" % (version, what)
def report_to_symbian(version, what): global options if not options.publicity: return reporting_url = "http://developer.symbian.org/downloadkit_report/%s/%s/args=" % (version, what) if options.dryrun: reporting_url += "+dryrun" if options.nosrc: reporting_url += "+nosrc" if options.nowinscw: reporting_url += "+nowinscw" if options.noarmv5: reporting_url += "+noarmv5" if options.nounzip: reporting_url += "+nounzip" if options.nodelete: reporting_url += "+nodelete" if options.progress: reporting_url += "+progress" if options.resume: reporting_url += "+resume" if options.debug: reporting_url += "+debug" req = urllib2.Request(reporting_url, None, headers) try: urllib2.urlopen(req) # ignore the response, which will always be 404 except urllib2.URLError, e: return
if options.debug: f = open("downloadpage.html","w") print >>f, doc f.close()
def downloadkit(version): global headers global options global failure_list urlbase = top_level_url + '/main/tools_and_kits/downloads/' viewid = 5 # default to Symbian^3 if version[0] == '2': viewid = 1 # Symbian^2 if version[0] == '3': viewid = 5 # Symbian^3 if version.startswith('lucky'): viewid = 12 # Do you feel lucky? version = version[5:] url = urlbase + ('view.php?id=%d'% viewid) if len(version) > 1: # single character version means "give me the latest" url = url + '&vId=' + version req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) doc=response.read() # BeatifulSoup chokes on some javascript, so we cut away everything before the <body> try: bodystart=doc.find('<body>') doc = doc[bodystart:] except: pass if options.debug: f = open("downloadpage.html","w") print >>f, doc f.close() soup=BeautifulSoup(doc) # check that this is the right version match = re.search(' v(\S+)</h2>', doc, re.IGNORECASE) if not match: print "*** ERROR: no version information in the download page" return 0 if len(version) > 1: if match.group(1) != version: print "*** ERROR: version %s is not available" % version print "*** the website is offering version %s instead" % match.group(1) return 0 else: print "The latest version of Symbian^%s is PDK %s" % (version, match.group(1)) # let's hope the HTML format never changes... # <a href='download.php?id=27&cid=60&iid=270' title='src_oss_mw.zip'> ...</a> threadlist = [] results=soup.findAll('a', href=re.compile("^download"), title=re.compile("\.(zip|xml)$")) results.sort(orderResults) for result in results: downloadurl = urlbase + result['href'] filename = result['title'] if options.nosrc and re.match(r"(src_sfl|src_oss)", filename) : continue # no snapshots of Mercurial source thanks... if options.nowinscw and re.search(r"winscw", filename) : continue # no winscw emulator... if options.noarmv5 and re.search(r"armv5", filename) : continue # no armv5 emulator... if options.noarmv5 and options.nowinscw and re.search(r"binaries_epoc.zip|binaries_epoc_sdk", filename) : continue # skip binaries_epoc and binaries_sdk ... if download_file(filename, downloadurl) != True : failure_list.append(filename) continue # download failed # unzip the file (if desired) if re.match(r"patch", filename): complete_outstanding_unzips() # ensure that the thing we are patching is completed first if re.match(r"release_metadata", filename): parse_release_metadata(filename) # read the md5 checksums etc elif re.match(r"(bin|tools).*\.zip", filename): schedule_unzip(filename, 1, 0) # unzip once, don't delete elif re.match(r"src_.*\.zip", filename): schedule_unzip(filename, 1, 1) # zip of zips, delete top level elif re.match(r"build_BOM.zip", filename): schedule_unzip(filename, 1, 1) # unpack then delete zip as it's not needed again report_to_symbian(version, "downfailures_%d" % len(failure_list)) # wait for the unzipping threads to complete complete_outstanding_unzips() if len(failure_list) > 0: print "\n" print "Downloading completed, with failures in %d files\n" % (len(failure_list)) print "\n\t".join(failure_list) print "\n" elif not options.dryrun: print "\nDownloading completed successfully" return 1
webhost = 'developer.symbian.org',
webhost = 'developer-secure.symbian.org',
def downloadkit(version): global headers global options global failure_list urlbase = top_level_url + '/main/tools_and_kits/downloads/' viewid = 5 # default to Symbian^3 if version[0] == '2': viewid = 1 # Symbian^2 if version[0] == '3': viewid = 5 # Symbian^3 if version.startswith('lucky'): viewid = 12 # Do you feel lucky? version = version[5:] url = urlbase + ('view.php?id=%d'% viewid) if len(version) > 1: # single character version means "give me the latest" url = url + '&vId=' + version req = urllib2.Request(url, None, headers) response = urllib2.urlopen(req) doc=response.read() # BeatifulSoup chokes on some javascript, so we cut away everything before the <body> try: bodystart=doc.find('<body>') doc = doc[bodystart:] except: pass if options.debug: f = open("downloadpage.html","w") print >>f, doc f.close() soup=BeautifulSoup(doc) # check that this is the right version match = re.search(' v(\S+)</h2>', doc, re.IGNORECASE) if not match: print "*** ERROR: no version information in the download page" return 0 if len(version) > 1: if match.group(1) != version: print "*** ERROR: version %s is not available" % version print "*** the website is offering version %s instead" % match.group(1) return 0 else: print "The latest version of Symbian^%s is PDK %s" % (version, match.group(1)) # let's hope the HTML format never changes... # <a href='download.php?id=27&cid=60&iid=270' title='src_oss_mw.zip'> ...</a> threadlist = [] results=soup.findAll('a', href=re.compile("^download"), title=re.compile("\.(zip|xml)$")) results.sort(orderResults) for result in results: downloadurl = urlbase + result['href'] filename = result['title'] if options.nosrc and re.match(r"(src_sfl|src_oss)", filename) : continue # no snapshots of Mercurial source thanks... if options.nowinscw and re.search(r"winscw", filename) : continue # no winscw emulator... if options.noarmv5 and re.search(r"armv5", filename) : continue # no armv5 emulator... if options.noarmv5 and options.nowinscw and re.search(r"binaries_epoc.zip|binaries_epoc_sdk", filename) : continue # skip binaries_epoc and binaries_sdk ... if download_file(filename, downloadurl) != True : failure_list.append(filename) continue # download failed # unzip the file (if desired) if re.match(r"patch", filename): complete_outstanding_unzips() # ensure that the thing we are patching is completed first if re.match(r"release_metadata", filename): parse_release_metadata(filename) # read the md5 checksums etc elif re.match(r"(bin|tools).*\.zip", filename): schedule_unzip(filename, 1, 0) # unzip once, don't delete elif re.match(r"src_.*\.zip", filename): schedule_unzip(filename, 1, 1) # zip of zips, delete top level elif re.match(r"build_BOM.zip", filename): schedule_unzip(filename, 1, 1) # unpack then delete zip as it's not needed again report_to_symbian(version, "downfailures_%d" % len(failure_list)) # wait for the unzipping threads to complete complete_outstanding_unzips() if len(failure_list) > 0: print "\n" print "Downloading completed, with failures in %d files\n" % (len(failure_list)) print "\n\t".join(failure_list) print "\n" elif not options.dryrun: print "\nDownloading completed successfully" return 1
version = '2.1b1'
version = '2.1b2'
def read(*rnames): return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
+ '\n' + 'Contributors\n' '************\n'
def read(*rnames): return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
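For context, the read() helper and the 'Contributors' fragment above are the stock setup.py idiom for stitching a long_description out of files shipped next to setup.py; a hedged sketch of how the pieces fit (the file names are assumptions):

import os

def read(*rnames):
    return open(os.path.join(os.path.dirname(__file__), *rnames)).read()

long_description = (
    read('README.txt')
    + '\n' + 'Contributors\n'
    '************\n'            # adjacent string literals concatenate, as in the fragment
    + read('CONTRIBUTORS.txt'))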
banner_brains = self.context.getFolderContents()
catalog = getToolByName(self.context, 'portal_catalog') banner_brains = catalog.searchResults({ 'path': '/'.join(self.context.getPhysicalPath()), 'object_provides': ICarouselBanner.__identifier__, })
def getBanners(self): """ Returns a list of objects that provide ICarouselBanner. """ banner_brains = [] if IFolderish.providedBy(self.context): banner_brains = self.context.getFolderContents() elif IATTopic.providedBy(self.context): banner_brains = self.context.queryCatalog() banner_objects = [b.getObject() for b in banner_brains] return [b for b in banner_objects if ICarouselBanner.providedBy(b)]
banner_objects = []
banner_brains = []
def getBanners(self): """ Returns a list of objects that provide ICarouselBanner. """ banner_objects = [] if IFolderish.providedBy(self.context): banner_objects = self.context.objectValues() elif IATTopic.providedBy(self.context): banner_objects = [brain.getObject() for brain \ in self.context.queryCatalog()] return [b for b in banner_objects if ICarouselBanner.providedBy(b)]
banner_objects = self.context.objectValues()
banner_brains = self.context.getFolderContents()
def getBanners(self): """ Returns a list of objects that provide ICarouselBanner. """ banner_objects = [] if IFolderish.providedBy(self.context): banner_objects = self.context.objectValues() elif IATTopic.providedBy(self.context): banner_objects = [brain.getObject() for brain \ in self.context.queryCatalog()] return [b for b in banner_objects if ICarouselBanner.providedBy(b)]
banner_objects = [brain.getObject() for brain \ in self.context.queryCatalog()]
banner_brains = self.context.queryCatalog() banner_objects = [b.getObject() for b in banner_brains]
def getBanners(self): """ Returns a list of objects that provide ICarouselBanner. """ banner_objects = [] if IFolderish.providedBy(self.context): banner_objects = self.context.objectValues() elif IATTopic.providedBy(self.context): banner_objects = [brain.getObject() for brain \ in self.context.queryCatalog()] return [b for b in banner_objects if ICarouselBanner.providedBy(b)]
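The getBanners() variants above move from waking every contained object (objectValues) to catalog-driven filtering. A hedged sketch of the final shape, assuming a stock Plone portal_catalog with an object_provides index:

from Products.CMFCore.utils import getToolByName

def banners(context, iface):
    # query by path plus interface so only matching brains come back,
    # instead of waking every object and testing providedBy afterwards
    catalog = getToolByName(context, 'portal_catalog')
    brains = catalog.searchResults(
        path='/'.join(context.getPhysicalPath()),
        object_provides=iface.__identifier__)
    return [b.getObject() for b in brains]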
$self.width(500);
if ($self.is('.image-left') || $self.is('.image-right')) $self.width(230); else $self.width(500);
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; var $self = $(this); if ($self.is('.minimal')) { config.plugins.controls = null; }; var audio = $self.is('.audio'); if (audio && !$self.is('.minimal')) { $self.width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($self.is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $self.find('a').attr('href'); // Ignore global autoplay settings if ($self.find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $self.empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var $self = $(this); var audio = $self.is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($self.is('.minimal')) { config.plugins.controls = null; } if ($self.find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $self.parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $self.parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio && !$self.is('.minimal')) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $self.show(); $('.flowPlayerMessage').remove();
catalog = getToolByName(self.context, 'portal_catalog')
def videos(self): catalog = getToolByName(self.context, 'portal_catalog') results = [] for brain in self._query(): video = brain.getObject() if not IFlowPlayable.providedBy(video): continue view = component.getMultiAdapter( (video, self.request), interface.Interface, 'flowplayer') results.append(dict(url=view.href(), title=brain.Title, description=brain.Description, height=view.height, width=view.width, audio_only=view.audio_only())) return results
if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500);
var $self = $(this); if ($self.is('.minimal')) { config.plugins.controls = null; }; var audio = $self.is('.audio'); if (audio && !$self.is('.minimal')) { $self.width(500);
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
if ($(this).is('div')) {
if ($self.is('div')) {
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
config.clip.url = $(this).find('a').attr('href');
config.clip.url = $self.find('a').attr('href');
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
if ($(this).find('img').length == 0) {
if ($self.find('img').length == 0) {
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
$(this).empty();
$self.empty();
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
var audio = $(this).is('.audio');
var $self = $(this); var audio = $self.is('.audio');
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) {
if ($self.is('.minimal')) { config.plugins.controls = null; } if ($self.find('img').length > 0) {
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
portlet_parents = $(this).parents('.portlet');
portlet_parents = $self.parents('.portlet');
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
$(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id);
$self.parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id);
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
if (audio) {
if (audio && !$self.is('.minimal')) {
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
$(this).show();
$self.show();
def __call__(self, request=None, response=None): """ Returns global configuration of the Flowplayer taken from portal_properties """ self.update() self.request.response.setHeader("Content-type", "text/javascript") return """(function($) { $(function() { $('.autoFlowPlayer').each(function() { var config = %(config)s; if ($(this).is('.minimal')) { config.plugins.controls = null; } var audio = $(this).is('.audio'); if (audio) { $(this).width(500); config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = true; } if ($(this).is('div')) { // comming from Kupu, there are relative urls config.clip.baseUrl = $('base').attr('href'); config.clip.url = $(this).find('a').attr('href'); // Ignore global autoplay settings if ($(this).find('img').length == 0) { // no image. Don't autoplay, remove all elements inside the div to show player directly. config.clip.autoPlay = false; $(this).empty(); } else { // Clip is probably linked as image, so autoplay the clip after image is clicked config.clip.autoPlay = true; } } flowplayer(this, %(params)s, config)%(events)s; $('.flowPlayerMessage').remove(); }); $('.playListFlowPlayer').each(function() { var config = %(config)s; var audio = $(this).is('.audio'); if (audio) { config.plugins.controls.fullscreen = false; } if ($(this).is('.minimal')) { config.plugins.controls = null; } if ($(this).find('img').length > 0) { // has splash config.clip.autoPlay = true; } portlet_parents = $(this).parents('.portlet'); var playlist_selector = 'div#flowPlaylist'; if (portlet_parents.length > 0) { var portlet = true; // playlist has to be bound to unique item playlist_selector_id = portlet_parents.parent().attr('id')+'-playlist'; $(this).parent().find('.flowPlaylist-portlet-marker').attr('id', playlist_selector_id); playlist_selector = '#'+playlist_selector_id; if (audio) { config.plugins.controls.all = false; config.plugins.controls.play = true; config.plugins.controls.scrubber = true; config.plugins.controls.mute = true; config.plugins.controls.volume = false; } } else { var portlet = false; } if (!portlet) { $("#pl").scrollable({items:playlist_selector, size:4, clickable:false}); } // manual = playlist is setup using HTML tags, not using playlist array in config flowplayer(this, %(params)s, config).playlist(playlist_selector, {loop: true, manual: true})%(events)s; $(this).show(); $('.flowPlayerMessage').remove();
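Aside from the row-by-row $(this)-to-$self caching refactor above, __call__ is a Python %-template that emits JavaScript; the %(config)s, %(params)s and %(events)s slots are filled in before the response goes out. A hedged sketch of that substitution step (the function name and the use of json are assumptions, not the source's code):

import json

def render_player_js(template, config, params, events=''):
    # json.dumps yields valid JS object literals for the config/params slots;
    # events is spliced in verbatim as a chained-call suffix
    return template % dict(config=json.dumps(config),
                           params=json.dumps(params),
                           events=events)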
os.lseek(fd, size, 0)
if size <= 0: size = 1 os.lseek(fd, size-1, 0)
def expand(self, create = False, size = None): flags = os.O_WRONLY if create: flags |= os.O_CREAT makedirs(os.path.dirname(self.lofile))
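The expand() fix above is the standard sparse-file trick: seek to size-1 and write a single byte so the loopback file reports the right length without allocating blocks, with size clamped so the offset never goes negative. A hedged standalone sketch (Python 2; names are assumptions):

import os

def expand_sparse(path, size):
    fd = os.open(path, os.O_WRONLY | os.O_CREAT)
    if size <= 0:
        size = 1                  # lseek to -1 would fail; keep at least one byte
    os.lseek(fd, size - 1, 0)     # 0 == os.SEEK_SET
    os.write(fd, '\x00')          # the byte at offset size-1 fixes the file length
    os.close(fd)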
args = ["/sbin/dmsetup", "create", self.__name, "--table", table]
args = ["/sbin/dmsetup", "create", self.__name, "--uuid", "LIVECD-%s" % self.__name, "--table", table]
def create(self): if self.__created: return
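Tagging the mapping with --uuid, as the diff above does, gives later teardown code a stable handle for the device. A hedged sketch of the invocation (subprocess stands in for whatever runner the source's create() uses; name and table are placeholders):

import subprocess

def dmsetup_create(name, table):
    return subprocess.call(['/sbin/dmsetup', 'create', name,
                            '--uuid', 'LIVECD-%s' % name,
                            '--table', table])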
long = long,
long = long + " (Basic Video)",
def __get_image_stanzas(self, isodir): versions = [] kernels = self._get_kernel_versions() for kernel in kernels: for version in kernels[kernel]: versions.append(version)
try: os.unlink(self._instroot + "/etc/sysconfig/mkinitrd") except: pass
self.__restore_file(self._instroot + "/etc/sysconfig/mkinitrd") self.__restore_file(self._instroot + "/etc/dracut.conf")
def _unmount_instroot(self): try: os.unlink(self._instroot + "/etc/sysconfig/mkinitrd") except: pass LoopImageCreator._unmount_instroot(self)
f.write('MODULES+="squashfs ext4 ext3 ext2 vfat msdos "\n') f.write('MODULES+="sr_mod sd_mod ide-cd cdrom "\n') for module in self.__modules: if module == "=usb": f.write('MODULES+="ehci_hcd uhci_hcd ohci_hcd "\n') f.write('MODULES+="usb_storage usbhid "\n') elif module == "=firewire": f.write('MODULES+="firewire-sbp2 firewire-ohci "\n') f.write('MODULES+="sbp2 ohci1394 ieee1394 "\n') elif module == "=mmc": f.write('MODULES+="mmc_block sdhci sdhci-pci "\n') elif module == "=pcmcia": f.write('MODULES+="pata_pcmcia "\n') else: f.write('MODULES+="' + module + ' "\n')
f.write('MODULES+="' + self.__extra_filesystems() + '"\n') f.write('MODULES+="' + self.__extra_drivers() + '"\n') f.close() def __write_dracut_conf(self, path): if not os.path.exists(os.path.dirname(path)): makedirs(os.path.dirname(path)) f = open(path, "a") f.write('filesystems+="' + self.__extra_filesystems() + ' "\n') f.write('drivers+="' + self.__extra_drivers() + ' "\n')
def __write_initrd_conf(self, path): if not os.path.exists(os.path.dirname(path)): makedirs(os.path.dirname(path)) f = open(path, "a")
self.__write_initrd_conf(self._instroot + "/etc/dracut.conf")
self.__write_initrd_conf(self._instroot + "/etc/sysconfig/mkinitrd")
def _mount_instroot(self, base_on = None): LoopImageCreator._mount_instroot(self, base_on) self.__write_initrd_conf(self._instroot + "/etc/dracut.conf")
os.unlink(self._instroot + "/etc/dracut.conf")
os.unlink(self._instroot + "/etc/sysconfig/mkinitrd")
def _unmount_instroot(self): try: os.unlink(self._instroot + "/etc/dracut.conf") except: pass LoopImageCreator._unmount_instroot(self)
f.write('add_drivers+="squashfs ext4 ext3 ext2 vfat msdos "\n') f.write('add_drivers+="sr_mod sd_mod ide-cd cdrom "\n')
f.write('MODULES+="squashfs ext4 ext3 ext2 vfat msdos "\n') f.write('MODULES+="sr_mod sd_mod ide-cd cdrom "\n')
def __write_initrd_conf(self, path): if not os.path.exists(os.path.dirname(path)): makedirs(os.path.dirname(path)) f = open(path, "a")
f.write('add_drivers+="ehci_hcd uhci_hcd ohci_hcd "\n') f.write('add_drivers+="usb_storage usbhid "\n')
f.write('MODULES+="ehci_hcd uhci_hcd ohci_hcd "\n') f.write('MODULES+="usb_storage usbhid "\n')
def __write_initrd_conf(self, path): if not os.path.exists(os.path.dirname(path)): makedirs(os.path.dirname(path)) f = open(path, "a")
f.write('add_drivers+="firewire-sbp2 firewire-ohci "\n') f.write('add_drivers+="sbp2 ohci1394 ieee1394 "\n')
f.write('MODULES+="firewire-sbp2 firewire-ohci "\n') f.write('MODULES+="sbp2 ohci1394 ieee1394 "\n')
def __write_initrd_conf(self, path): if not os.path.exists(os.path.dirname(path)): makedirs(os.path.dirname(path)) f = open(path, "a")
f.write('add_drivers+="mmc_block sdhci sdhci-pci "\n')
f.write('MODULES+="mmc_block sdhci sdhci-pci "\n')
def __write_initrd_conf(self, path): if not os.path.exists(os.path.dirname(path)): makedirs(os.path.dirname(path)) f = open(path, "a")
f.write('add_drivers+="pata_pcmcia "\n')
f.write('MODULES+="pata_pcmcia "\n')
def __write_initrd_conf(self, path): if not os.path.exists(os.path.dirname(path)): makedirs(os.path.dirname(path)) f = open(path, "a")
f.write('add_drivers+="' + module + ' "\n')
f.write('MODULES+="' + module + ' "\n')
def __write_initrd_conf(self, path): if not os.path.exists(os.path.dirname(path)): makedirs(os.path.dirname(path)) f = open(path, "a")
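The long run of rows above is one mechanical rename, mkinitrd's MODULES+= becoming dracut's add_drivers+=, applied branch by branch. A hedged sketch that folds those branches into a table (alias names and driver lists are copied from the rows; the helper name is an assumption):

ALIAS_DRIVERS = {
    '=usb':      'ehci_hcd uhci_hcd ohci_hcd usb_storage usbhid',
    '=firewire': 'firewire-sbp2 firewire-ohci sbp2 ohci1394 ieee1394',
    '=mmc':      'mmc_block sdhci sdhci-pci',
    '=pcmcia':   'pata_pcmcia',
}

def dracut_driver_lines(modules):
    # base filesystems and block drivers, then one line per requested module
    lines = ['add_drivers+="squashfs ext4 ext3 ext2 vfat msdos "\n',
             'add_drivers+="sr_mod sd_mod ide-cd cdrom "\n']
    for module in modules:
        lines.append('add_drivers+="%s "\n' % ALIAS_DRIVERS.get(module, module))
    return lines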
"/usr/share/anaconda/syslinux-vesa-splash.jpg"
"/usr/share/anaconda/boot/syslinux-vesa-splash.jpg"
def __copy_syslinux_background(self, isodest): background_path = self._instroot + \ "/usr/share/anaconda/syslinux-vesa-splash.jpg"
append initrd=initrd%(index)s.img root=%(rootlabel)s rootfstype=%(isofstype)s %(liveargs)s %(extra)s label %(short)s menu label %(long)s %(basicvideo)s kernel vmlinuz%(index)s
def __get_image_stanza(self, is_xen, isDracut, **args): if isDracut: args["rootlabel"] = "live:CDLABEL=%(fslabel)s" % args else: args["rootlabel"] = "CDLABEL=%(fslabel)s" % args
kernel mboot.c32 append xen%(index)s.gz --- vmlinuz%(index)s root=%(rootlabel)s rootfstype=%(isofstype)s %(liveargs)s %(extra)s --- initrd%(index)s.img label %(short)s menu label %(long)s %(basicvideo)s
def __get_image_stanza(self, is_xen, isDracut, **args): if isDracut: args["rootlabel"] = "live:CDLABEL=%(fslabel)s" % args else: args["rootlabel"] = "CDLABEL=%(fslabel)s" % args
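Both stanza templates above hinge on the rootlabel choice in their shared context: dracut-based initrds expect a live:CDLABEL= prefix on root=, the older mkinitrd ones a bare CDLABEL=. A hedged one-function restatement of that branch:

def root_label(fslabel, is_dracut):
    # dracut's live root handler wants the live: scheme; mkinitrd did not
    return ('live:CDLABEL=%s' if is_dracut else 'CDLABEL=%s') % fslabel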
xdriver = "xdriver=vesa"
xdriver = "xdriver=vesa nomodeset"
def __get_image_stanzas(self, isodir): versions = [] kernels = self._get_kernel_versions() for kernel in kernels: for version in kernels[kernel]: versions.append(version)
if default: cfg += "menu default\n"
def __get_image_stanzas(self, isodir): versions = [] kernels = self._get_kernel_versions() for kernel in kernels: for version in kernels[kernel]: versions.append(version)
self.call(["/sbin/setfiles", "/etc/selinux/targeted/contexts/files/file_contexts", "-e", "/proc", "-e", "/sys", "-e", "/dev", "-e", "/selinux", "/"])
self.call(["/sbin/setfiles", "-e", "/proc", "-e", "/sys", "-e", "/dev", "-e", "/selinux", "/etc/selinux/targeted/contexts/files/file_contexts", "/"])
def relabel(self, ksselinux): # touch some files which get unhappy if they're not labeled correctly for fn in ("/etc/resolv.conf",): path = self.path(fn) f = file(path, "w+") os.chmod(path, 0644) f.close()
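The setfiles reordering above matters because setfiles parses its command line as options, then the file_contexts spec, then the paths to relabel, so the -e excludes must come before the spec file. A hedged sketch of the corrected call (subprocess stands in for the class's own call() helper):

import subprocess

def relabel_root():
    return subprocess.call(
        ['/sbin/setfiles',
         '-e', '/proc', '-e', '/sys', '-e', '/dev', '-e', '/selinux',
         '/etc/selinux/targeted/contexts/files/file_contexts',
         '/'])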
"/usr/lib/anaconda-runtime/syslinux-vesa-splash.jpg"
"/usr/share/anaconda/syslinux-vesa-splash.jpg"
def __copy_syslinux_background(self, isodest): background_path = self._instroot + \ "/usr/lib/anaconda-runtime/syslinux-vesa-splash.jpg"
return False
background_path = self._instroot + \ "/usr/lib/anaconda-runtime/syslinux-vesa-splash.jpg" if not os.path.exists(background_path): return False
def __copy_syslinux_background(self, isodest): background_path = self._instroot + \ "/usr/lib/anaconda-runtime/syslinux-vesa-splash.jpg"
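Across the rows above the splash image has lived at three different paths as anaconda moved it. A hedged fallback search covering all of them (the newest-first ordering is an assumption):

import os
import shutil

SPLASH_PATHS = (
    '/usr/share/anaconda/boot/syslinux-vesa-splash.jpg',   # newest location
    '/usr/share/anaconda/syslinux-vesa-splash.jpg',
    '/usr/lib/anaconda-runtime/syslinux-vesa-splash.jpg',  # oldest location
)

def copy_syslinux_background(instroot, isodest):
    for rel in SPLASH_PATHS:
        src = instroot + rel
        if os.path.exists(src):
            shutil.copyfile(src, isodest)
            return True
    return False   # no splash shipped; caller falls back to a plain menu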
v = v.replace("'", "\\'")
if v != None: v = v.replace("'", "\\'")
def linedataToObject(self, linedata, o): """ @type linedata: dict @param linedata: One line of BP data dict-ified. @type o: instance of mapper class from stampede_schema module. @param o: Passed in by the appropriate event handler method. Takes the dict of BP linedata, assigns contents to the class o as attributes, and does any global type massaging like transforming dict strings to numeric types. """ for k,v in linedata.items(): if k == 'level': continue if k == 'wf.id': k = 'wf_uuid' if k == 'condor.id': k = 'condor_id' if k == 'job.id': k = 'job_submit_seq' if k == 'task.id': k = 'task_submit_seq' if k == 'js.id': k = 'jobstate_submit_seq' if k == 'parent.wf.id': k = 'parent_workflow_id' if k == 'arguments': v = v.replace("'", "\\'") try: exec("o.%s = '%s'" % (k,v)) except: self.log.error('linedataToObject', msg='unable to process attribute %s with values: %s' % (k,v)) # global type re-assignments if hasattr(o, 'ts'): # make all timestamp values floats o.ts = float(o.ts) if hasattr(o, 'start_time') and o.start_time != None: o.start_time = float(o.start_time) if hasattr(o, 'cluster_start_time') and o.cluster_start_time != None: o.cluster_start_time = float(o.cluster_start_time) if hasattr(o, 'cluster_duration') and o.cluster_duration != None: o.cluster_duration = float(o.cluster_duration) if hasattr(o, 'duration') and o.duration != None: o.duration = float(o.duration) if hasattr(o, 'restart_count') and o.restart_count != None: o.restart_count = int(o.restart_count) return o
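The quote-escaping guard above exists only because linedataToObject builds each assignment with exec; setattr sidesteps the quoting problem entirely. A hedged alternative sketch (the key-rename table is copied from the source; the function name is an assumption):

KEY_MAP = {
    'wf.id': 'wf_uuid', 'condor.id': 'condor_id',
    'job.id': 'job_submit_seq', 'task.id': 'task_submit_seq',
    'js.id': 'jobstate_submit_seq', 'parent.wf.id': 'parent_workflow_id',
}

def assign_linedata(o, linedata):
    for k, v in linedata.items():
        if k == 'level':
            continue
        # no exec, so quotes or backslashes in v need no escaping at all
        setattr(o, KEY_MAP.get(k, k), v)
    return o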