rem (stringlengths 0 – 322k) | add (stringlengths 0 – 2.05M) | context (stringlengths 8 – 228k)
---|---|---|
if pathlist is None: self.path = sys.path else: self.path = pathlist
|
self.path = pathlist
|
def __init__(self, pathlist=None, importers=None, ownertypes=None): if pathlist is None: self.path = sys.path else: self.path = pathlist if ownertypes == None: self.ownertypes = _globalownertypes else: self.ownertypes = ownertypes if importers: self.shadowpath = importers else: self.shadowpath = {} self.inMakeOwner = 0 self.building = {}
|
return str(self.path)
|
return str(self.path or sys.path)
|
def __str__(self): return str(self.path)
|
for thing in self.path:
|
for thing in (self.path or sys.path):
|
def getmod(self, nm): mod = None for thing in self.path: if isinstance(thing, STRINGTYPE): owner = self.shadowpath.get(thing, -1) if owner == -1: owner = self.shadowpath[thing] = self.makeOwner(thing) if owner: mod = owner.getmod(nm) else: mod = thing.getmod(nm) if mod: break return mod
|
print files
|
def runtests(alltests, filters=None, configfile=None, run_executable=1): info = "Executing PyInstaller tests in: %s" % os.getcwd() print "*" * min(80, len(info)) print info print "*" * min(80, len(info)) OPTS = '' if configfile: # todo: quote correctly OTPS = ' -c "%s"' % configfile build_python = open("python_exe.build", "w") build_python.write(sys.executable+"\n") build_python.write("debug=%s" % __debug__+"\n") build_python.close() if not filters: tests = alltests else: tests = [] for part in filters: tests += [t for t in alltests if part in t and t not in tests] tests = [(len(x), x) for x in tests] tests.sort() path = os.environ["PATH"] counter = { "passed": [], "failed": [], "skipped": [] } for _,test in tests: test = os.path.splitext(os.path.basename(test))[0] if test in MIN_VERSION and MIN_VERSION[test] > sys.version_info: counter["skipped"].append(test) continue if test in DEPENDENCIES: failed = False for mod in DEPENDENCIES[test]: res = os.system(PYTHON + ' -c "import %s"' % mod) if res != 0: failed = True break if failed: print "Skipping test because module %s is missing" % mod counter["skipped"].append(test) continue _msg("BUILDING TEST", test) prog = string.join([PYTHON, PYOPTS, os.path.join(HOME, 'Build.py'), OPTS, test+".spec"], ' ') print "BUILDING:", prog res = os.system(prog) if res == 0 and run_executable: files = glob.glob(os.path.join('dist', test + '*')) print files for exe in files: exe = os.path.splitext(exe)[0] res_tmp = test_exe(exe[5:]) res = res or res_tmp if res == 0: _msg("FINISHING TEST", test, short=1) counter["passed"].append(test) else: _msg("TEST", test, "FAILED", short=1, sep="!!") counter["failed"].append(test) pprint.pprint(counter)
|
|
for row in category.get_model(): if row[0].lower() == feed_category.lower():
|
for row in categories: if row.lower() == feed_category.lower():
|
def update_details_page(): iface = iface_cache.get_interface(model.canonical_iface_uri(uri.get_text())) about.set_text('%s - %s' % (iface.get_name(), iface.summary)) icon_path = iface_cache.get_icon_path(iface) from zeroinstall.gtkui import icon icon_pixbuf = icon.load_icon(icon_path) if icon_pixbuf: icon_widget.set_from_pixbuf(icon_pixbuf)
|
xdgutils.add_to_menu(iface, icon_path, category.get_active_text())
|
xdgutils.add_to_menu(iface, icon_path, categories[category.get_active()])
|
def finish(): import xdgutils iface = iface_cache.get_interface(model.canonical_iface_uri(uri.get_text()))
|
name_to_impl[name] = (iface, impl)
|
name_to_impl[name] = (iface, impl, arch)
|
def add_iface(uri, arch): """Name implementations from feed, assign costs and assert that one one can be selected.""" if uri in ifaces_processed: return ifaces_processed.add(uri) iface_name = 'i%d' % len(ifaces_processed)
|
self.requires[iface] = selected_requires = []
|
def add_iface(uri, arch): """Name implementations from feed, assign costs and assert that one one can be selected.""" if uri in ifaces_processed: return ifaces_processed.add(uri) iface_name = 'i%d' % len(ifaces_processed)
|
|
selected_requires.append(d)
|
def add_iface(uri, arch): """Name implementations from feed, assign costs and assert that one one can be selected.""" if uri in ifaces_processed: return ifaces_processed.add(uri) iface_name = 'i%d' % len(ifaces_processed)
|
|
iface, impl = name_to_impl[bit]
|
iface, impl, arch = name_to_impl[bit]
|
def add_iface(uri, arch): """Name implementations from feed, assign costs and assert that one one can be selected.""" if uri in ifaces_processed: return ifaces_processed.add(uri) iface_name = 'i%d' % len(ifaces_processed)
|
class StupidSolver(Solver): """The standard (rather naive) Zero Install solver.""" def __init__(self, network_use, iface_cache, stores, extra_restrictions = None): """ @param network_use: how much use to make of the network @type network_use: L{model.network_levels} @param iface_cache: a cache of feeds containing information about available versions @type iface_cache: L{iface_cache.IfaceCache} @param stores: a cached of implementations (affects choice when offline or when minimising network use) @type stores: L{zerostore.Stores} @param extra_restrictions: extra restrictions on the chosen implementations @type extra_restrictions: {L{model.Interface}: [L{model.Restriction}]} """ Solver.__init__(self) self.network_use = network_use self.iface_cache = iface_cache self.stores = stores self.help_with_testing = False self.extra_restrictions = extra_restrictions or {} def solve(self, root_interface, arch): self.selections = {} self.requires = {} self.feeds_used = set() self.details = self.record_details and {} self._machine_group = None restrictions = {} debug(_("Solve! root = %s"), root_interface) def process(dep, arch): ready = True iface = self.iface_cache.get_interface(dep.interface) if iface in self.selections: debug("Interface requested twice; skipping second %s", iface) if dep.restrictions: warn("Interface requested twice; I've already chosen an implementation " "of '%s' but there are more restrictions! Ignoring the second set.", iface) return ready self.selections[iface] = None self.requires[iface] = selected_requires = [] assert iface not in restrictions restrictions[iface] = dep.restrictions impl = get_best_implementation(iface, arch) if impl: debug(_("Will use implementation %(implementation)s (version %(version)s)"), {'implementation': impl, 'version': impl.get_version()}) self.selections[iface] = impl if self._machine_group is None and impl.machine and impl.machine != 'src': self._machine_group = machine_groups.get(impl.machine, 0) debug(_("Now restricted to architecture group %s"), self._machine_group) for d in impl.requires: debug(_("Considering dependency %s"), d) use = d.metadata.get("use", None) if use not in arch.use: info("Skipping dependency; use='%s' not in %s", use, arch.use) continue if not process(d, arch.child_arch): ready = False selected_requires.append(d) else: debug(_("No implementation chould be chosen yet")); ready = False return ready def get_best_implementation(iface, arch): debug(_("get_best_implementation(%(interface)s), with feeds: %(feeds)s"), {'interface': iface, 'feeds': iface.feeds}) iface_restrictions = restrictions.get(iface, []) extra_restrictions = self.extra_restrictions.get(iface, None) if extra_restrictions: iface_restrictions = iface_restrictions + extra_restrictions impls = [] for f in usable_feeds(iface, arch): self.feeds_used.add(f) debug(_("Processing feed %s"), f) try: feed = self.iface_cache.get_interface(f)._main_feed if not feed.last_modified: continue if feed.name and iface.uri != feed.url and iface.uri not in feed.feed_for: info(_("Missing <feed-for> for '%(uri)s' in '%(feed)s'"), {'uri': iface.uri, 'feed': f}) if feed.implementations: impls.extend(feed.implementations.values()) except Exception, ex: warn(_("Failed to load feed %(feed)s for %(interface)s: %(exception)s"), {'feed': f, 'interface': iface, 'exception': str(ex)}) if not impls: info(_("Interface %s has no implementations!"), iface) return None if self.record_details: impls.sort(lambda a, b: compare(iface, a, b, iface_restrictions, arch)) best = impls[0] self.details[iface] = 
[(impl, get_unusable_reason(impl, iface_restrictions, arch)) for impl in impls] else: best = impls[0] for x in impls[1:]: if compare(iface, x, best, iface_restrictions, arch) < 0: best = x unusable = get_unusable_reason(best, iface_restrictions, arch) if unusable: info(_("Best implementation of %(interface)s is %(best)s, but unusable (%(unusable)s)"), {'interface': iface, 'best': best, 'unusable': unusable}) return None return best def compare(interface, b, a, iface_restrictions, arch): """Compare a and b to see which would be chosen first. @param interface: The interface we are trying to resolve, which may not be the interface of a or b if they are from feeds. @rtype: int""" a_stab = a.get_stability() b_stab = b.get_stability() r = cmp(is_unusable(b, iface_restrictions, arch), is_unusable(a, iface_restrictions, arch)) if r: return r r = cmp(a_stab == model.preferred, b_stab == model.preferred) if r: return r if self.network_use != model.network_full: r = cmp(get_cached(a), get_cached(b)) if r: return r stab_policy = interface.stability_policy if not stab_policy: if self.help_with_testing: stab_policy = model.testing else: stab_policy = model.stable if a_stab >= stab_policy: a_stab = model.preferred if b_stab >= stab_policy: b_stab = model.preferred r = cmp(a_stab, b_stab) if r: return r r = cmp(a.version, b.version) if r: return r r = cmp(arch.os_ranks.get(b.os, None), arch.os_ranks.get(a.os, None)) if r: return r r = cmp(arch.machine_ranks.get(b.machine, None), arch.machine_ranks.get(a.machine, None)) if r: return r if self.network_use == model.network_full: r = cmp(get_cached(a), get_cached(b)) if r: return r return cmp(a.id, b.id) def usable_feeds(iface, arch): """Return all feeds for iface that support arch. @rtype: generator(ZeroInstallFeed)""" yield iface.uri for f in iface.feeds: if f.os in arch.os_ranks and \ (f.machine is None or f.machine in arch.machine_ranks): yield f.uri else: debug(_("Skipping '%(feed)s'; unsupported architecture %(os)s-%(machine)s"), {'feed': f, 'os': f.os, 'machine': f.machine}) def is_unusable(impl, restrictions, arch): """@return: whether this implementation is unusable. @rtype: bool""" return get_unusable_reason(impl, restrictions, arch) != None def get_unusable_reason(impl, restrictions, arch): """ @param impl: Implementation to test. @type restrictions: [L{model.Restriction}] @return: The reason why this impl is unusable, or None if it's OK. @rtype: str @note: The restrictions are for the interface being requested, not the interface of the implementation; they may be different when feeds are being used.""" machine = impl.machine if machine and self._machine_group is not None: if machine_groups.get(machine, 0) != self._machine_group: return _("Incompatible with another selection from a different architecture group") for r in restrictions: if not r.meets_restriction(impl): return _("Incompatible with another selected implementation") stability = impl.get_stability() if stability <= model.buggy: return stability.name if self.network_use == model.network_offline and not get_cached(impl): return _("Not cached and we are off-line") if impl.os not in arch.os_ranks: return _("Unsupported OS") if machine not in arch.machine_ranks: if machine == 'src': return _("Source code") return _("Unsupported machine type") return None def get_cached(impl): """Check whether an implementation is available locally. 
@type impl: model.Implementation @rtype: bool """ if isinstance(impl, model.DistributionImplementation): return impl.installed if impl.local_path: return os.path.exists(impl.local_path) else: try: path = self.stores.lookup_any(impl.digests) assert path return True except BadDigest: return False except NotStored: return False self.ready = process(model.InterfaceDependency(root_interface), arch)
|
def add_iface(uri, arch): """Name implementations from feed, assign costs and assert that one one can be selected.""" if uri in ifaces_processed: return ifaces_processed.add(uri) iface_name = 'i%d' % len(ifaces_processed)
|
|
loader = gtk.gdk.PixbufLoader(format)
|
loader = gtk.gdk.PixbufLoader('png')
|
def size_prepared_cb(loader, width, height): dest_width = icon_width or width dest_height = icon_height or height
|
for mo in glob.glob("locale/*/LC_MESSAGES/zero-install.mo"):
|
mo_files = glob.glob("share/locale/*/LC_MESSAGES/zero-install.mo") assert mo_files for mo in mo_files:
|
def _compile_po_files(self): i18nfiles = [] for mo in glob.glob("locale/*/LC_MESSAGES/zero-install.mo"): dest = os.path.dirname(os.path.join('share', mo)) i18nfiles.append((dest, [mo])) return i18nfiles
|
locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
|
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
|
def testLangs(self): try: locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
|
full = os.path.join(root, str.replace(sub[1:], '/', os.sep))
|
full = os.path.join(root, sub[1:].replace('/', os.sep))
|
def recurse(sub): # To ensure that a line-by-line comparison of the manifests # is possible, we require that filenames don't contain newlines. # Otherwise, you can name a file so that the part after the \n # would be interpreted as another line in the manifest. if '\n' in sub: raise BadDigest("Newline in filename '%s'" % sub) assert sub.startswith('/')
|
print bit
|
def add_iface(uri, arch): """Name implementations from feed, assign costs and assert that one one can be selected.""" if uri in ifaces_processed: return ifaces_processed.add(uri)
|
|
print "%s (%s)" % (iface, impl.get_version())
|
if comment_problem: print "%s (%s)" % (iface, impl.get_version())
|
def add_iface(uri, arch): """Name implementations from feed, assign costs and assert that one one can be selected.""" if uri in ifaces_processed: return ifaces_processed.add(uri)
|
print line
|
if comment_problem: print line
|
def add_iface(uri, arch): """Name implementations from feed, assign costs and assert that one one can be selected.""" if uri in ifaces_processed: return ifaces_processed.add(uri)
|
print line
|
warn("Unexpected output from solver: %s", line)
|
def add_iface(uri, arch): """Name implementations from feed, assign costs and assert that one one can be selected.""" if uri in ifaces_processed: return ifaces_processed.add(uri)
|
class TestUnpackPython(BaseTest, AbstractTestUnpack):
|
class TestUnpackPython(AbstractTestUnpack, BaseTest):
|
def assert_manifest(self, required): alg_name = required.split('=', 1)[0] manifest.fixup_permissions(self.tmpdir) sha1 = alg_name + '=' + manifest.add_manifest_file(self.tmpdir, manifest.get_algorithm(alg_name)).hexdigest() self.assertEquals(sha1, required)
|
class TestUnpackGNU(BaseTest, AbstractTestUnpack):
|
class TestUnpackGNU(AbstractTestUnpack, BaseTest):
|
def setUp(self): AbstractTestUnpack.setUp(self) unpack._tar_version = 'Solaris tar' assert not unpack._gnu_tar()
|
def solve(self, root_interface, arch):
|
def solve(self, root_interface, root_arch):
|
def solve(self, root_interface, arch): """Get the best implementation of root_interface and all of its dependencies. @param root_interface: the URI of the program to be solved @type root_interface: str @param arch: the desired target architecture @type arch: L{arch.Architecture} @postcondition: self.ready, self.selections and self.feeds_used are updated""" raise NotImplementedError("Abstract")
|
@param arch: the desired target architecture @type arch: L{arch.Architecture}
|
@param root_arch: the desired target architecture @type root_arch: L{arch.Architecture}
|
def solve(self, root_interface, arch): """Get the best implementation of root_interface and all of its dependencies. @param root_interface: the URI of the program to be solved @type root_interface: str @param arch: the desired target architecture @type arch: L{arch.Architecture} @postcondition: self.ready, self.selections and self.feeds_used are updated""" raise NotImplementedError("Abstract")
|
_version_start_reqexp = '-[0-9]'
|
_name_version_regexp = '^(.+)-([^-]+)$' nameversion = re.compile(_name_version_regexp)
|
def get_package_info(self, package, factory): _version_start_reqexp = '-[0-9]'
|
match = re.search(_version_start_reqexp, pkgname)
|
match = nameversion.search(pkgname)
|
def get_package_info(self, package, factory): _version_start_reqexp = '-[0-9]'
|
warn(_('Cannot parse version from Ports package named "%(pkgname)s"'), {'name': pkgname})
|
warn(_('Cannot parse version from Ports package named "%(pkgname)s"'), {'pkgname': pkgname})
|
def get_package_info(self, package, factory): _version_start_reqexp = '-[0-9]'
|
name = pkgname[0:match.start()] version = try_cleanup_distro_version(pkgname[match.start() + 1:])
|
name = match.group(1) if name != package: continue version = try_cleanup_distro_version(match.group(2))
|
def get_package_info(self, package, factory): _version_start_reqexp = '-[0-9]'
|
if selected == item:
|
if selected is item:
|
def set_items(self, items): self.model.clear() selected = self.policy.solver.selections.get(self.interface, None) for item, unusable in items: new = self.model.append() self.model[new][ITEM] = item self.model[new][VERSION] = item.get_version() self.model[new][RELEASED] = item.released or "-" self.model[new][FETCH] = utils.get_fetch_info(self.policy, item) if item.user_stability: if item.user_stability == model.insecure: self.model[new][STABILITY] = _('INSECURE') elif item.user_stability == model.buggy: self.model[new][STABILITY] = _('BUGGY') elif item.user_stability == model.developer: self.model[new][STABILITY] = _('DEVELOPER') elif item.user_stability == model.testing: self.model[new][STABILITY] = _('TESTING') elif item.user_stability == model.stable: self.model[new][STABILITY] = _('STABLE') elif item.user_stability == model.packaged: self.model[new][STABILITY] = _('PACKAGED') elif item.user_stability == model.preferred: self.model[new][STABILITY] = _('PREFERRED') else: self.model[new][STABILITY] = _(str(item.upstream_stability) or str(model.testing)) self.model[new][ARCH] = item.arch or _('any') if selected == item: self.model[new][WEIGHT] = pango.WEIGHT_BOLD else: self.model[new][WEIGHT] = pango.WEIGHT_NORMAL self.model[new][UNUSABLE] = bool(unusable) self.model[new][LANGS] = item.langs or '-' self.model[new][NOTES] = _(unusable)
|
dest = os.path.dirname(os.path.join('share', mo))
|
dest = os.path.dirname(mo)
|
def _compile_po_files(self): i18nfiles = [] mo_files = glob.glob("share/locale/*/LC_MESSAGES/zero-install.mo") assert mo_files for mo in mo_files: dest = os.path.dirname(os.path.join('share', mo)) i18nfiles.append((dest, [mo])) return i18nfiles
|
name = stability.get_model()[i][0].lower()
|
name = ['stable', 'testing', 'developer'][i-1]
|
def set_stability_policy(combo): i = stability.get_active() if i == 0: new_stability = None else: name = stability.get_model()[i][0].lower() new_stability = stability_levels[name] interface.set_stability_policy(new_stability) writer.save_interface(interface) policy.recalculate()
|
gtkutils.show_message_box(self, _("Failed to delete:\n%s") % '\n'.join(errors))
|
gtkutils.show_message_box(self.window, _("Failed to delete:\n%s") % '\n'.join(errors))
|
def _delete(self): errors = []
|
if not r.meets_restriction(candidate):
|
if candidate.__class__ is not _DummyImpl and not r.meets_restriction(candidate):
|
def find_dependency_candidates(requiring_impl_var, dependency): dep_iface = self.iface_cache.get_interface(dependency.interface) dep_union = [sat.neg(requiring_impl_var)] for candidate in impls_for_iface[dep_iface]: for r in dependency.restrictions: if not r.meets_restriction(candidate): #warn("%s rejected due to %s", candidate.get_version(), r) if candidate.version is not None: break # else it's the dummy version that matches everything else: c_var = impl_to_var.get(candidate, None) if c_var is not None: dep_union.append(c_var) # else we filtered that version out, so ignore it if dep_union: problem.add_clause(dep_union) else: problem.assign(requiring_impl_var, 0)
|
result, __, __, __ = self._transaction.proxy.GetProgress()
|
result, __, __, __ = self.proxy.GetProgress()
|
def getPercentage(self): result = self.get_prop('Percentage') if result is None: result, __, __, __ = self._transaction.proxy.GetProgress() return result
|
_logger_pk.warn(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package})
|
_logger_pk.warn(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package_name})
|
def __package_cb(self, status, id, summary): from zeroinstall.injector import distro
|
app_label = "Async Queue"
|
def __unicode__(self): return self.name
|
|
override = kwargs.pop('priority') or 2
|
override_priority = kwargs.pop('priority') or 2
|
def delay(self, *args, **kwargs): try: override = kwargs.pop('priority') or 2 except KeyError: override = priority
|
override = priority
|
override_priority = priority try: override_bucket = kwargs.pop('bucket') or None except KeyError: override_bucket = bucket
|
def delay(self, *args, **kwargs): try: override = kwargs.pop('priority') or 2 except KeyError: override = priority
|
when='onetime', label=label, bucket=bucket, priority=override)
|
when='onetime', label=label, bucket=override_bucket, priority=override_priority)
|
def delay(self, *args, **kwargs): try: override = kwargs.pop('priority') or 2 except KeyError: override = priority
|
def get_tasks(bucket=None, schedule=None, status=None):
|
def get_tasks(self, bucket=None, schedule=None, status=None):
|
def get_tasks(bucket=None, schedule=None, status=None): rs = AsyncTask.objects.get_query_set() if bucket: rs = rs.filter(bucket=bucket) if schedule: rs = rs.filter(task_type=schedule) if status: rs = rs.filter(status=status) return [ t for t in rs.all() ]
|
c = RequestContext( {'errormessage': errormessage,
|
d = {'errormessage': errormessage,
|
def testdatabase(request, configuration_id): """Checks connection status of database connection, given configuration_id only for staff member! #todo: this function is still partly hard-coded for Jdbc2Ei """ if not(request.user.is_staff): raise Http404 is_standalone = request.GET.get('is_standalone', None) errormessage = '' configuration = Configuration.objects.get(pk=int(configuration_id)) try: connector = configuration.getConnector() except: #geen DataSource ingesteld errormessage = _("No DataSource could be found for this " "configuration, please configure a DataSource.") try: isAlive = connector.isAlive() except: isAlive = 'Connector is not reachable' try: data = connector.executeTest() dataCount = len(data) canExecuteQuery = True except: canExecuteQuery = False dataCount = -1 #ei specific. getUrl must be called after execute, because execute # can set/change the url try: url = connector.getUrl() except: url = 'could not connector.getUrl()' t = loader.get_template('base/testdatabase.html') c = RequestContext( {'errormessage': errormessage, 'configuration': configuration, 'is_standalone': get_and_set_is_standalone(request, is_standalone), 'isAlive': isAlive, 'canExecuteQuery': canExecuteQuery, 'dataCount': dataCount, 'url': url, 'user': request.user, 'breadcrumbs': [{'name': u'%s' % _('Database connection list'), 'url': reverse('testdatabase_list')}, {'name': u'%s' % _('Test database connection')}], 'LANGUAGES': settings.LANGUAGES, }) return HttpResponse(t.render(c))
|
'LANGUAGES': settings.LANGUAGES, }) return HttpResponse(t.render(c))
|
} return render_to_response( 'base/testdatabase.html', d, context_instance=RequestContext(request))
|
def testdatabase(request, configuration_id): """Checks connection status of database connection, given configuration_id only for staff member! #todo: this function is still partly hard-coded for Jdbc2Ei """ if not(request.user.is_staff): raise Http404 is_standalone = request.GET.get('is_standalone', None) errormessage = '' configuration = Configuration.objects.get(pk=int(configuration_id)) try: connector = configuration.getConnector() except: #geen DataSource ingesteld errormessage = _("No DataSource could be found for this " "configuration, please configure a DataSource.") try: isAlive = connector.isAlive() except: isAlive = 'Connector is not reachable' try: data = connector.executeTest() dataCount = len(data) canExecuteQuery = True except: canExecuteQuery = False dataCount = -1 #ei specific. getUrl must be called after execute, because execute # can set/change the url try: url = connector.getUrl() except: url = 'could not connector.getUrl()' t = loader.get_template('base/testdatabase.html') c = RequestContext( {'errormessage': errormessage, 'configuration': configuration, 'is_standalone': get_and_set_is_standalone(request, is_standalone), 'isAlive': isAlive, 'canExecuteQuery': canExecuteQuery, 'dataCount': dataCount, 'url': url, 'user': request.user, 'breadcrumbs': [{'name': u'%s' % _('Database connection list'), 'url': reverse('testdatabase_list')}, {'name': u'%s' % _('Test database connection')}], 'LANGUAGES': settings.LANGUAGES, }) return HttpResponse(t.render(c))
|
locations_from_filters = connector.execute( ('select distinct locationid from filters' 'where id=\'%s\' order by location') % filter_id, ['id'])
|
q = ('select distinct locationid from filters ' + \ 'where id=\'%s\' order by location') % filter_id locations_from_filters = connector.execute(q, ['id'])
|
def service_get_locations(request, configuration_id, filter_id): """get locations given configuration_id and filter_id return id, name (from filters) x, y, parentid (from locations) """ configuration = get_object_or_404(Configuration, pk=configuration_id) connector = configuration.getConnector() if configuration.datasourcetype == Configuration.DATASOURCE_TYPE_EI: #some 'hacking' is needed to get the desired result from Jdbc... #first get the locations from filters locations_from_filters = connector.execute( ('select distinct locationid from filters' 'where id=\'%s\' order by location') % filter_id, ['id']) #then join the the locations from locations, to get all location data location_array = [] parent_dict = {} location_all_dict = cache.get('all_locations' + str(configuration_id)) if location_all_dict == None: query = ('select id, name, parentid, longitude, latitude from ' 'locations order by name') all_locations = connector.execute( query, ['id', 'name', 'parentid', 'longitude', 'latitude'], debug=settings.DEBUG) location_all_dict = {} for row in all_locations: location_all_dict[row['id']] = row cache.set('all_locations' + str(configuration_id), location_all_dict, 300) for row in locations_from_filters: id = row['id'] result = location_all_dict[id] result['in_filter'] = 1 location_array.append(result) parent_dict[result['id']] = True # loop again to find missing parents, and again, and again until all # parents are found added = True # Unused for row in location_array: id = row['parentid'] if not(id is None or id in parent_dict): result = location_all_dict[id] result['in_filter'] = 0 location_array.append(result) parent_dict[id] = True elif configuration.datasourcetype == Configuration.DATASOURCE_TYPE_DUMMY: location_array = connector.execute('get_locations') #print location_dict return render_to_response('base/location.json', {'data': location_array})
|
return self.context.context.absolute_url()
|
return self.context.aq_parent.absolute_url()
|
def nextURL(self): return self.context.context.absolute_url()
|
write_line(fid, 'HEIGHT_ABOVE_VENT = %s' % Height_above_vent_string, indent=5)
|
write_line(fid, 'HEIGHT_ABOVE_VENT_(M) = %s' % Height_above_vent_string, indent=5)
|
def write_input_file(self, verbose=False): """Generate input file for Fall3d-6 """
|
self.databasefile, 'profile', self.topography)
|
self.databasefile, self.topography, 'profile')
|
def set_database(self, verbose=True): """Create meteorological database Requires - input file - topography - windprofile """ executable = os.path.join(self.utilities_dir, 'SetDbs', 'SetDbs.PUB.exe') logfile = self.basepath + '.SetDbs.log' if verbose: header('Building meteorological database (SetDbs)')
|
cmd = '%s '*6 % (executable, logfile,
|
cmd = '%s '*8 % (executable, logfile,
|
def set_source(self, verbose=True): """Create eruptive source file Requires - input file - grain file - database file """ executable = os.path.join(self.utilities_dir, 'SetSrc', 'SetSrc.PUB.exe') logfile = self.basepath + '.SetSrc.log'
|
self.databasefile)
|
self.databasefile, 'FALL3D', 'YES')
|
def set_source(self, verbose=True): """Create eruptive source file Requires - input file - grain file - database file """ executable = os.path.join(self.utilities_dir, 'SetSrc', 'SetSrc.PUB.exe') logfile = self.basepath + '.SetSrc.log'
|
if output_dir is not None:
|
if output_dir is None:
|
def __init__(self, params, timestamp_output=True, store_locally=False, dircomment=None, output_dir=None, echo=True, verbose=True): """Create AIM instance, common file names Optional arguments: timestamp_output: If True, create unique output directory with timestamp If False, overwrite output at every run store_locally: If True, store in same directory where scenario scripts are stored If False, use environment variable TEPHRADATA for output. dircomment (string or None): Optional comment added to output dir echo (True or False): Optionally print output to screen as well as log file. Default True. verbose: (True, False) determine if diagnostic output is to be printed """ params = params.copy() # Ensure modifications are kept local #--------------------------------- # AIM names, files and directories #--------------------------------- # AIM names and directories self.scenario_name = scenario_name = params['scenario_name'] import sys if len(sys.argv) > 1: # Assume that only postprocessing is requested using data in provided directory. self.postprocessing = True output_dir = sys.argv[1] else: # Create output dir self.postprocessing = False
|
self.resultfile,
|
self.resultfile + '.nc',
|
def nc2grd(self, verbose=True): """Run nc2grd Requires - input file - source file - grain file - database file """ executable = os.path.join(self.utilities_dir, 'nc2grd', 'nc2grd.exe') logfile = self.basepath + '.nc2grd.log' if verbose: header('Running nc2grd')
|
except e: header('WARNING: Shortcut %s does not appear to be working. Use real directory %s instead.' % (symlink, target)) print 'Error message was', e
|
except: header('WARNING: Shortcut %s does not appear to be working. Use real directory %s instead.' % (aim.symlink, target))
|
def run_scenario(scenario, dircomment=None, store_locally=False, timestamp_output=True, verbose=True): """Run volcanic ash impact scenario The argument scenario can be either * A Python script or * A Dictionary In any case scenario must specify all required volcanological parameters as stated in the file required_parameters.txt. If any parameters are missing or if additional parameters are specified an exception will be raised. Optional parameters: dircomment: will be added to output dir for easy identification. store_locally: if True, don't use TEPHRAHOME for outputs timestamp_output: If True, add timestamp to output dir If False overwrite previous output with same name """ t_start = time.time() # Determine if scenario is a Python script or # a parameter dictionary try: # Get parameters specified in scenario_file params = get_scenario_parameters(scenario) except: # This is not a valid Python script. # See if it behaves like a dictionary try: scenario.keys() except: # Not a dictionary either. Raise exception msg = 'Argument scenario must be either the name of a ' msg += 'Python script or a dictionary' raise Exception(msg) else: # The scenario argument is the parameters dictionary params = scenario # Determine if any of the parameters provide are a tuple # in which case each combination is run separately for name in params: p = params[name] if type(p) is tuple: # Unpack tuple and run scenario for each parameter value # This recursion will continue until no parameters # have tuples as values params_unpacked = params.copy() for value in p: params_unpacked[name] = value aim = run_scenario(params_unpacked, dircomment=dircomment + '_%s_%s' % (name, value), store_locally=store_locally, timestamp_output=timestamp_output, verbose=verbose) return # Instantiate model object aim = AIM(params, dircomment=dircomment, store_locally=store_locally, timestamp_output=timestamp_output, verbose=verbose) if not aim.postprocessing: # Store scenario script, input data files and # actual parameters to provide a complete audit trail aim.store_inputdata(verbose=verbose) # Generate input file for Fall3d-6 aim.write_input_file(verbose=verbose) # Generate input data files in Fall3D format aim.generate_windprofile(verbose=verbose) aim.generate_topography(verbose=verbose) # Run scripts for Fall3d aim.set_granum(verbose=verbose) aim.set_database(verbose=verbose) aim.set_source(verbose=verbose) aim.run_fall3d(verbose=verbose) # Fall3d postprocessing nc2grd aim.nc2grd() # AIM post processing #aim.convert_ncgrids_to_asciigrids(verbose=verbose) aim.convert_surfergrids_to_asciigrids() aim.generate_contours(verbose=True) aim.organise_output() # Done if verbose: header('Simulation finished in %.2f seconds, output data are in %s' % (time.time() - t_start, aim.output_dir)) try: target = os.readlink(aim.symlink) except e: header('WARNING: Shortcut %s does not appear to be working. Use real directory %s instead.' % (symlink, target)) print 'Error message was', e else: if target == aim.output_dir: header('Shortcut to output data is: %s -> %s' % (aim.symlink, target)) else: header('WARNING: Shortcut %s has been changed by more recent run to: %s' % (aim.symlink, target)) print # Return object in case user wants access to it # (e.g. for further postprocessing) return aim
|
timestamp = filename[0].split('.')[3] if timestamp != current_timestamp: if verbose: print 'Moving %s to /tmp' % filename cmd = 'cd %s; /bin/mv -f %s /tmp' % (work_area, filename) run(cmd, verbose=verbose)
|
if filename.endswith('.pressure.nc'): timestamp = filename.split('.')[3] if timestamp != current_timestamp: if verbose: print 'Moving %s to /tmp' % filename cmd = 'cd %s; /bin/mv -f %s /tmp' % (work_area, filename) run(cmd, verbose=verbose)
|
def download_wind_data(url, verbose=True): """Download data files """ # Get available files fid = urllib2.urlopen(url) print dir(fid) # Select files to download files = [] for line in fid.readlines(): fields = line.split() filename = fields[-1] fields = filename.split('.') if fields[0] == 'IDY25100': msg = 'File %s obtained from %s does not look like an ACCESS file. I expected suffix .pressure.nc' % (filename, url) assert filename.endswith('.pressure.nc'), msg if fields[1] == 'pop-flds' and fields[2] == 'pop-lvls': hour = int(fields[4]) if hour <= last_hour: files.append(filename) if len(files) == 0: msg = 'Did not get any suitable ACCESS wind files from %s' % url raise Exception(msg) makedir(work_area) # Clear out files different from this batch (i.e. older) current_timestamp = files[0].split('.')[3] for filename in os.listdir(work_area): timestamp = filename[0].split('.')[3] if timestamp != current_timestamp: if verbose: print 'Moving %s to /tmp' % filename cmd = 'cd %s; /bin/mv -f %s /tmp' % (work_area, filename) run(cmd, verbose=verbose) # Download them if not already there for filename in files: if verbose: header('Downloading %s from %s' % (filename, url)) cmd = 'cd %s; wget -c %s/%s' % (work_area, url, filename) # -c option requests wget to continue partial downloads run(cmd, verbose=verbose)
|
os.chdir(os.path.join(FALL3DHOME, fall3d, 'Scripts')) for program in ['SetDbs', 'SetGrn', 'SetSrc', 'manager', 'Fall3d_Pub']: replace_string_in_file('Script-' + program, 'set HOME=/Users/arnaufolch/Documents/Software/Fall3d-6.0/PUB/Fall3d-6.2-PUB', 'set HOME=%s' % os.path.join(FALL3DHOME, fall3d), verbose=False)
|
def set_compiler(filename): replace_string_in_file(filename, 'FC= ifort', '#FC= ifort') replace_string_in_file(filename, 'LINKER= ifort', '#LINKER= ifort') replace_string_in_file(filename, 'FFLAGS= -132', '#FFLAGS= -132') replace_string_in_file(filename, 'LINKFLAGS= -132', '#LINKFLAGS= -132') replace_string_in_file(filename, '#FC= gfortran', 'FC= gfortran') replace_string_in_file(filename, '#LINKER= gfortran', 'LINKER= gfortran') replace_string_in_file(filename, '#FFLAGS= -ffixed', 'FFLAGS= -ffixed') replace_string_in_file(filename, '#LINKFLAGS= -ffixed', 'LINKFLAGS= -ffixed')
|
|
t_start = self.params['Start_time_of_eruption']
|
try: t_start = float(self.params['Start_time_of_eruption']) except: t_start = float(self.params['Start_time_of_eruption'][0])
|
def generate_windprofile(self, verbose=False): """Read wind profile data in the format Hour 1 10 10 14 10 10 4 10 10 1 10 10 -2 10 10 -12 10 10 -30 Hour 2 10 10 14 10 10 4 ... Each row under each Hour heading correspond to an element in zlayers. Alternatively, this format can be specified as Constant 10 10 14 10 10 4 10 10 1 10 10 -2 10 10 -12 10 10 -30 in which case values will be reused for the simulation duration """ if self.meteorological_model == 'ncep1': return zlayers = self.params['wind_altitudes'] nz=len(zlayers)
|
current_timestamp = files[0].split()[3]
|
current_timestamp = files[0].split('.')[3]
|
def download_wind_data(url, verbose=True): """Download data files """ # Get available files fid = urllib2.urlopen(url) print dir(fid) # Select files to download files = [] for line in fid.readlines(): fields = line.split() filename = fields[-1] fields = filename.split('.') if fields[0] == 'IDY25100': msg = 'File %s obtained from %s does not look like an ACCESS file. I expected suffix .pressure.nc' % (filename, url) assert filename.endswith('.pressure.nc'), msg if fields[1] == 'pop-flds' and fields[2] == 'pop-lvls': hour = int(fields[4]) if hour <= last_hour: files.append(filename) if len(files) == 0: msg = 'Did not get any suitable ACCESS wind files from %s' % url raise Exception(msg) makedir(work_area) # Clear out files different from this batch (i.e. older) current_timestamp = files[0].split()[3] for filename in os.listdir(work_area): timestamp = filename[0].split()[3] if timestamp != current_timestamp: if verbose: print 'Moving %s to /tmp' % filename cmd = 'cd %s; /bin/mv -f %s /tmp' % (work_area, filename) run(cmd, verbose=verbose) # Download them if not already there for filename in files: if verbose: header('Downloading %s from %s' % (filename, url)) cmd = 'cd %s; wget -c %s/%s' % (work_area, url, filename) # -c option requests wget to continue partial downloads run(cmd, verbose=verbose)
|
timestamp = filename[0].split()[3]
|
timestamp = filename[0].split('.')[3]
|
def download_wind_data(url, verbose=True): """Download data files """ # Get available files fid = urllib2.urlopen(url) print dir(fid) # Select files to download files = [] for line in fid.readlines(): fields = line.split() filename = fields[-1] fields = filename.split('.') if fields[0] == 'IDY25100': msg = 'File %s obtained from %s does not look like an ACCESS file. I expected suffix .pressure.nc' % (filename, url) assert filename.endswith('.pressure.nc'), msg if fields[1] == 'pop-flds' and fields[2] == 'pop-lvls': hour = int(fields[4]) if hour <= last_hour: files.append(filename) if len(files) == 0: msg = 'Did not get any suitable ACCESS wind files from %s' % url raise Exception(msg) makedir(work_area) # Clear out files different from this batch (i.e. older) current_timestamp = files[0].split()[3] for filename in os.listdir(work_area): timestamp = filename[0].split()[3] if timestamp != current_timestamp: if verbose: print 'Moving %s to /tmp' % filename cmd = 'cd %s; /bin/mv -f %s /tmp' % (work_area, filename) run(cmd, verbose=verbose) # Download them if not already there for filename in files: if verbose: header('Downloading %s from %s' % (filename, url)) cmd = 'cd %s; wget -c %s/%s' % (work_area, url, filename) # -c option requests wget to continue partial downloads run(cmd, verbose=verbose)
|
run('chmod +w %s' % actual_params_file, verbose=verbose)
|
if os.path.isfile(actual_params_file): run('chmod +w %s' % actual_params_file, verbose=verbose)
|
def store_inputdata(self, verbose=False): """Create exact copy of input data into output area The intention is to ensure that all output has an audit trail. """ audit_dir = os.path.join(self.output_dir, 'input_data') makedir(audit_dir) # Store input files s = 'cp %s %s' % (self.wind_profile, audit_dir) run(s, verbose=verbose) #s = 'cp %s %s' % (self.topography_grid, audit_dir) #run(s, verbose=verbose) scenario_file = self.params['scenario_name'] + '.py' s = 'cp %s %s' % (scenario_file, audit_dir) run(s, verbose=verbose) # Store actual parameters (as Python file) actual_params_file = os.path.join(audit_dir, 'actual_parameters.py') run('chmod +w %s' % actual_params_file, verbose=verbose) # In case it was there already fid = open(actual_params_file, 'w') fid.write('"""All actual parameters used in scenario %s\n\n'\ % self.basepath) fid.write('This file is automatically generated by AIM\n') fid.write('and in serves a log of all input parameters used in\n') fid.write('Fall3d/AIM whether supplied or derived.\n') fid.write('"""\n\n\n') for param in self.params: value = self.params[param] fid.write('%s = %s\n' % (param, value)) fid.close() # Set all files to read only to avoid accidental changes s = 'chmod -R -w %s' % audit_dir run(s, verbose=verbose)
|
except e: print 'Could not clean up: %s ' % e
|
except: print 'Could not clean up'
|
def __init__(self, params, timestamp_output=True, store_locally=False, dircomment=None, verbose=True): """Create AIM instance, common file names and start logging Optional arguments: timestamp_output: If True, create unique output directory with timestamp If False, overwrite output at every run store_locally: If True, store in same directory where scenario scripts are stored If False, use environment variable TEPHRADATA for output. dircomment (string or None): Optional comment added to output dir verbose: (True, False) determine if diagnostic output is to be printed """ params = params.copy() # Ensure modifications are kept local #--------------------------------- # AIM names, files and directories #--------------------------------- # AIM names and directories self.scenario_name = scenario_name = params['scenario_name'] import sys if len(sys.argv) > 1: # Assume that only postprocessing is requested using data in provided directory. self.postprocessing = True output_dir = sys.argv[1] else: # Create output dir self.postprocessing = False if store_locally: # FIXME (Obsolete) output_dir = os.path.join(os.getcwd(), tephra_output_dir) else: output_dir = get_tephradata() # Build output datastructure like # $TEPHRADATA/<scenario>/<scenario>_user_timestamp output_dir = os.path.join(output_dir, 'scenarios') output_dir = os.path.join(output_dir, scenario_name) scenario_dir = get_username() if timestamp_output: scenario_dir += '_' + get_timestamp()
|
cellsize = (xmax-xmin)/(cols-1) assert cellsize == (ymax-ymin)/(rows-1)
|
cellsize = (xmax-xmin)/cols assert numpy.allclose(cellsize, (ymax-ymin)/rows)
|
def nc2asc(ncfilename, subdataset, projection=None, verbose=False): """Extract given subdataset from ncfile name and create one ASCII file for each band. This function is reading the NetCDF file using the Python Library Scientific.IO.NetCDF Time is assumed to be in whole hours. """ basename, _ = os.path.splitext(ncfilename) # Get rid of .nc basename, _ = os.path.splitext(basename) # Get rid of .res if verbose: print 'Converting layer %s in file %s to ASCII files' % (subdataset, ncfilename) infile = NetCDFFile(ncfilename) layers = infile.variables.keys() msg = 'Subdataset %s was not found in file %s. Options are %s.' % (subdataset, ncfilename, layers) assert subdataset in layers, msg units = infile.variables['time'].units msg = 'Time units must be "h". I got %s' % units assert units == 'h', msg A = infile.variables[subdataset].getValue() msg = 'Data must have 3 dimensions: Time, X and Y. I got shape: %s' % str(A.shape) assert len(A.shape) == 3, msg times = infile.variables['time'].getValue() assert A.shape[0] == len(times) cols = infile.dimensions['x'] rows = infile.dimensions['y'] assert A.shape[1] == rows assert A.shape[2] == cols # Header information xmin = float(infile.XMIN) xmax = float(infile.XMAX) ymin = float(infile.YMIN) ymax = float(infile.YMAX) cellsize = (xmax-xmin)/(cols-1) assert cellsize == (ymax-ymin)/(rows-1) header = 'ncols %i\n' % cols header += 'nrows %i\n' % rows header += 'xllcorner %.1f\n' % xmin header += 'yllcorner %.1f\n' % ymin header += 'cellsize %.1f\n' % cellsize header += 'NODATA_value -9999\n' # Loop through time slices and name files by hour. for k, t in enumerate(times): hour = str(int(t)).zfill(2) + 'h' asciifilename = basename + '.' + hour + '.' + subdataset.lower() + '.asc' prjfilename = asciifilename[:-4] + '.prj' outfile = open(asciifilename, 'w') outfile.write(header) for j in range(rows)[::-1]: # Rows are upside down for i in range(cols): outfile.write('%f ' % A[k, j, i]) outfile.write('\n') outfile.close() if projection: # Create associated projection file fid = open(prjfilename, 'w') fid.write(projection) fid.close() infile.close()
|
vent_location = (params['X_coordinate_of_vent'], params['Y_coordinate_of_vent'],
|
vent_location = (params['x_coordinate_of_vent'], params['y_coordinate_of_vent'],
|
def __init__(self, params, timestamp_output=True, store_locally=False, dircomment=None, echo=True, verbose=True): """Create AIM instance, common file names Optional arguments: timestamp_output: If True, create unique output directory with timestamp If False, overwrite output at every run store_locally: If True, store in same directory where scenario scripts are stored If False, use environment variable TEPHRADATA for output. dircomment (string or None): Optional comment added to output dir echo (True or False): Optionally print output to screen as well as log file. Default True. verbose: (True, False) determine if diagnostic output is to be printed """ params = params.copy() # Ensure modifications are kept local #--------------------------------- # AIM names, files and directories #--------------------------------- # AIM names and directories self.scenario_name = scenario_name = params['scenario_name'] import sys if len(sys.argv) > 1: # Assume that only postprocessing is requested using data in provided directory. self.postprocessing = True output_dir = sys.argv[1] else: # Create output dir self.postprocessing = False if store_locally: # FIXME (Obsolete) output_dir = os.path.join(os.getcwd(), tephra_output_dir) else: output_dir = get_tephradata() # Build output datastructure like # $TEPHRADATA/<scenario>/<scenario>_user_timestamp output_dir = os.path.join(output_dir, 'scenarios') output_dir = os.path.join(output_dir, scenario_name) scenario_dir = get_username() if timestamp_output: scenario_dir += '_' + get_timestamp()
|
if fields[-2] == 'depload':
|
if fields[-2] == 'load':
|
def generate_contours(self, verbose=True): """Contour ASCII grids into shp and kml files The function uses model parameters Load_contours, Thickness_contours and Thickness_units. """ if verbose: header('Contouring ASCII grids to SHP and KML files') for filename in os.listdir(self.output_dir): if filename.endswith('.asc'): if verbose: print 'Processing %s:\t' % filename fields = filename.split('.') if fields[-2] == 'depload': units = 'kg/m^2' contours = self.params['Load_contours'] attribute_name = 'Load[%s]' % units elif fields[-2] == 'depthick': units = self.params['Thickness_units'].lower() contours = self.params['Thickness_contours'] attribute_name = 'Thickness[%s]' % units else: attribute_name = 'Value' units = 'default' # Unit is implied by .inp file contours = True # Default is fixed number of contours _generate_contours(filename, contours, units, attribute_name, output_dir=self.output_dir, meteorological_model=self.meteorological_model, WKT_projection=self.WKT_projection, verbose=verbose)
|
elif fields[-2] == 'depthick':
|
elif fields[-2] == 'thickness':
|
def generate_contours(self, verbose=True): """Contour ASCII grids into shp and kml files The function uses model parameters Load_contours, Thickness_contours and Thickness_units. """ if verbose: header('Contouring ASCII grids to SHP and KML files') for filename in os.listdir(self.output_dir): if filename.endswith('.asc'): if verbose: print 'Processing %s:\t' % filename fields = filename.split('.') if fields[-2] == 'depload': units = 'kg/m^2' contours = self.params['Load_contours'] attribute_name = 'Load[%s]' % units elif fields[-2] == 'depthick': units = self.params['Thickness_units'].lower() contours = self.params['Thickness_contours'] attribute_name = 'Thickness[%s]' % units else: attribute_name = 'Value' units = 'default' # Unit is implied by .inp file contours = True # Default is fixed number of contours _generate_contours(filename, contours, units, attribute_name, output_dir=self.output_dir, meteorological_model=self.meteorological_model, WKT_projection=self.WKT_projection, verbose=verbose)
|
fn = extract_access_windprofile(access_dir=work_dir,
|
fn = extract_access_windprofile(access_dir=work_area,
|
def get_profile_from_web(url, vent_coordinates, verbose=True): """Download data files and create FALL3D wind profile Input url: web address where ACCESS wind profiles are stored vent_coordinates: UTM location of vent (x_coordinate_of_vent, y_coordinate_of_vent, zone, hemisphere) Output: profile_name: Name of generated wind profile """ # Get the data from the web download_wind_data(url) # Convert downloaded data to FALL3D wind profile at fn = extract_access_windprofile(access_dir=work_dir, utm_vent_coordinates=vent_coordinates, verbose=verbose) return fn
|
def post(self):
|
def get(self):
|
def post(self): """ Called when a new ride needs to be added to the database. Probably with all of this data it should be done as a form post. Arguments: - `self`: Web Arguments: - max_passengers - num_passengers - driver - start_point_title - start_point_lat - start_point_long - destination_title - destination_lat - destination_long - part_of_day - ToD - contact - ridecomments - driver """
|
number = inumber[0:3]+'-'+inumber[3:6]+'-'+inumber[6:]
|
if not "-" in inumber: number = inumber[0:3]+'-'+inumber[3:6]+'-'+inumber[6:] else: number = inumber
|
def post(self): """ Called when a new ride needs to be added to the database. Probably with all of this data it should be done as a form post. Arguments: - `self`: Web Arguments: - max_passengers - num_passengers - driver - start_point_title - start_point_lat - start_point_long - destination_title - destination_lat - destination_long - part_of_day - ToD - contact - ridecomments - driver """
|
isDriver = self.request.get("driver")
|
isDriver = self.request.get("isDriver")
|
def post(self): """ Called when a new ride needs to be added to the database. Probably with all of this data it should be done as a form post. Arguments: - `self`: Web Arguments: - max_passengers - num_passengers - driver - start_point_title - start_point_lat - start_point_long - destination_title - destination_lat - destination_long - part_of_day - ToD - contact - ridecomments - driver """
|
checked = self.request.get("checked")
|
checked = self.request.get("toLuther")
|
def post(self): """ Called when a new ride needs to be added to the database. Probably with all of this data it should be done as a form post. Arguments: - `self`: Web Arguments: - max_passengers - num_passengers - driver - start_point_title - start_point_lat - start_point_long - destination_title - destination_lat - destination_long - part_of_day - ToD - contact - ridecomments - driver """
|
newRide.comment = self.request.get("ridecomment")
|
newRide.comment = self.request.get("comment")
|
def post(self): """ Called when a new ride needs to be added to the database. Probably with all of this data it should be done as a form post. Arguments: - `self`: Web Arguments: - max_passengers - num_passengers - driver - start_point_title - start_point_lat - start_point_long - destination_title - destination_lat - destination_long - part_of_day - ToD - contact - ridecomments - driver """
|
("/newride", NewRideHandler),
|
("/newride.*", NewRideHandler),
|
def main(): global MAP_APIKEY, FROM_EMAIL_ADDR, NOTIFY_EMAIL_ADDR logging.getLogger().setLevel(logging.DEBUG) # prepopulate the database query = db.Query(Ride) if query.count() < 2: newRide = Ride() newRide.max_passengers = 3 newRide.num_passengers = 0 newRide.driver = users.User("[email protected]") newRide.start_point_title = "Luther College, Decorah, IA" newRide.start_point_long, newRide.start_point_lat = geocode(newRide.start_point_title) newRide.destination_title = "Plymouth, MN" newRide.destination_long, newRide.destination_lat = geocode(newRide.destination_title) newRide.part_of_day = 'Early Morning' newRide.ToD = datetime.datetime(2009,9,15) newRide.passengers = [] newRide.put() newRide = Ride() newRide.max_passengers = 1 newRide.num_passengers = 0 newRide.driver = users.User("[email protected]") newRide.start_point_title = "Luther College, Decorah, IA" newRide.start_point_long, newRide.start_point_lat = geocode(newRide.start_point_title) newRide.destination_title = "Des Moines, IA" newRide.destination_long, newRide.destination_lat = geocode(newRide.destination_title) newRide.part_of_day = 'Late Afternoon' newRide.ToD = datetime.datetime(2009,9,17) newRide.passengers = [] newRide.put() apiQuery = db.Query(ApplicationParameters) if apiQuery.count() < 1: bootstrap = ApplicationParameters() bootstrap.apikey = 'ABQIAAAAg9WbCE_zwMIRW7jDFE_3ixQ2JlMNfqnGb2qqWZtmZLchh1TSjRS0zuchuhlR8g4tlMGrjg34sNmyjQ' bootstrap.notifyEmailAddr = '[email protected]' bootstrap.fromEmailAddr = '[email protected]' MAP_APIKEY = bootstrap.apikey FROM_EMAIL_ADDR = bootstrap.fromEmailAddr NOTIFY_EMAIL_ADDR = bootstrap.notifyEmailAddr bootstrap.put() else: apilist = apiQuery.fetch(limit=1) MAP_APIKEY = apilist[0].apikey FROM_EMAIL_ADDR = apilist[0].fromEmailAddr NOTIFY_EMAIL_ADDR = apilist[0].notifyEmailAddr application = webapp.WSGIApplication([('/', MainHandler), ('/getrides', RideQueryHandler ), ("/newride", NewRideHandler), ("/addpass", AddPassengerHandler), ("/adddriver", AddDriverHandler), ('/home', HomeHandler), ('/rideinfo', RideInfoHandler), ('/deleteride', DeleteRideHandler), ('/editride', EditRideHandler), ('/applyedits', ChangeRideHandler), ('/removepassenger', RemovePassengerHandler), ('/.*', IncorrectHandler), ], debug=True) wsgiref.handlers.CGIHandler().run(application)
|
if k != 'ToD' and k != 'driver':
|
logging.debug("key = %s" % k) if k != 'ToD' and k != 'driver' and k != 'passengers':
|
def to_dict(self): res = {} for k in Ride._properties: ## special case ToD if k != 'ToD' and k != 'driver': res[k] = getattr(self,k) #eval('self.'+k) res['ToD'] = str(self.ToD) res['driver'] = self.driver.email() return res
|
'mapkey':MAP_APIKEY
|
'mapkey':MAP_APIKEY,
|
def get(self): query = db.Query(Ride) query.filter("ToD > ", datetime.datetime.now()) ride_list = query.fetch(limit=100) user = users.get_current_user() greeting = '' logout = '' if user: greeting = ("Welcome, %s! (<a href=\"%s\">sign out</a>) Go to your <a href='/home'>Home Page</a>" % (user.nickname(), users.create_logout_url("/"))) logout = users.create_logout_url("/") self.response.out.write(template.render('index.html', { 'ride_list': ride_list, 'greeting' : greeting, 'nick' : user.nickname(), 'logout':logout, 'mapkey':MAP_APIKEY }))
|
'message': message
|
'message': message, 'mapkey' : MAP_APIKEY,
|
def post(self): """ Called when a new ride needs to be added to the database. Probably with all of this data it should be done as a form post. Arguments: - `self`: Web Arguments: - max_passengers - num_passengers - driver - start_point_title - start_point_lat - start_point_long - destination_title - destination_lat - destination_long - part_of_day - ToD - contact """
|
'message': message
|
'message': message, 'mapkey':MAP_APIKEY,
|
def post(self): """ Called when adding a passenger to a ride Arguments: - 'self' Web Arguments: - user_name - ride_key """ # The current user can add himself to the ride. No need for this in the form. user_name = users.get_current_user() ride_key = self.request.get('ride_key') contact = self.request.get('contact') address = self.request.get('address') lat = float(self.request.get('lat')) lng = float(self.request.get('lng')) ride = db.get(db.Key(ride_key)) if ride == None: # Check if the ride was found temp = os.path.join(os.path.dirname(__file__), 'templates/error.html') outstr = template.render(temp, {'error_message': 'Error in ride matching'}) self.response.out.write(outstr) # Check if the current user is the driver elif ride.max_passengers == ride.num_passengers: doRender(self, 'error.html', {'error_message': 'This ride is full'}) # Check if the current user is already on the ride already = False for p in ride.passengers: if db.get(p).name == user_name: already = True if already: temp = os.path.join(os.path.dirname(__file__), 'templates/error.html') outstr = template.render(temp, {'error_message': 'You are already registered for this ride!'}) self.response.out.write(outstr) # Check if the current user is already the driver for the ride elif user_name == ride.driver: doRender(self, 'error.html', {'error_message': 'You can\'t be a passenger for a ride which you a driving.'}) else: passenger = Passenger() passenger.name = user_name passenger.contact = contact passenger.location = address passenger.lat = lat passenger.lng = lng passenger.ride = db.Key(ride_key) pass_key = passenger.put() ride.num_passengers = ride.num_passengers + 1 ride.passengers.append(pass_key) ride.put()
|
'message' : message
|
'message' : message, 'mapkey':MAP_APIKEY,
|
def get(self):
    key = self.request.get('key')
    ride = db.get(key)
    if ride == None:
        doRender(self, 'error.html', { 'error_message': "No such ride exists."})
    else:
        db.delete(ride)
        query = db.Query(Ride)
        query.filter("ToD > ", datetime.datetime.now())
        ride_list = query.fetch(limit=100)
        user = users.get_current_user()
        greeting = ''
        if user:
            greeting = ("Welcome, %s! (<a href=\"%s\">sign out</a>) Go to your <a href='/home'>Home Page</a>" % (user.nickname(), users.create_logout_url("/")))
        message = 'Your ride has been deleted.'
        self.response.out.write(template.render('index.html', {
            'ride_list': ride_list,
            'greeting' : greeting,
            'message' : message
        }))
|
'message' : message
|
'message' : message, 'mapkey' : MAP_APIKEY,
|
def get(self):
    rkey = self.request.get('rkey')
    ride = db.get(rkey)
    pkey = self.request.get('pkey')
    passenger = db.get(pkey)
    #self.response.out.write('Would remove %s from %s ride' % (passenger.name, ride.driver))
    if ride == None:
        doRender(self, 'error.html', { 'error_message': "No such ride exists."})
    elif passenger == None:
        doRender(self, 'error.html', { 'error_message': "No such passenger exists."})
    else:
        name = passenger.name
        ride.passengers.remove(passenger.key())
        passenger.delete()
        ride.num_passengers -= 1
        ride.put()
        query = db.Query(Ride)
        query.filter("ToD > ", datetime.datetime.now())
        ride_list = query.fetch(limit=100)
        user = users.get_current_user()
        greeting = ''
        if user:
            greeting = ("Welcome, %s! (<a href=\"%s\">sign out</a>) Go to your <a href='/home'>Home Page</a>" % (user.nickname(), users.create_logout_url("/")))
        message = '%s has been removed from %s\'s ride.' % (name, ride.driver)
        self.response.out.write(template.render('index.html', {
            'ride_list': ride_list,
            'greeting' : greeting,
            'message' : message
        }))
|
mainpage = XML.ElementFromURL(VIDEOS_PAGE % 'en',isHTML=True)
|
mainpage = HTML.ElementFromURL(VIDEOS_PAGE % 'en')
|
def VideoMainMenu():
    dir = MediaContainer(viewGroup="List")
    mainpage = XML.ElementFromURL(VIDEOS_PAGE % 'en',isHTML=True)
    for category in mainpage.xpath('//ul[@id="nav"]/li[not(@class="selected") and not(@class="lastItem")]/a'):
        dir.Append(Function(DirectoryItem(CategoryParsing,category.text,thumb=R(ICON),art=R(ART)),path = category.get('href')))
    return dir
|
pagetoscrape = XML.ElementFromURL(BASE_ADDRESS + path,isHTML=True)
|
pagetoscrape = HTML.ElementFromURL(BASE_ADDRESS + path)
|
def CategoryParsing(sender,path):
    dir = MediaContainer(viewGroup="List")
    pagetoscrape = XML.ElementFromURL(BASE_ADDRESS + path,isHTML=True)
    for category in pagetoscrape.xpath("//div[@id='listChannel']/ul//a"):
        dir.Append(Function(DirectoryItem(SubCategoryParsing,category.text,thumb=R(ICON),art=R(ART)),path = category.get('href')))
    return dir
|
sys.stdout.write('\r filling records: %5s%% done ' % (int(100.0*i/len (image_directories))))
|
sys.stdout.write('\r filling records: %5s%% done (%.2f Mbytes/s)' % (int(100.0*i/len (image_directories)), (image_data_offset-first_image_data_offset)/(time.time ()-start_time)/1024**2))
|
def write_file(self, filename, compression='none', strip_size = 2**13): """ Write image data to TIFF file.
|
sys.stdout.write ('\r'+40*' ')
|
sys.stdout.write ('\r'+70*' ')
|
def write_file(self, filename, compression='none', strip_size = 2**13): """ Write image data to TIFF file.
|
for j in range(samples_per_pixel): bytes = bits_per_sample[j] // 8 * width * length
|
if isinstance(bits_per_sample, numpy.ndarray):
    for j in range(samples_per_pixel):
        bytes = bits_per_sample[j] // 8 * width * length
        samples.append(arr[:,k:k+bytes].reshape((depth, width, length)))
        k += bytes
else:
    assert samples_per_pixel==1,`samples_per_pixel, bits_per_sample`
    bytes = bits_per_sample // 8 * width * length
|
def get_samples(self, subfile_type=0, verbose=False): """ Return samples and sample names.
|
k += bytes
|
def get_samples(self, subfile_type=0, verbose=False): """ Return samples and sample names.
|
|
def write_image(self, arr, compression=None):
|
def write_image(self, arr, compression=None, write_rgb=False):
|
def write_image(self, arr, compression=None): """ Write array as TIFF image.
|
for n in range(depth):
|
if depth == 3 and write_rgb:
|
def write_image(self, arr, compression=None): """ Write array as TIFF image.
|
self.SetField(TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK)
self.SetField(TIFFTAG_ORIENTATION, ORIENTATION_RIGHTTOP)
self.SetField(TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG)
|
self.SetField(TIFFTAG_SAMPLESPERPIXEL, 3)
self.SetField(TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB)
self.SetField(TIFFTAG_PLANARCONFIG, PLANARCONFIG_SEPARATE)
|
def write_image(self, arr, compression=None): """ Write array as TIFF image.
|
WriteStrip(0, arr[n].ctypes.data, size)
self.WriteDirectory()
|
    for n in range(depth):
        WriteStrip(n, arr[n, :, :].ctypes.data, size)
else:
    for n in range(depth):
        self.SetField(TIFFTAG_IMAGEWIDTH, width)
        self.SetField(TIFFTAG_IMAGELENGTH, height)
        self.SetField(TIFFTAG_BITSPERSAMPLE, bits)
        self.SetField(TIFFTAG_COMPRESSION, COMPRESSION)
        self.SetField(TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK)
        self.SetField(TIFFTAG_ORIENTATION, ORIENTATION_RIGHTTOP)
        self.SetField(TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG)
        if sample_format is not None:
            self.SetField(TIFFTAG_SAMPLEFORMAT, sample_format)
        WriteStrip(0, arr[n].ctypes.data, size)
        self.WriteDirectory()
|
def write_image(self, arr, compression=None): """ Write array as TIFF image.
|
byteorder = self.get_uint16(first_byte)
|
byteorder = self.data[first_byte:first_byte+2].view(dtype=numpy.uint16)
|
def __init__(self, filename, mode='r', first_byte = 0):
    if mode!='r':
        raise NotImplementedError(`mode`)
    self.filename = filename
    self.first_byte = first_byte
    self.data = numpy.memmap(filename, dtype=numpy.ubyte, mode=mode)
|
return self.data[offset:offset+2].view(dtype=numpy.uint16)[0]
|
return self.data[offset:offset+2].view(dtype=self.dtypes.uint16)[0]
|
def get_uint16(self, offset): return self.data[offset:offset+2].view(dtype=numpy.uint16)[0]
|
return self.data[offset:offset+4].view(dtype=numpy.uint32)[0]
|
return self.data[offset:offset+4].view(dtype=self.dtypes.uint32)[0]
|
def get_uint32(self, offset): return self.data[offset:offset+4].view(dtype=numpy.uint32)[0]
|
return self.data[offset:offset+2].view(dtype=numpy.int16)[0]
|
return self.data[offset:offset+2].view(dtype=self.dtypes.int16)[0]
|
def get_int16(self, offset): return self.data[offset:offset+2].view(dtype=numpy.int16)[0]
|
return self.data[offset:offset+4].view(dtype=numpy.int32)[0]
|
return self.data[offset:offset+4].view(dtype=self.dtypes.int32)[0]
|
def get_int32(self, offset): return self.data[offset:offset+4].view(dtype=numpy.int32)[0]
|
return self.data[offset:offset+4].view(dtype=numpy.float32)[0]
|
return self.data[offset:offset+4].view(dtype=self.dtypes.float32)[0]
|
def get_float32(self, offset): return self.data[offset:offset+4].view(dtype=numpy.float32)[0]
|
return self.data[offset:offset+8].view(dtype=numpy.float64)[0]
|
return self.data[offset:offset+8].view(dtype=self.dtypes.float64)[0]
|
def get_float64(self, offset): return self.data[offset:offset+8].view(dtype=numpy.float64)[0]
|
dtype = type2dtype.get(typ)
|
dtype = self.dtypes.type2dt.get(typ)
|
def get_values(self, offset, typ, count):
    if isinstance(typ, numpy.dtype):
        dtype = typ
        bytes = typ.itemsize
    elif isinstance(typ, type) and issubclass(typ, numpy.generic):
        dtype = typ
        bytes = typ().itemsize
    else:
        if isinstance(typ, str):
            typ = name2type.get(typ)
        dtype = type2dtype.get(typ)
        bytes = type2bytes.get(typ)
        if dtype is None or bytes is None:
            sys.stderr.write('get_values: incomplete info for type=%r: dtype=%s, bytes=%s' % (typ, dtype, bytes))
            return
    return self.data[offset:offset+bytes*count].view(dtype=dtype)
|
    dtype = getattr (numpy, 'uint%s' % (bits_per_sample[i]))
else:
    dtype = getattr (numpy, 'uint%s' % (bits_per_sample))
|
    dtype = getattr (self.dtypes, 'uint%s' % (bits_per_sample[i]))
else:
    dtype = getattr (self.dtypes, 'uint%s' % (bits_per_sample))
|
def get_contiguous(self): """ Return memmap of a stack of images. """ if not self.is_contiguous (): raise ValueError('Image stack data not contiguous') ifd0 = self.IFD[0] ifd1 = self.IFD[-1] width = ifd0.get ('ImageWidth').value length = ifd0.get ('ImageLength').value assert width == ifd1.get ('ImageWidth').value assert length == ifd1.get ('ImageLength').value depth = len(self.IFD) compression = ifd.get('Compression').value if compression!=1: raise ValueError('Unable to get contiguous image stack from compressed data') bits_per_sample = ifd0.get('BitsPerSample').value photo_interp = ifd0.get('PhotometricInterpretation').value planar_config = ifd0.get('PlanarConfiguration').value strip_offsets0 = ifd0.get('StripOffsets').value strip_nbytes0 = ifd0.get('StripByteCounts').value strip_offsets1 = ifd1.get('StripOffsets').value strip_nbytes1 = ifd1.get('StripByteCounts').value samples_per_pixel = ifd1.get('SamplesPerPixel').value assert samples_per_pixel==1,`samples_per_pixel`
|
dtype = getattr (numpy, '%s%s' % (format, bits))
|
dtype = getattr (self.dtypes, '%s%s' % (format, bits))
|
def get_samples(self, subfile_type=0, verbose=False): """ Return samples and sample names.
|
dtype = getattr (numpy, '%s%s' % (format, bits_per_sample))
|
dtype = getattr (self.dtypes, '%s%s' % (format, bits_per_sample))
|
def get_samples(self, subfile_type=0, verbose=False): """ Return samples and sample names.
|
arr = numpy.empty(depth * bytes_per_image, dtype=numpy.uint8)
|
arr = numpy.empty(depth * bytes_per_image, dtype=self.dtypes.uint8)
|
def get_samples(self, subfile_type=0, verbose=False): """ Return samples and sample names.
|
dtype = getattr (numpy, 'uint%s' % (bits_per_sample[i]))
|
dtype = getattr (self.dtypes, 'uint%s' % (bits_per_sample[i]))
|
def get_contiguous(self, channel_name=None): """ Return memmap of an image.
|
dtype = getattr (numpy, 'uint%s' % (bits_per_sample))
|
dtype = getattr (self.dtypes, 'uint%s' % (bits_per_sample))
|
def get_contiguous(self, channel_name=None): """ Return memmap of an image.
|
arr = numpy.empty(bytes_per_image, dtype=numpy.uint8)
assert len(l)==strips_per_image,`len(l), strips_per_image`
|
arr = numpy.empty(depth * bytes_per_image, dtype=numpy.uint8)
assert len(l)==strips_per_image*depth,`len(l), strips_per_image, depth`
|
def get_samples(self, subfile_type=0, verbose=False): """ Return samples and sample names.
|
QCoreApplication.translate( "MergeShapes", "This version of RasterCalc requires at least QGIS version 1.0.0\nPlugin will not be enabled." ) )
|
QCoreApplication.translate( "MergeShapes", "This version of MergeShapes requires at least QGIS version 1.0.0\nPlugin will not be enabled." ) )
|
def initGui( self ):
    if int( self.QgisVersion ) < 1:
        QMessageBox.warning( self.iface.mainWindow(), "MergeShapes",
            QCoreApplication.translate( "MergeShapes", "Quantum GIS version detected: " ) + unicode( self.QgisVersion ) + ".xx\n" +
            QCoreApplication.translate( "MergeShapes", "This version of RasterCalc requires at least QGIS version 1.0.0\nPlugin will not be enabled." ) )
        return None
|