def appdatadirectory( ):
"""Attempt to retrieve the current user's app-data directory
This is the location where application-specific
files should be stored. On *nix systems, this will
be the ${HOME}/.config directory. On Win32 systems, it will be
the "Application Data" directory. Note that for
Win32 systems it is normal to create a sub-directory
for storing data in the Application Data directory.
"""
if shell:
# on Win32 and have Win32all extensions, best-case
return shell_getShellFolder(shellcon.CSIDL_APPDATA)
if _winreg:
# on Win32, but no Win32 shell com available, this uses
# a direct registry access, likely to fail on Win98/Me
return _winreg_getShellFolder( 'AppData' )
# okay, what if for some reason _winreg is missing? would we want to allow ctypes?
## default case, look for name in environ...
for name in ['APPDATA', 'HOME']:
if name in os.environ:
return os.path.join( os.environ[name], '.config' )
# well, someone's being naughty, see if we can get ~ to expand to a directory...
possible = os.path.abspath(os.path.expanduser( '~/.config' ))
if os.path.exists( possible ):
return possible
raise OSError( """Unable to determine user's application-data directory, no ${HOME} or ${APPDATA} in environment""" ) | 0.012436 |
def parse_imethodcall(self, tup_tree):
"""
::
<!ELEMENT IMETHODCALL (LOCALNAMESPACEPATH, IPARAMVALUE*)>
<!ATTLIST IMETHODCALL
%CIMName;>
"""
self.check_node(tup_tree, 'IMETHODCALL', ('NAME',))
k = kids(tup_tree)
if not k:
raise CIMXMLParseError(
_format("Element {0!A} missing child elements "
"(expecting child elements "
"(LOCALNAMESPACEPATH, IPARAMVALUE*))", name(tup_tree)),
conn_id=self.conn_id)
namespace = self.parse_localnamespacepath(k[0])
params = [self.parse_iparamvalue(x) for x in k[1:]]
        return (name(tup_tree), attrs(tup_tree), namespace, params)
def connections_to_object(self, to_obj):
"""
Returns a ``Connection`` query set matching all connections with
the given object as a destination.
"""
self._validate_ctypes(None, to_obj)
        return self.connections.filter(to_pk=to_obj.pk)
def db_create():
"""Create the database"""
try:
migrate_api.version_control(url=db_url, repository=db_repo)
db_upgrade()
except DatabaseAlreadyControlledError:
        print 'ERROR: Database is already version controlled.'
def cache(self, mode="r", encoding=Constants.default_codec, errors=Constants.codec_error):
"""
Reads given file content and stores it in the content cache.
:param mode: File read mode.
:type mode: unicode
:param encoding: File encoding codec.
:type encoding: unicode
:param errors: File encoding errors handling.
:type errors: unicode
:return: Method success.
:rtype: bool
"""
self.uncache()
if foundations.strings.is_website(self.__path):
try:
LOGGER.debug("> Caching '{0}' online file content.".format(self.__path))
self.__content = urllib2.urlopen(self.__path).readlines()
return True
except urllib2.URLError as error:
raise foundations.exceptions.UrlReadError(
"!> {0} | '{1}' url is not readable: '{2}'.".format(self.__class__.__name__, self.__path, error))
elif foundations.common.path_exists(self.__path):
if not is_readable(self.__path):
raise foundations.exceptions.FileReadError(
"!> {0} | '{1}' file is not readable!".format(self.__class__.__name__, self.__path))
with codecs.open(self.__path, mode, encoding, errors) as file:
LOGGER.debug("> Caching '{0}' file content.".format(self.__path))
self.__content = file.readlines()
return True
        return False
def with_edges(structure, edges):
    """
    Constructor for StructureGraph, using pre-existing or pre-defined edges
    with optional edge parameters.
    :param structure: Structure object
    :param edges: dict representing the bonds of the functional
        group (format: {(from_index, to_index, from_image, to_image): props},
        where props is a dictionary of properties, including weight.
        Props should be None if no additional properties are to be
        specified.
    :return: sg, a StructureGraph
    """
sg = StructureGraph.with_empty_graph(structure, name="bonds",
edge_weight_name="weight",
edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
from_image = edge[2]
to_image = edge[3]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index,"
" from_image, to_image) tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = sg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError("Edges cannot be added if nodes are not"
" present in the graph. Please check your"
" indices.")
sg.add_edge(from_index, to_index, from_jimage=from_image,
to_jimage=to_image, weight=weight,
edge_properties=props)
sg.set_node_attributes()
    return sg
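# Usage sketch, assuming pymatgen is available: build a graph for a toy
# rock-salt-like cell. Edge keys are (from_index, to_index, from_image,
# to_image) and the props dict may carry a "weight" entry.
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

structure = Structure(Lattice.cubic(2.8), ["Na", "Cl"],
                      [[0, 0, 0], [0.5, 0.5, 0.5]])
edges = {(0, 1, (0, 0, 0), (0, 0, 0)): {"weight": 1.0}}
sg = StructureGraph.with_edges(structure, edges)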
def calculate_energy(self, energies):
"""
Calculates the energy of the reaction.
Args:
energies ({Composition: float}): Energy for each composition.
                E.g., {comp1: energy1, comp2: energy2}.
Returns:
reaction energy as a float.
"""
return sum([amt * energies[c] for amt, c in zip(self._coeffs,
                                                        self._all_comp)])
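# Worked example of the sum above (comp1..comp3 stand in for pymatgen
# Composition objects): for the reaction comp1 + comp2 -> comp3 the
# coefficients are (-1, -1, 1), so with
#   energies = {comp1: -1.0, comp2: -2.0, comp3: -3.5}
# the reaction energy is (-1)*(-1.0) + (-1)*(-2.0) + 1*(-3.5) = -0.5.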
def reset_backend(self, warn_user=True, reset_interps=True):
"""
Resets GUI data and updates GUI displays such as plots, boxes, and
logger
        Parameters
        ----------
        warn_user : bool
            If True, display a warning dialog asking the user before
            resetting data
        reset_interps : bool
            If True, read interpretations in from pmag tables; otherwise
            leave them alone
"""
if warn_user and not self.data_loss_warning():
return False
# reset backend, including get_data(), get_data_info()
self.quiet_reset_backend(reset_interps=reset_interps)
# reset specimens box
self.specimens_box.SetItems(self.specimens)
self.specimens_box.SetStringSelection(str(self.s))
# reset site level means box
self.level_names.Clear()
self.level_names.AppendItems(self.sites)
if self.sites:
self.level_names.SetSelection(0)
# reset coordinate system
self.COORDINATE_SYSTEM, self.coordinate_list = self.get_coordinate_system()
self.coordinates_box.Clear()
self.coordinates_box.AppendItems(self.coordinate_list)
self.coordinates_box.SetStringSelection(self.COORDINATE_SYSTEM)
# get cart rot
self.initialize_CART_rot(str(self.s))
# draw everything
if self.Data:
if not self.current_fit:
self.draw_figure(self.s)
self.update_selection()
else:
self.Add_text()
self.update_fit_boxes()
if self.ie_open:
            self.ie.update_editor()
def get_attr_value(self, name):
'''Retrieve the ``value`` for the attribute ``name``. The ``name``
can be nested following the :ref:`double underscore <tutorial-underscore>`
notation, for example ``group__name``. If the attribute is not available it
raises :class:`AttributeError`.'''
if name in self._meta.dfields:
return self._meta.dfields[name].get_value(self)
elif not name.startswith('__') and JSPLITTER in name:
bits = name.split(JSPLITTER)
fname = bits[0]
if fname in self._meta.dfields:
return self._meta.dfields[fname].get_value(self, *bits[1:])
else:
return getattr(self, name)
else:
            return getattr(self, name)
def _isna_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
    obj : ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return libmissing.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
elif isinstance(obj, list):
return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
elif hasattr(obj, '__array__'):
return _isna_ndarraylike_old(np.asarray(obj))
else:
        return obj is None
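# Behaviour sketch via the public API: the "old" rules treat INF/-INF like
# NaN, which pandas exposes through an option (option name may vary by
# pandas version):
import numpy as np
import pandas as pd

with pd.option_context('mode.use_inf_as_na', True):
    print(pd.isna(pd.Series([1.0, np.nan, np.inf, None])))
    # -> False, True, True, True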
def filepath(self) -> str:
"""The NetCDF file path."""
        return os.path.join(self._dirpath, self.name + '.nc')
def _utf_strip_bom(self, encoding):
"""Return an encoding that will ignore the BOM."""
if encoding is None:
pass
elif encoding.lower() == 'utf-8':
encoding = 'utf-8-sig'
elif encoding.lower().startswith('utf-16'):
encoding = 'utf-16'
elif encoding.lower().startswith('utf-32'):
encoding = 'utf-32'
        return encoding
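# The substitutions above rely on codecs that consume a BOM transparently:
# 'utf-8-sig' skips a leading BOM, and 'utf-16'/'utf-32' detect byte order
# from the BOM instead of assuming one.
assert b'\xef\xbb\xbfhello'.decode('utf-8-sig') == 'hello'
assert b'\xff\xfeh\x00i\x00'.decode('utf-16') == 'hi'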
def remove(text, exclude):
"""Remove ``exclude`` symbols from ``text``.
Example:
>>> remove("example text", string.whitespace)
'exampletext'
Args:
text (str): The text to modify
exclude (iterable): The symbols to exclude
Returns:
``text`` with ``exclude`` symbols removed
"""
exclude = ''.join(str(symbol) for symbol in exclude)
    return text.translate(str.maketrans('', '', exclude))
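# Further examples of the translate-based deletion:
import string

assert remove("a-b-c", "-") == "abc"
assert remove("tab\tand space", string.whitespace) == "tabandspace"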
def _encrypt_password(self, password):
"""encrypt the password for given mode """
if self.encryption_mode.lower() == 'crypt':
return self._crypt_password(password)
elif self.encryption_mode.lower() == 'md5':
return self._md5_password(password)
elif self.encryption_mode.lower() == 'md5-base':
return self._md5_base_password(password)
else:
            raise UnknownEncryptionMode(self.encryption_mode)
def build(level, code, validity=None):
'''Serialize a GeoID from its parts'''
spatial = ':'.join((level, code))
if not validity:
return spatial
elif isinstance(validity, basestring):
return '@'.join((spatial, validity))
elif isinstance(validity, datetime):
return '@'.join((spatial, validity.date().isoformat()))
elif isinstance(validity, date):
return '@'.join((spatial, validity.isoformat()))
else:
msg = 'Unknown GeoID validity type: {0}'
        raise GeoIDError(msg.format(type(validity).__name__))
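# Usage sketch (the level and code values are invented for illustration):
from datetime import date

assert build('fr:commune', '75056') == 'fr:commune:75056'
assert build('fr:commune', '75056', date(2016, 1, 1)) == 'fr:commune:75056@2016-01-01'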
def episode_info(self):
"""
Search for the episode with the requested experience Id
:return:
"""
if self.show_info:
for season in self.show_info["seasons"]:
for episode in season["episodes"]:
for lang in episode["languages"].values():
for alpha in lang["alpha"].values():
if alpha["experienceId"] == self.experience_id:
                                return episode
def get_neutron_endpoint(cls, json_resp):
"""
Parse the service catalog returned by the Identity API for an endpoint matching the Neutron service
Sends a CRITICAL service check when none are found registered in the Catalog
"""
catalog = json_resp.get('token', {}).get('catalog', [])
match = 'neutron'
neutron_endpoint = None
for entry in catalog:
if entry['name'] == match or 'Networking' in entry['name']:
valid_endpoints = {}
for ep in entry['endpoints']:
interface = ep.get('interface', '')
if interface in ['public', 'internal']:
valid_endpoints[interface] = ep['url']
if valid_endpoints:
# Favor public endpoints over internal
neutron_endpoint = valid_endpoints.get("public", valid_endpoints.get("internal"))
break
else:
raise MissingNeutronEndpoint()
        return neutron_endpoint
def release():
"""Bump version, tag, build, gen docs."""
if check_staged():
raise EnvironmentError('There are staged changes, abort.')
if check_unstaged():
raise EnvironmentError('There are unstaged changes, abort.')
bump()
tag()
build()
doc_gen()
puts(colored.yellow("Remember to upload documentation and package:"))
with indent(2):
puts(colored.cyan("shovel doc.upload"))
puts(colored.cyan("shovel version.upload")) | 0.002062 |
def __getBucketVersioning(self, bucket):
"""
For newly created buckets get_versioning_status returns an empty dict. In the past we've
seen None in this case. We map both to a return value of False.
Otherwise, the 'Versioning' entry in the dictionary returned by get_versioning_status can
be 'Enabled', 'Suspended' or 'Disabled' which we map to True, None and False
respectively. Note that we've never seen a versioning status of 'Disabled', only the
empty dictionary. Calling configure_versioning with False on a bucket will cause
get_versioning_status to then return 'Suspended' even on a new bucket that never had
versioning enabled.
"""
for attempt in retry_s3():
with attempt:
status = bucket.get_versioning_status()
                return self.versionings[status['Versioning']] if status else False
def GetFirstWrittenEventSource(self):
"""Retrieves the first event source that was written after open.
Using GetFirstWrittenEventSource and GetNextWrittenEventSource newly
added event sources can be retrieved in order of addition.
Returns:
EventSource: event source or None if there are no newly written ones.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
"""
if not self._storage_file:
raise IOError('Unable to read from closed storage writer.')
event_source = self._storage_file.GetEventSourceByIndex(
self._first_written_event_source_index)
if event_source:
self._written_event_source_index = (
self._first_written_event_source_index + 1)
    return event_source
def get_hyperedge_weight_matrix(H, hyperedge_ids_to_indices):
"""Creates the diagonal matrix W of hyperedge weights as a sparse matrix.
    :param H: the hypergraph whose hyperedge weights are to be found.
    :param hyperedge_ids_to_indices: the mapping from hyperedge IDs to the
                                     corresponding indices in the matrix.
:returns: sparse.csc_matrix -- the diagonal edge weight matrix as a
sparse matrix.
"""
# Combined 2 methods into 1; this could be written better
hyperedge_weights = {}
for hyperedge_id in H.hyperedge_id_iterator():
hyperedge_weights.update({hyperedge_ids_to_indices[hyperedge_id]:
H.get_hyperedge_weight(hyperedge_id)})
hyperedge_weight_vector = []
for i in range(len(hyperedge_weights.keys())):
hyperedge_weight_vector.append(hyperedge_weights.get(i))
    return sparse.diags([hyperedge_weight_vector], [0])
def add(self, phrase, id=None):
"""
Adds a new phrase to the dictionary
:param phrase: the new phrase as a list of tokens
        :param id: optionally the id can be set on addition. Beware: if you set one id you should
            set them all, as the auto-generated ids do not take into account ids set this way.
:return: None
"""
if len(phrase) > 0 and type(phrase[0]) in (tuple, list):
phrase = [token[0] for token in phrase]
        super(NounPhraseDictionary, self).add(phrase, id)
def add_router_to_hosting_device(self, client, hosting_device_id, body):
"""Adds a router to hosting device."""
res_path = hostingdevice.HostingDevice.resource_path
return client.post((res_path + DEVICE_L3_ROUTERS) %
                           hosting_device_id, body=body)
def _get_samples(self, subset=None):
"""
Helper function to get sample names from subset.
Parameters
----------
subset : str
Subset name. If None, returns all samples.
Returns
-------
List of sample names
"""
if subset is None:
samples = self.subsets['All_Samples']
else:
try:
samples = self.subsets[subset]
except KeyError:
raise KeyError(("Subset '{:s}' does not ".format(subset) +
"exist.\nUse 'make_subset' to create a" +
"subset."))
        return samples
def trace_job(self, jobId):
""" Get information about the specified remote job
:param jobId: the job identifier
:return: a dictionary with the information
"""
header = self.__check_authentication()
status_url = self.address + "/jobs/" + jobId + "/trace"
status_resp = requests.get(status_url, headers=header)
if status_resp.status_code != 200:
raise ValueError("Code {}. {}".format(status_resp.status_code, status_resp.json().get("error")))
        return status_resp.json()
def start(self, host='127.0.0.1', port=None, debug=False, **kwargs):
"""
Start the built in webserver, bound to the host and port you'd like.
Default host is `127.0.0.1` and port 8080.
:param host: The host you want to bind the build in webserver to
:param port: The port number you want the webserver to run on
:param debug: Set to `True` to enable debug level logging
:param kwargs: Additional arguments you'd like to pass to Flask
"""
        self.server.run(host=host, port=port, debug=debug, **kwargs)
def remove(h, i):
"""Remove the item at position i of the heap."""
n = h.size() - 1
if n != i:
h.swap(i, n)
down(h, i, n)
up(h, i)
    return h.pop()
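# Trace on a hypothetical min-heap wrapper h over [1, 3, 2, 7], where the
# children of slot i live at 2*i + 1 and 2*i + 2:
#   remove(h, 1): n = 3; swap slots 1 and 3 -> [1, 7, 2, 3];
#   down/up restore the invariant over the first 3 slots (no-ops here);
#   pop() then returns 3, the removed item, leaving [1, 7, 2].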
def instance_variables(self):
"""
Returns all instance variables in the class, sorted
alphabetically as a list of `pydoc.Variable`. Instance variables
are attributes of `self` defined in a class's `__init__`
method.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
        return filter(p, self.doc_init.values())
def getControllerState(self, unControllerDeviceIndex, unControllerStateSize=sizeof(VRControllerState_t)):
"""
Fills the supplied struct with the current state of the controller. Returns false if the controller index
is invalid. This function is deprecated in favor of the new IVRInput system.
"""
fn = self.function_table.getControllerState
pControllerState = VRControllerState_t()
result = fn(unControllerDeviceIndex, byref(pControllerState), unControllerStateSize)
        return result, pControllerState
def current_timestamp(self) -> datetime:
"""Get the current state timestamp."""
timestamp = DB.get_hash_value(self._key, 'current_timestamp')
        return datetime_from_isoformat(timestamp)
def join_accesses(unique_id, accesses, from_date, until_date, dayly_granularity):
"""
Esse metodo recebe 1 ou mais chaves para um documento em específico para que
os acessos sejam recuperados no Ratchet e consolidados em um unico id.
Esse processo é necessário pois os acessos de um documento podem ser registrados
para os seguintes ID's (PID, PID FBPE, Path PDF).
PID: Id original do SciELO ex: S0102-67202009000300001
PID FBPE: Id antigo do SciELO ex: S0102-6720(09)000300001
Path PDF: Quando o acesso é feito diretamente para o arquivo PDF no FS do
servidor ex: /pdf/rsp/v12n10/v12n10.pdf
"""
logger.debug('joining accesses for: %s' % unique_id)
joined_data = {}
listed_data = []
def joining_monthly(joined_data, atype, data):
if 'total' in data:
del(data['total'])
for year, months in data.items():
del(months['total'])
for month in months:
dt = '%s-%s' % (year[1:], month[1:])
if not dt >= from_date[:7] or not dt <= until_date[:7]:
continue
joined_data.setdefault(dt, {})
joined_data[dt].setdefault(atype, 0)
joined_data[dt][atype] += data[year][month]['total']
return joined_data
def joining_dayly(joined_data, atype, data):
if 'total' in data:
del(data['total'])
for year, months in data.items():
del(months['total'])
for month, days in months.items():
del(days['total'])
for day in days:
dt = '%s-%s-%s' % (year[1:], month[1:], day[1:])
if not dt >= from_date or not dt <= until_date:
continue
joined_data.setdefault(dt, {})
joined_data[dt].setdefault(atype, 0)
joined_data[dt][atype] += data[year][month][day]
return joined_data
joining = joining_monthly
if dayly_granularity:
joining = joining_dayly
for data in accesses:
for key, value in data.items():
if not key in ['abstract', 'html', 'pdf', 'readcube']:
continue
joined_data = joining(joined_data, key, value)
    return joined_data
def _get_col_dimstr(tdim, is_string=False):
"""
not for variable length
"""
dimstr = ''
if tdim is None:
dimstr = 'array[bad TDIM]'
else:
if is_string:
if len(tdim) > 1:
dimstr = [str(d) for d in tdim[1:]]
else:
if len(tdim) > 1 or tdim[0] > 1:
dimstr = [str(d) for d in tdim]
if dimstr != '':
dimstr = ','.join(dimstr)
dimstr = 'array[%s]' % dimstr
    return dimstr
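# Examples of the resulting dimension strings:
assert _get_col_dimstr((3, 4)) == 'array[3,4]'
assert _get_col_dimstr((20, 5), is_string=True) == 'array[5]'
assert _get_col_dimstr((1,)) == ''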
def dsc_comment(self, comment):
""" Emit a comment into the PostScript output for the given surface.
The comment is expected to conform to
the PostScript Language Document Structuring Conventions (DSC).
Please see that manual for details on the available comments
and their meanings.
In particular, the ``%%IncludeFeature`` comment allows
a device-independent means of controlling printer device features.
So the PostScript Printer Description Files Specification
will also be a useful reference.
The comment string must begin with a percent character (%)
and the total length of the string
(including any initial percent characters)
must not exceed 255 bytes.
Violating either of these conditions will
place surface into an error state.
But beyond these two conditions,
this method will not enforce conformance of the comment
with any particular specification.
The comment string should not have a trailing newline.
The DSC specifies different sections
in which particular comments can appear.
This method provides for comments to be emitted
within three sections:
the header, the Setup section, and the PageSetup section.
Comments appearing in the first two sections
apply to the entire document
while comments in the BeginPageSetup section
apply only to a single page.
For comments to appear in the header section,
this method should be called after the surface is created,
but before a call to :meth:`dsc_begin_setup`.
For comments to appear in the Setup section,
this method should be called after a call to :meth:`dsc_begin_setup`
but before a call to :meth:`dsc_begin_page_setup`.
For comments to appear in the PageSetup section,
this method should be called after a call to
:meth:`dsc_begin_page_setup`.
Note that it is only necessary to call :meth:`dsc_begin_page_setup`
for the first page of any surface.
After a call to :meth:`~Surface.show_page`
or :meth:`~Surface.copy_page`
comments are unambiguously directed
to the PageSetup section of the current page.
But it doesn't hurt to call this method
at the beginning of every page
as that consistency may make the calling code simpler.
As a final note,
cairo automatically generates several comments on its own.
As such, applications must not manually generate
any of the following comments:
Header section: ``%!PS-Adobe-3.0``, ``%%Creator``, ``%%CreationDate``,
``%%Pages``, ``%%BoundingBox``, ``%%DocumentData``,
``%%LanguageLevel``, ``%%EndComments``.
Setup section: ``%%BeginSetup``, ``%%EndSetup``.
PageSetup section: ``%%BeginPageSetup``, ``%%PageBoundingBox``,
``%%EndPageSetup``.
Other sections: ``%%BeginProlog``, ``%%EndProlog``, ``%%Page``,
``%%Trailer``, ``%%EOF``.
"""
cairo.cairo_ps_surface_dsc_comment(
self._pointer, _encode_string(comment))
        self._check_status()
def split_extension(file_name, special=['tar.bz2', 'tar.gz']):
"""
Find the file extension of a file name, including support for
special case multipart file extensions (like .tar.gz)
Parameters
----------
file_name: str, file name
special: list of str, multipart extensions
eg: ['tar.bz2', 'tar.gz']
Returns
----------
extension: str, last characters after a period, or
a value from 'special'
"""
file_name = str(file_name)
if file_name.endswith(tuple(special)):
for end in special:
if file_name.endswith(end):
return end
    return file_name.split('.')[-1]
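# Examples:
assert split_extension('archive.tar.gz') == 'tar.gz'
assert split_extension('backup.tar.bz2') == 'tar.bz2'
assert split_extension('image.png') == 'png'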
def generic_annotate(qs_model, generic_qs_model, aggregator, gfk_field=None, alias='score'):
"""
Find blog entries with the most comments:
qs = generic_annotate(Entry.objects.public(), Comment.objects.public(), Count('comments__id'))
for entry in qs:
print entry.title, entry.score
Find the highest rated foods:
generic_annotate(Food, Rating, Avg('ratings__rating'), alias='avg')
for food in qs:
print food.name, '- average rating:', food.avg
.. note::
In both of the above examples it is assumed that a GenericRelation exists
on Entry to Comment (named "comments") and also on Food to Rating (named "ratings").
If a GenericRelation does *not* exist, the query will still return correct
results but the code path will be different as it will use the fallback method.
.. warning::
If the underlying column type differs between the qs_model's primary
key and the generic_qs_model's foreign key column, it will use the fallback
    method, which can correctly CAST between the two column types.
:param qs_model: A model or a queryset of objects you want to perform
annotation on, e.g. blog entries
:param generic_qs_model: A model or queryset containing a GFK, e.g. comments
:param aggregator: an aggregation, from django.db.models, e.g. Count('id') or Avg('rating')
:param gfk_field: explicitly specify the field w/the gfk
:param alias: attribute name to use for annotation
"""
    return fallback_generic_annotate(qs_model, generic_qs_model, aggregator, gfk_field, alias)
def get_version(version=None):
"""Returns a tuple of the django version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version[4] > 0: # 0.2.1-alpha.1
return "%s.%s.%s-%s.%s" % (version[0], version[1], version[2], version[3], version[4])
elif version[3] != '': # 0.2.1-alpha
return "%s.%s.%s-%s" % (version[0], version[1], version[2], version[3])
elif version[2] > 0: # 0.2.1
return "%s.%s.%s" % (version[0], version[1], version[2])
else: # 0.2
return "%s.%s" % (version[0], version[1]) | 0.00335 |
def _javascript_helper(self, position):
""" Add javascript links for the current page and for the plugins """
if position not in ["header", "footer"]:
position = "footer"
# Load javascript files from plugins
if position == "header":
entries = [entry for entry in self._plugin_manager.call_hook("javascript_header") if entry is not None]
else:
entries = [entry for entry in self._plugin_manager.call_hook("javascript_footer") if entry is not None]
# Load javascript for the current page
entries += self._get_ctx()["javascript"][position]
entries = ["<script src='" + entry + "' type='text/javascript' charset='utf-8'></script>" for entry in entries]
return "\n".join(entries) | 0.006386 |
def card_transfer(provider: Provider, card: CardTransfer, inputs: dict,
change_address: str, locktime: int=0) -> Transaction:
'''Prepare the CardTransfer Transaction object
: card - CardTransfer object
: inputs - utxos (has to be owned by deck issuer)
: change_address - address to send the change to
: locktime - tx locked until block n=int
'''
network_params = net_query(provider.network)
pa_params = param_query(provider.network)
if card.deck_p2th is None:
raise Exception("card.deck_p2th required for tx_output")
outs = [
tx_output(network=provider.network,
value=pa_params.P2TH_fee,
n=0, script=p2pkh_script(address=card.deck_p2th,
network=provider.network)), # deck p2th
tx_output(network=provider.network,
value=Decimal(0), n=1,
script=nulldata_script(card.metainfo_to_protobuf)) # op_return
]
for addr, index in zip(card.receiver, range(len(card.receiver))):
outs.append( # TxOut for each receiver, index + 2 because we have two outs already
tx_output(network=provider.network, value=Decimal(0), n=index+2,
script=p2pkh_script(address=addr,
network=provider.network))
)
# first round of txn making is done by presuming minimal fee
change_sum = Decimal(inputs['total'] - network_params.min_tx_fee - pa_params.P2TH_fee)
outs.append(
tx_output(network=provider.network,
value=change_sum, n=len(outs)+1,
script=p2pkh_script(address=change_address,
network=provider.network))
)
unsigned_tx = make_raw_transaction(network=provider.network,
inputs=inputs['utxos'],
outputs=outs,
locktime=Locktime(locktime)
)
    return unsigned_tx
def normal_cloud_im(self, ksize=3):
"""Generate a NormalCloudImage from the PointCloudImage using Sobel filtering.
Parameters
----------
ksize : int
Size of the kernel to use for derivative computation
Returns
-------
:obj:`NormalCloudImage`
The corresponding NormalCloudImage.
"""
# compute direction via cross product of derivatives
gy = cv2.Sobel(self.data, cv2.CV_64F, 1, 0, ksize=ksize)
gx = cv2.Sobel(self.data, cv2.CV_64F, 0, 1, ksize=ksize)
gx_data = gx.reshape(self.height * self.width, 3)
gy_data = gy.reshape(self.height * self.width, 3)
pc_grads = np.cross(gx_data, gy_data) # default to point toward camera
# normalize
pc_grad_norms = np.linalg.norm(pc_grads, axis=1)
pc_grads[pc_grad_norms > 0] = pc_grads[pc_grad_norms > 0] / np.tile(pc_grad_norms[pc_grad_norms > 0, np.newaxis], [1, 3])
pc_grads[pc_grad_norms == 0.0] = np.array([0,0,-1.0]) # zero norm means pointing toward camera
# reshape
normal_im_data = pc_grads.reshape(self.height, self.width, 3)
# preserve zeros
zero_px = self.zero_pixels()
normal_im_data[zero_px[:,0], zero_px[:,1], :] = np.zeros(3)
        return NormalCloudImage(normal_im_data, frame=self.frame)
def delete(self, endpoint, params=None, version='1.1', json_encoded=False):
"""Shortcut for delete requests via :class:`request`"""
        return self.request(endpoint, 'DELETE', params=params, version=version, json_encoded=json_encoded)
def _remove_boundaries(self, interval):
"""
Removes the boundaries of the interval from the boundary table.
"""
begin = interval.begin
end = interval.end
if self.boundary_table[begin] == 1:
del self.boundary_table[begin]
else:
self.boundary_table[begin] -= 1
if self.boundary_table[end] == 1:
del self.boundary_table[end]
else:
            self.boundary_table[end] -= 1
def save(self, *args, **kwargs):
"""
Save object in database, updating the datetimes accordingly.
"""
# Now in UTC
now_datetime = timezone.now()
# If we are in a creation, assigns creation_datetime
if not self.id:
self.creation_datetime = now_datetime
        # Last update datetime is always updated
self.last_update_datetime = now_datetime
# Current user is creator
# (get current user with django-cuser middleware)
self.creator_user = None
current_user = CuserMiddleware.get_user()
        if current_user is not None and not current_user.is_anonymous():
self.creator_user_id = current_user.id
# Parent constructor call
        super(FieldTranslation, self).save(*args, **kwargs)
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
ns_packages = value
assert_string_list(dist, attr, ns_packages)
for nsp in ns_packages:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
parent, sep, child = nsp.rpartition('.')
if parent and parent not in ns_packages:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
            )
def getShareInfo(item):
'''
Get a dictionary of special annotations for a Telepath Proxy.
Args:
item: Item to inspect.
Notes:
This will set the ``_syn_telemeth`` attribute on the item
and the items class, so this data is only computed once.
Returns:
dict: A dictionary of methods requiring special handling by the proxy.
'''
key = f'_syn_sharinfo_{item.__class__.__module__}_{item.__class__.__qualname__}'
info = getattr(item, key, None)
if info is not None:
return info
meths = {}
info = {'meths': meths}
for name in dir(item):
if name.startswith('_'):
continue
attr = getattr(item, name, None)
if not callable(attr):
continue
# We know we can cleanly unwrap these functions
# for asyncgenerator inspection.
wrapped = getattr(attr, '__syn_wrapped__', None)
if wrapped in unwraps:
real = inspect.unwrap(attr)
if inspect.isasyncgenfunction(real):
meths[name] = {'genr': True}
continue
if inspect.isasyncgenfunction(attr):
meths[name] = {'genr': True}
try:
setattr(item, key, info)
except Exception as e: # pragma: no cover
logger.exception(f'Failed to set magic on {item}')
try:
setattr(item.__class__, key, info)
except Exception as e: # pragma: no cover
logger.exception(f'Failed to set magic on {item.__class__}')
    return info
def delete_vnic_template_for_vlan(self, vlan_id):
"""Deletes VNIC Template for a vlan_id and physnet if it exists."""
with self.session.begin(subtransactions=True):
try:
self.session.query(ucsm_model.VnicTemplate).filter_by(
vlan_id=vlan_id).delete()
except orm.exc.NoResultFound:
                return
def main():
"""
NAME
zeq.py
DESCRIPTION
plots demagnetization data. The equal area projection has the X direction (usually North in geographic coordinates)
to the top. The red line is the X axis of the Zijderveld diagram. Solid symbols are lower hemisphere.
The solid (open) symbols in the Zijderveld diagram are X,Y (X,Z) pairs. The demagnetization diagram plots the
    fractional remanence remaining after each step. The green line is the fraction of the total remanence removed
between each step.
INPUT FORMAT
takes specimen_name treatment intensity declination inclination in space
delimited file
SYNTAX
    zeq.py [command line options]
OPTIONS
-f FILE for reading from command line
-u [mT,C] specify units of mT OR C, default is unscaled
-sav save figure and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-beg [step number] treatment step for beginning of PCA calculation, 0 is default
-end [step number] treatment step for end of PCA calculation, last step is default
-ct [l,p,f] Calculation Type: best-fit line, plane or fisher mean; line is default
"""
files,fmt,plot={},'svg',0
end_pca,beg_pca="",""
calculation_type='DE-BFL'
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
else:
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-u' in sys.argv:
ind=sys.argv.index('-u')
units=sys.argv[ind+1]
if units=="C":SIunits="K"
if units=="mT":SIunits="T"
else:
units="U"
SIunits="U"
if '-sav' in sys.argv:plot=1
if '-ct' in sys.argv:
ind=sys.argv.index('-ct')
ct=sys.argv[ind+1]
if ct=='f':calculation_type='DE-FM'
if ct=='p':calculation_type='DE-BFP'
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-beg' in sys.argv:
ind=sys.argv.index('-beg')
beg_pca=int(sys.argv[ind+1])
if '-end' in sys.argv:
ind=sys.argv.index('-end')
end_pca=int(sys.argv[ind+1])
f=open(file,'r')
data=f.readlines()
#
datablock= [] # set up list for data
s="" # initialize specimen name
angle=0.
for line in data: # read in the data from standard input
rec=line.split() # split each line on space to get records
if angle=="":angle=float(rec[3])
if s=="":s=rec[0]
if units=='mT':datablock.append([float(rec[1])*1e-3,float(rec[3]),float(rec[4]),1e-3*float(rec[2]),'','g']) # treatment, dec, inc, int # convert to T and Am^2 (assume emu)
if units=='C':datablock.append([float(rec[1])+273.,float(rec[3]),float(rec[4]),1e-3*float(rec[2]),'','g']) # treatment, dec, inc, int, convert to K and Am^2, assume emu
if units=='U':datablock.append([float(rec[1]),float(rec[3]),float(rec[4]),float(rec[2]),'','g']) # treatment, dec, inc, int, using unscaled units
# define figure numbers in a dictionary for equal area, zijderveld,
    # and intensity vs. demagnetization step respectively
ZED={}
ZED['eqarea'],ZED['zijd'], ZED['demag']=1,2,3
pmagplotlib.plot_init(ZED['eqarea'],5,5) # initialize plots
pmagplotlib.plot_init(ZED['zijd'],5,5)
pmagplotlib.plot_init(ZED['demag'],5,5)
#
#
pmagplotlib.plot_zed(ZED,datablock,angle,s,SIunits) # plot the data
if plot==0:pmagplotlib.draw_figs(ZED)
#
# print out data for this sample to screen
#
recnum=0
for plotrec in datablock:
if units=='mT':print('%i %7.1f %8.3e %7.1f %7.1f ' % (recnum,plotrec[0]*1e3,plotrec[3],plotrec[1],plotrec[2]))
if units=='C':print('%i %7.1f %8.3e %7.1f %7.1f ' % (recnum,plotrec[0]-273.,plotrec[3],plotrec[1],plotrec[2]))
if units=='U':print('%i %7.1f %8.3e %7.1f %7.1f ' % (recnum,plotrec[0],plotrec[3],plotrec[1],plotrec[2]))
recnum += 1
if plot==0:
while 1:
if beg_pca!="" and end_pca!="" and calculation_type!="":
pmagplotlib.plot_zed(ZED,datablock,angle,s,SIunits) # plot the data
mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type) # get best-fit direction/great circle
pmagplotlib.plot_dir(ZED,mpars,datablock,angle) # plot the best-fit direction/great circle
print('Specimen, calc_type, N, min, max, MAD, dec, inc')
if units=='mT':print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"]*1e3,mpars["measurement_step_max"]*1e3,mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
if units=='C':print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"]-273,mpars["measurement_step_max"]-273,mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
if units=='U':print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"],mpars["measurement_step_max"],mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
if end_pca=="":end_pca=len(datablock)-1 # initialize end_pca, beg_pca to first and last measurement
if beg_pca=="":beg_pca=0
ans=input(" s[a]ve plot, [b]ounds for pca and calculate, change [h]orizontal projection angle, [q]uit: ")
if ans =='q':
sys.exit()
if ans=='a':
files={}
for key in list(ZED.keys()):
files[key]=s+'_'+key+'.'+fmt
pmagplotlib.save_plots(ZED,files)
if ans=='h':
angle=float(input(" Declination to project onto horizontal axis? "))
pmagplotlib.plot_zed(ZED,datablock,angle,s,SIunits) # plot the data
if ans=='b':
GoOn=0
while GoOn==0: # keep going until reasonable bounds are set
print('Enter index of first point for pca: ','[',beg_pca,']')
answer=input('return to keep default ')
if answer != "":beg_pca=int(answer)
print('Enter index of last point for pca: ','[',end_pca,']')
answer=input('return to keep default ')
if answer != "":
end_pca=int(answer)
if beg_pca >=0 and beg_pca<=len(datablock)-2 and end_pca>0 and end_pca<len(datablock):
GoOn=1
else:
print("Bad entry of indices - try again")
end_pca=len(datablock)-1
beg_pca=0
GoOn=0
while GoOn==0:
ct=input('Enter Calculation Type: best-fit line, plane or fisher mean [l]/p/f : ' )
if ct=="" or ct=="l":
calculation_type="DE-BFL"
GoOn=1 # all good
elif ct=='p':
calculation_type="DE-BFP"
GoOn=1 # all good
elif ct=='f':
calculation_type="DE-FM"
GoOn=1 # all good
else:
print("bad entry of calculation type: try again. ") # keep going
pmagplotlib.plot_zed(ZED,datablock,angle,s,SIunits) # plot the data
mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type) # get best-fit direction/great circle
pmagplotlib.plot_dir(ZED,mpars,datablock,angle) # plot the best-fit direction/great circle
print('Specimen, calc_type, N, min, max, MAD, dec, inc')
if units=='mT':print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"]*1e3,mpars["measurement_step_max"]*1e3,mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
if units=='C':print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"]-273,mpars["measurement_step_max"]-273,mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
if units=='U':print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"],mpars["measurement_step_max"],mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
pmagplotlib.draw_figs(ZED)
else:
print(beg_pca,end_pca)
if beg_pca!="" and end_pca!="":
pmagplotlib.plot_zed(ZED,datablock,angle,s,SIunits) # plot the data
mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type) # get best-fit direction/great circle
pmagplotlib.plot_dir(ZED,mpars,datablock,angle) # plot the best-fit direction/great circle
print('Specimen, calc_type, N, min, max, MAD, dec, inc')
if units=='mT':print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"]*1e3,mpars["measurement_step_max"]*1e3,mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
if units=='C':print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"]-273,mpars["measurement_step_max"]-273,mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
if units=='U':print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s,calculation_type,mpars["specimen_n"],mpars["measurement_step_min"],mpars["measurement_step_max"],mpars["specimen_mad"],mpars["specimen_dec"],mpars["specimen_inc"]))
files={}
for key in list(ZED.keys()):
files[key]=s+'_'+key+'.'+fmt
            pmagplotlib.save_plots(ZED,files)
def get_indicators(self):
"""List indicators available on the remote instance."""
response = self._get('', 'get-indicators')
response['message'] = "%i indicators:\n%s" % (
len(response['indicators']),
"\n".join(response['indicators'])
)
        return response
def add_permission(self, queue, label, aws_account_id, action_name, callback=None):
"""
Add a permission to a queue.
:type queue: :class:`boto.sqs.queue.Queue`
:param queue: The queue object
:type label: str or unicode
:param label: A unique identification of the permission you are setting.
Maximum of 80 characters ``[0-9a-zA-Z_-]``
Example, AliceSendMessage
:type aws_account_id: str or unicode
        :param aws_account_id: The AWS account number of the principal who will
be given permission. The principal must have
an AWS account, but does not need to be signed
up for Amazon SQS. For information
about locating the AWS account identification.
:type action_name: str or unicode
:param action_name: The action. Valid choices are:
\*|SendMessage|ReceiveMessage|DeleteMessage|
ChangeMessageVisibility|GetQueueAttributes
:rtype: bool
:return: True if successful, False otherwise.
"""
params = {'Label': label,
'AWSAccountId' : aws_account_id,
'ActionName' : action_name}
        return self.get_status('AddPermission', params, queue.id, callback=callback)
def infer(self, number_of_processes=1, *args, **kwargs):
"""
:param number_of_processes: If set to more than 1, the inference routines will be paralellised
using ``multiprocessing`` module
:param args: arguments to pass to :meth:`Inference.infer`
:param kwargs: keyword arguments to pass to :meth:`Inference.infer`
:return:
"""
if number_of_processes == 1:
results = map(lambda x: x.infer(*args, **kwargs), self._inference_objects)
else:
inference_objects = self._inference_objects
results = raw_results_in_parallel(self._inference_objects, number_of_processes, *args,
**kwargs)
results = [inference._result_from_raw_result(raw_result)
for inference, raw_result in zip(inference_objects, results)]
results = sorted(results, key=lambda x: x.distance_at_minimum)
        return InferenceResultsCollection(results)
def form_invalid(self, form, forms, open_tabs, position_form_default):
"""
Called if a form is invalid. Re-renders the context data with the data-filled forms and errors.
"""
# return self.render_to_response( self.get_context_data( form = form, forms = forms ) )
        return self.render_to_response(self.get_context_data(form=form, forms=forms, open_tabs=open_tabs, position_form_default=position_form_default))
def hist(hists,
stacked=True,
reverse=False,
xpadding=0, ypadding=.1,
yerror_in_padding=True,
logy=None,
snap=True,
axes=None,
**kwargs):
"""
Make a matplotlib hist plot from a ROOT histogram, stack or
list of histograms.
Parameters
----------
hists : Hist, list of Hist, HistStack
The histogram(s) to be plotted
stacked : bool, optional (default=True)
If True then stack the histograms with the first histogram on the
bottom, otherwise overlay them with the first histogram in the
background.
reverse : bool, optional (default=False)
If True then reverse the order of the stack or overlay.
xpadding : float or 2-tuple of floats, optional (default=0)
Padding to add on the left and right sides of the plot as a fraction of
the axes width after the padding has been added. Specify unique left
and right padding with a 2-tuple.
ypadding : float or 2-tuple of floats, optional (default=.1)
Padding to add on the top and bottom of the plot as a fraction of
the axes height after the padding has been added. Specify unique top
and bottom padding with a 2-tuple.
yerror_in_padding : bool, optional (default=True)
If True then make the padding inclusive of the y errors otherwise
only pad around the y values.
logy : bool, optional (default=None)
Apply special treatment of a log-scale y-axis to display the histogram
correctly. If None (the default) then automatically determine if the
y-axis is log-scale.
snap : bool, optional (default=True)
If True (the default) then the origin is an implicit lower bound of the
histogram unless the histogram has both positive and negative bins.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
All additional keyword arguments are passed to matplotlib's
fill_between for the filled regions and matplotlib's step function
for the edges.
Returns
-------
The return value from matplotlib's hist function, or list of such return
values if a stack or list of histograms was plotted.
"""
if axes is None:
axes = plt.gca()
if logy is None:
logy = axes.get_yscale() == 'log'
curr_xlim = axes.get_xlim()
curr_ylim = axes.get_ylim()
was_empty = not axes.has_data()
returns = []
if isinstance(hists, _Hist):
# This is a single plottable object.
returns = _hist(hists, axes=axes, logy=logy, **kwargs)
_set_bounds(hists, axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
elif stacked:
# draw the top histogram first so its edges don't cover the histograms
# beneath it in the stack
if not reverse:
hists = list(hists)[::-1]
for i, h in enumerate(hists):
kwargs_local = kwargs.copy()
if i == len(hists) - 1:
low = h.Clone()
low.Reset()
else:
low = sum(hists[i + 1:])
high = h + low
high.alpha = getattr(h, 'alpha', None)
proxy = _hist(high, bottom=low, axes=axes, logy=logy, **kwargs)
returns.append(proxy)
if not reverse:
returns = returns[::-1]
_set_bounds(sum(hists), axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
else:
for h in _maybe_reversed(hists, reverse):
returns.append(_hist(h, axes=axes, logy=logy, **kwargs))
if reverse:
returns = returns[::-1]
_set_bounds(hists[max(range(len(hists)), key=lambda idx: hists[idx].max())],
axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
    return returns
def zpopmin(self, key, count=None, *, encoding=_NOTSET):
"""Removes and returns up to count members with the lowest scores
in the sorted set stored at key.
:raises TypeError: if count is not int
"""
if count is not None and not isinstance(count, int):
raise TypeError("count argument must be int")
args = []
if count is not None:
args.extend([count])
fut = self.execute(b'ZPOPMIN', key, *args, encoding=encoding)
        return fut
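# Usage sketch, assuming an aioredis-style connection `redis` (exact call
# signatures vary between client versions):
async def cheapest(redis):
    await redis.zadd('prices', 1.0, 'apple', 2.0, 'banana')
    return await redis.zpopmin('prices')  # e.g. [b'apple', b'1']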
def bar(msg='', width=40, position=None):
r"""
Returns a string with text centered in a bar caption.
Examples:
>>> bar('test', width=10)
'== test =='
>>> bar(width=10)
'=========='
>>> bar('Richard Dean Anderson is...', position='top', width=50)
'//========= Richard Dean Anderson is... ========\\\\'
>>> bar('...MacGyver', position='bottom', width=50)
'\\\\================= ...MacGyver ================//'
"""
if position == 'top':
start_bar = '//'
end_bar = r'\\'
elif position == 'bottom':
start_bar = r'\\'
end_bar = '//'
else:
start_bar = end_bar = '=='
    if msg:
        msg = ' ' + msg + ' '
    width -= 4
    return start_bar + msg.center(width, '=') + end_bar
def launch(self, task, **kwargs):
"""
Build the input files and submit the task via the :class:`Qadapter`
Args:
task: :class:`TaskObject`
Returns:
Process object.
"""
if task.status == task.S_LOCKED:
raise ValueError("You shall not submit a locked task!")
# Build the task
task.build()
# Pass information on the time limit to Abinit (we always assume ndtset == 1)
if isinstance(task, AbinitTask):
args = kwargs.get("exec_args", [])
if args is None: args = []
args = args[:]
args.append("--timelimit %s" % qu.time2slurm(self.qadapter.timelimit))
kwargs["exec_args"] = args
# Write the submission script
script_file = self.write_jobfile(task, **kwargs)
# Submit the task and save the queue id.
try:
qjob, process = self.qadapter.submit_to_queue(script_file)
task.set_status(task.S_SUB, msg='Submitted to queue')
task.set_qjob(qjob)
return process
except self.qadapter.MaxNumLaunchesError as exc:
# TODO: Here we should try to switch to another qadapter
# 1) Find a new parallel configuration in those stored in task.pconfs
# 2) Change the input file.
# 3) Regenerate the submission script
# 4) Relaunch
task.set_status(task.S_ERROR, msg="max_num_launches reached: %s" % str(exc))
            raise
def setEnabled(self, state):
"""
Updates the drop shadow effect for this widget on enable/disable
state change.
:param state | <bool>
"""
super(XToolButton, self).setEnabled(state)
        self.updateUi()
def check_pidfile(pidfile, debug):
"""Check that a process is not running more than once, using PIDFILE"""
# Check PID exists and see if the PID is running
if os.path.isfile(pidfile):
pidfile_handle = open(pidfile, 'r')
# try and read the PID file. If no luck, remove it
try:
pid = int(pidfile_handle.read())
pidfile_handle.close()
if check_pid(pid, debug):
return True
except:
pass
# PID is not active, remove the PID file
os.unlink(pidfile)
# Create a PID file, to ensure this is script is only run once (at a time)
pid = str(os.getpid())
open(pidfile, 'w').write(pid)
    return False
def set_alpn_select_callback(self, callback):
"""
Specify a callback function that will be called on the server when a
client offers protocols using ALPN.
:param callback: The callback function. It will be invoked with two
arguments: the Connection, and a list of offered protocols as
bytestrings, e.g ``[b'http/1.1', b'spdy/2']``. It should return
one of those bytestrings, the chosen protocol.
"""
self._alpn_select_helper = _ALPNSelectHelper(callback)
self._alpn_select_callback = self._alpn_select_helper.callback
_lib.SSL_CTX_set_alpn_select_cb(
            self._context, self._alpn_select_callback, _ffi.NULL)
def baltree(ntips, treeheight=1.0):
"""
Returns a balanced tree topology.
"""
# require even number of tips
if ntips % 2:
raise ToytreeError("balanced trees must have even number of tips.")
# make first cherry
rtree = toytree.tree()
rtree.treenode.add_child(name="0")
rtree.treenode.add_child(name="1")
# add tips in a balanced way
for i in range(2, ntips):
# get node to split
node = return_small_clade(rtree.treenode)
# add two children
node.add_child(name=node.name)
node.add_child(name=str(i))
# rename ancestral node
node.name = None
# rename tips so names are in order
idx = 0
for node in rtree.treenode.traverse("postorder"):
if node.is_leaf():
node.name = str(idx)
idx += 1
# get toytree from newick
tre = toytree.tree(rtree.write(tree_format=9))
tre = tre.mod.make_ultrametric()
self = tre.mod.node_scale_root_height(treeheight)
self._coords.update()
    return self
def boolean(text):
"""
An alternative to the "bool" argument type which interprets string
values.
"""
tmp = text.lower()
if tmp.isdigit():
return bool(int(tmp))
elif tmp in ('t', 'true', 'on', 'yes'):
return True
elif tmp in ('f', 'false', 'off', 'no'):
return False
raise ValueError("invalid Boolean value %r" % text) | 0.002632 |
def pack(header, s):
"""Pack a string into MXImageRecord.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
s : str
Raw image string to be packed.
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> with open(path, 'r') as file:
... s = file.read()
>>> packed_s = mx.recordio.pack(header, s)
"""
header = IRHeader(*header)
if isinstance(header.label, numbers.Number):
header = header._replace(flag=0)
else:
label = np.asarray(header.label, dtype=np.float32)
header = header._replace(flag=label.size, label=0)
s = label.tostring() + s
s = struct.pack(_IR_FORMAT, *header) + s
    return s
def save(self, filename):
""" save colormap to file"""
        self.fig.savefig(filename, facecolor='black', edgecolor='black')
def Huq_Loth(x, rhol, rhog):
r'''Calculates void fraction in two-phase flow according to the model of
[1]_, also given in [2]_, [3]_, and [4]_.
.. math::
\alpha = 1 - \frac{2(1-x)^2}{1 - 2x + \left[1 + 4x(1-x)\left(\frac
{\rho_l}{\rho_g}-1\right)\right]^{0.5}}
Parameters
----------
x : float
Quality at the specific tube interval []
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
Returns
-------
alpha : float
Void fraction (area of gas / total area of channel), [-]
Notes
-----
[1]_ has been reviewed, and matches the expressions given in the reviews
[2]_, [3]_, and [4]_; the form of the expression is rearranged somewhat
differently.
Examples
--------
>>> Huq_Loth(.4, 800, 2.5)
0.9593868838476147
References
----------
.. [1] Huq, Reazul, and John L. Loth. "Analytical Two-Phase Flow Void
Prediction Method." Journal of Thermophysics and Heat Transfer 6, no. 1
(January 1, 1992): 139-44. doi:10.2514/3.329.
.. [2] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for Two-
Phase Refrigerant Flow in Pipes." Applied Thermal Engineering 64, no.
1-2 (March 2014): 242–51. doi:10.1016/j.applthermaleng.2013.12.032.
.. [3] Dalkilic, A. S., S. Laohalertdecha, and S. Wongwises. "Effect of
Void Fraction Models on the Two-Phase Friction Factor of R134a during
Condensation in Vertical Downward Flow in a Smooth Tube." International
Communications in Heat and Mass Transfer 35, no. 8 (October 2008):
921-27. doi:10.1016/j.icheatmasstransfer.2008.04.001.
.. [4] Woldesemayat, Melkamu A., and Afshin J. Ghajar. "Comparison of Void
Fraction Correlations for Different Flow Patterns in Horizontal and
Upward Inclined Pipes." International Journal of Multiphase Flow 33,
no. 4 (April 2007): 347-370. doi:10.1016/j.ijmultiphaseflow.2006.09.004.
'''
B = 2*x*(1-x)
D = (1 + 2*B*(rhol/rhog -1))**0.5
    return 1 - 2*(1-x)**2/(1 - 2*x + D)
def credentials(self, credentials):
"""
Sets the credentials of this WebAuthorization.
The confidential portion of the `Authorization` header that follows the `type` field. This field is write-only. It is omitted by read operations. If authorization is required, the `credentials` value must be provided whenever a File Source is created or modified. An update to a dataset that does not change the File Source may omit the `credentials` field--the update will preserve the previous value.
:param credentials: The credentials of this WebAuthorization.
:type: str
"""
if credentials is not None and len(credentials) > 1024:
raise ValueError("Invalid value for `credentials`, length must be less than or equal to `1024`")
if credentials is not None and len(credentials) < 1:
raise ValueError("Invalid value for `credentials`, length must be greater than or equal to `1`")
        if credentials is not None and not re.search('^[\\x21-\\x7E \\t]*$', credentials):
            raise ValueError("Invalid value for `credentials`, must match the pattern `/^[\\x21-\\x7E \\t]*$/`")
        self._credentials = credentials
def q12d_local(vertices, lame, mu):
"""Local stiffness matrix for two dimensional elasticity on a square element.
    Parameters
    ----------
    vertices : array
        Coordinates of the four element vertices
    lame : float
        Lame's first parameter
    mu : float
        Shear modulus
See Also
--------
linear_elasticity
Notes
-----
Vertices should be listed in counter-clockwise order::
[3]----[2]
| |
| |
[0]----[1]
Degrees of freedom are enumerated as follows::
[x=6,y=7]----[x=4,y=5]
| |
| |
[x=0,y=1]----[x=2,y=3]
"""
M = lame + 2*mu # P-wave modulus
R_11 = np.matrix([[2, -2, -1, 1],
[-2, 2, 1, -1],
[-1, 1, 2, -2],
[1, -1, -2, 2]]) / 6.0
R_12 = np.matrix([[1, 1, -1, -1],
[-1, -1, 1, 1],
[-1, -1, 1, 1],
[1, 1, -1, -1]]) / 4.0
R_22 = np.matrix([[2, 1, -1, -2],
[1, 2, -2, -1],
[-1, -2, 2, 1],
[-2, -1, 1, 2]]) / 6.0
F = inv(np.vstack((vertices[1] - vertices[0], vertices[3] - vertices[0])))
K = np.zeros((8, 8)) # stiffness matrix
E = F.T * np.matrix([[M, 0], [0, mu]]) * F
K[0::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
E = F.T * np.matrix([[mu, 0], [0, M]]) * F
K[1::2, 1::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
E = F.T * np.matrix([[0, mu], [lame, 0]]) * F
K[1::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
K[0::2, 1::2] = K[1::2, 0::2].T
K /= det(F)
    return K
def _try_trigger_before_first_request_funcs(self): # pylint: disable=C0103
"""Runs each function from ``self.before_first_request_funcs`` once and only once."""
if self._after_first_request_handled:
return
else:
with self._before_first_request_lock:
if self._after_first_request_handled:
return
for func in self._before_first_request_funcs:
func()
                self._after_first_request_handled = True
def add_ephemeral_listener(self, callback, event_type=None):
"""Add a callback handler for ephemeral events going to this room.
Args:
callback (func(room, event)): Callback called when an ephemeral event arrives.
event_type (str): The event_type to filter for.
Returns:
uuid.UUID: Unique id of the listener, can be used to identify the listener.
"""
listener_id = uuid4()
self.ephemeral_listeners.append(
{
'uid': listener_id,
'callback': callback,
'event_type': event_type
}
)
        return listener_id
def sim(self, src, tar, qval=2, alpha=1, beta=1, bias=None):
"""Return the Tversky index of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alpha : float
Tversky index parameter as described above
beta : float
Tversky index parameter as described above
bias : float
The symmetric Tversky index bias parameter
Returns
-------
float
Tversky similarity
Raises
------
ValueError
Unsupported weight assignment; alpha and beta must be greater than
or equal to 0.
Examples
--------
>>> cmp = Tversky()
>>> cmp.sim('cat', 'hat')
0.3333333333333333
>>> cmp.sim('Niall', 'Neil')
0.2222222222222222
>>> cmp.sim('aluminum', 'Catalan')
0.0625
>>> cmp.sim('ATCG', 'TAGC')
0.0
"""
if alpha < 0 or beta < 0:
raise ValueError(
'Unsupported weight assignment; alpha and beta '
+ 'must be greater than or equal to 0.'
)
if src == tar:
return 1.0
elif not src or not tar:
return 0.0
q_src, q_tar = self._get_qgrams(src, tar, qval)
q_src_mag = sum(q_src.values())
q_tar_mag = sum(q_tar.values())
q_intersection_mag = sum((q_src & q_tar).values())
if not q_src or not q_tar:
return 0.0
if bias is None:
return q_intersection_mag / (
q_intersection_mag
+ alpha * (q_src_mag - q_intersection_mag)
+ beta * (q_tar_mag - q_intersection_mag)
)
a_val = min(
q_src_mag - q_intersection_mag, q_tar_mag - q_intersection_mag
)
b_val = max(
q_src_mag - q_intersection_mag, q_tar_mag - q_intersection_mag
)
c_val = q_intersection_mag + bias
return c_val / (beta * (alpha * a_val + (1 - alpha) * b_val) + c_val) | 0.000869 |
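The doctest above never exercises bias; passing one switches to the symmetric branch at the end of the method — a quick sketch using the same class:

cmp = Tversky()
# default: unbiased Tversky with alpha = beta = 1 (Jaccard-like weighting)
cmp.sim('Niall', 'Neil')
# a bias value routes through the a_val/b_val/c_val branch, symmetrising the index
cmp.sim('Niall', 'Neil', alpha=0.5, beta=0.5, bias=1.0)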
def create_rsa_public_and_private_from_pem(pem, passphrase=None):
"""
<Purpose>
Generate public and private RSA keys from an optionally encrypted PEM. The
public and private keys returned conform to
'securesystemslib.formats.PEMRSA_SCHEMA' and have the form:
'-----BEGIN RSA PUBLIC KEY----- ... -----END RSA PUBLIC KEY-----'
and
'-----BEGIN RSA PRIVATE KEY----- ...-----END RSA PRIVATE KEY-----'
The public and private keys are returned as strings in PEM format.
In case the private key part of 'pem' is encrypted pyca/cryptography's
load_pem_private_key() method is passed passphrase. In the default case
here, pyca/cryptography will decrypt with a PBKDF1+MD5
  strengthened 'passphrase', and 3DES with CBC mode for encryption/decryption.
Alternatively, key data may be encrypted with AES-CTR-Mode and the
passphrase strengthened with PBKDF2+SHA256, although this method is used
only with TUF encrypted key files.
>>> public, private = generate_rsa_public_and_private(2048)
>>> passphrase = 'secret'
>>> encrypted_pem = create_rsa_encrypted_pem(private, passphrase)
>>> returned_public, returned_private = \
create_rsa_public_and_private_from_pem(encrypted_pem, passphrase)
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(returned_public)
True
>>> securesystemslib.formats.PEMRSA_SCHEMA.matches(returned_private)
True
>>> public == returned_public
True
>>> private == returned_private
True
<Arguments>
pem:
A byte string in PEM format, where the private key can be encrypted.
It has the form:
'-----BEGIN RSA PRIVATE KEY-----\n
Proc-Type: 4,ENCRYPTED\nDEK-Info: DES-EDE3-CBC ...'
passphrase: (optional)
The passphrase, or password, to decrypt the private part of the RSA
key. 'passphrase' is not directly used as the encryption key, instead
it is used to derive a stronger symmetric key.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if the public and private RSA keys
cannot be generated from 'pem', or exported in PEM format.
<Side Effects>
pyca/cryptography's 'serialization.load_pem_private_key()' called to
perform the actual conversion from an encrypted RSA private key to
PEM format.
<Returns>
A (public, private) tuple containing the RSA keys in PEM format.
"""
  # Does 'pem' have the correct format?
# This check will ensure 'pem' has the appropriate number
# of objects and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.PEMRSA_SCHEMA.check_match(pem)
# If passed, does 'passphrase' have the correct format?
if passphrase is not None:
securesystemslib.formats.PASSWORD_SCHEMA.check_match(passphrase)
passphrase = passphrase.encode('utf-8')
# Generate a pyca/cryptography key object from 'pem'. The generated
# pyca/cryptography key contains the required export methods needed to
# generate the PEM-formatted representations of the public and private RSA
# key.
try:
private_key = load_pem_private_key(pem.encode('utf-8'),
passphrase, backend=default_backend())
# pyca/cryptography's expected exceptions for 'load_pem_private_key()':
# ValueError: If the PEM data could not be decrypted.
# (possibly because the passphrase is wrong)."
# TypeError: If a password was given and the private key was not encrypted.
# Or if the key was encrypted but no password was supplied.
# UnsupportedAlgorithm: If the private key (or if the key is encrypted with
# an unsupported symmetric cipher) is not supported by the backend.
except (ValueError, TypeError, cryptography.exceptions.UnsupportedAlgorithm) as e:
    # Raise 'securesystemslib.exceptions.CryptoError' with pyca/cryptography's
    # exception message. Avoid propagating pyca/cryptography's exception trace
    # to avoid revealing sensitive error information.
raise securesystemslib.exceptions.CryptoError('RSA (public, private) tuple'
' cannot be generated from the encrypted PEM string: ' + str(e))
# Export the public and private halves of the pyca/cryptography RSA key
# object. The (public, private) tuple returned contains the public and
# private RSA keys in PEM format, as strings.
# Extract the public & private halves of the RSA key and generate their
# PEM-formatted representations. Return the key pair as a (public, private)
  # tuple, where each RSA key is a string in PEM format.
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
# Need to generate the public key from the private one before serializing
# to PEM format.
public_key = private_key.public_key()
public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
return public_pem.decode(), private_pem.decode() | 0.007729 |
def add_view(self, view_name, map_func, reduce_func=None, **kwargs):
"""
Appends a MapReduce view to the locally cached DesignDocument View
dictionary. To create a JSON query index use
:func:`~cloudant.database.CloudantDatabase.create_query_index` instead.
        A CloudantArgumentError is raised if a view by that name already
        exists, and a CloudantDesignDocumentException is raised if this
        method is used on a design document that manages JSON query indexes.
:param str view_name: Name used to identify the View.
:param str map_func: Javascript map function.
:param str reduce_func: Optional Javascript reduce function.
"""
if self.get_view(view_name) is not None:
raise CloudantArgumentError(107, view_name)
if self.get('language', None) == QUERY_LANGUAGE:
raise CloudantDesignDocumentException(101)
view = View(self, view_name, map_func, reduce_func, **kwargs)
        self.views[view_name] = view | 0.002099 |
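A usage sketch against the python-cloudant API (the open database handle db and the design document name are assumptions):

from cloudant.design_document import DesignDocument

ddoc = DesignDocument(db, '_design/views001')
ddoc.add_view('by_name', 'function(doc) { emit(doc.name, 1); }')
ddoc.save()   # persist the new MapReduce view to the remote database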
def _verify(certificate_or_public_key, signature, data, hash_algorithm):
"""
Verifies an RSA, DSA or ECDSA signature
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha224", "sha256", "sha384" or "sha512"
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):
raise TypeError(pretty_message(
'''
certificate_or_public_key must be an instance of the Certificate or
PublicKey class, not %s
''',
type_name(certificate_or_public_key)
))
if not isinstance(signature, byte_cls):
raise TypeError(pretty_message(
'''
signature must be a byte string, not %s
''',
type_name(signature)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
valid_hash_algorithms = set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'])
if certificate_or_public_key.algorithm == 'rsa':
valid_hash_algorithms |= set(['raw'])
if hash_algorithm not in valid_hash_algorithms:
valid_hash_algorithms_error = '"md5", "sha1", "sha224", "sha256", "sha384", "sha512"'
if certificate_or_public_key.algorithm == 'rsa':
valid_hash_algorithms_error += ', "raw"'
raise ValueError(pretty_message(
'''
hash_algorithm must be one of %s, not %s
''',
valid_hash_algorithms_error,
repr(hash_algorithm)
))
if certificate_or_public_key.algorithm == 'rsa' and hash_algorithm == 'raw':
if len(data) > certificate_or_public_key.byte_size - 11:
raise ValueError(pretty_message(
'''
data must be 11 bytes shorter than the key size when
hash_algorithm is "raw" - key size is %s bytes, but data
is %s bytes long
''',
certificate_or_public_key.byte_size,
len(data)
))
result = Security.SecKeyRawVerify(
certificate_or_public_key.sec_key_ref,
SecurityConst.kSecPaddingPKCS1,
data,
len(data),
signature,
len(signature)
)
# errSSLCrypto is returned in some situations on macOS 10.12
if result == SecurityConst.errSecVerifyFailed or result == SecurityConst.errSSLCrypto:
raise SignatureError('Signature is invalid')
handle_sec_error(result)
return
cf_signature = None
cf_data = None
cf_hash_length = None
sec_transform = None
try:
error_pointer = new(CoreFoundation, 'CFErrorRef *')
cf_signature = CFHelpers.cf_data_from_bytes(signature)
sec_transform = Security.SecVerifyTransformCreate(
certificate_or_public_key.sec_key_ref,
cf_signature,
error_pointer
)
handle_cf_error(error_pointer)
hash_constant = {
'md5': Security.kSecDigestMD5,
'sha1': Security.kSecDigestSHA1,
'sha224': Security.kSecDigestSHA2,
'sha256': Security.kSecDigestSHA2,
'sha384': Security.kSecDigestSHA2,
'sha512': Security.kSecDigestSHA2
}[hash_algorithm]
Security.SecTransformSetAttribute(
sec_transform,
Security.kSecDigestTypeAttribute,
hash_constant,
error_pointer
)
handle_cf_error(error_pointer)
if hash_algorithm in set(['sha224', 'sha256', 'sha384', 'sha512']):
hash_length = {
'sha224': 224,
'sha256': 256,
'sha384': 384,
'sha512': 512
}[hash_algorithm]
cf_hash_length = CFHelpers.cf_number_from_integer(hash_length)
Security.SecTransformSetAttribute(
sec_transform,
Security.kSecDigestLengthAttribute,
cf_hash_length,
error_pointer
)
handle_cf_error(error_pointer)
if certificate_or_public_key.algorithm == 'rsa':
Security.SecTransformSetAttribute(
sec_transform,
Security.kSecPaddingKey,
Security.kSecPaddingPKCS1Key,
error_pointer
)
handle_cf_error(error_pointer)
cf_data = CFHelpers.cf_data_from_bytes(data)
Security.SecTransformSetAttribute(
sec_transform,
Security.kSecTransformInputAttributeName,
cf_data,
error_pointer
)
handle_cf_error(error_pointer)
res = Security.SecTransformExecute(sec_transform, error_pointer)
if not is_null(error_pointer):
error = unwrap(error_pointer)
if not is_null(error):
raise SignatureError('Signature is invalid')
res = bool(CoreFoundation.CFBooleanGetValue(res))
if not res:
raise SignatureError('Signature is invalid')
finally:
if sec_transform:
CoreFoundation.CFRelease(sec_transform)
if cf_signature:
CoreFoundation.CFRelease(cf_signature)
if cf_data:
CoreFoundation.CFRelease(cf_data)
if cf_hash_length:
CoreFoundation.CFRelease(cf_hash_length) | 0.001152 |
def stringify(self, value):
"""Convert value to string
This method is used to generate a simple JSON representation of the
object (without dereferencing objects etc.)
"""
# SuperModel -> UID
if ISuperModel.providedBy(value):
return str(value)
# DateTime -> ISO8601 format
elif isinstance(value, (DateTime)):
return value.ISO8601()
# Image/Files -> filename
elif safe_hasattr(value, "filename"):
return value.filename
        # Dict -> stringify each value
        elif isinstance(value, dict):
            return {k: self.stringify(v) for k, v in value.iteritems()}
        # List -> stringify each item
        elif isinstance(value, (list, tuple, LazyMap)):
            return map(self.stringify, value)
# Callables
elif safe_callable(value):
return self.stringify(value())
elif isinstance(value, unicode):
value = value.encode("utf8")
try:
return str(value)
except (AttributeError, TypeError, ValueError):
logger.warn("Could not convert {} to string".format(repr(value)))
return None | 0.001656 |
def mode(self, axis=0, numeric_only=False, dropna=True):
"""Perform mode across the DataFrame.
Args:
axis (int): The axis to take the mode on.
numeric_only (bool): if True, only apply to numeric columns.
Returns:
DataFrame: The mode of the DataFrame.
"""
axis = self._get_axis_number(axis)
return self.__constructor__(
query_compiler=self._query_compiler.mode(
axis=axis, numeric_only=numeric_only, dropna=dropna
)
) | 0.00354 |
def _call_numpy(self, x):
"""Return ``self(x)`` for numpy back-end.
Parameters
----------
x : `numpy.ndarray`
Array representing the function to be transformed
Returns
-------
out : `numpy.ndarray`
Result of the transform
"""
# Pre-processing before calculating the DFT
# Note: since the FFT call is out-of-place, it does not matter if
# preprocess produces real or complex output in the R2C variant.
# There is no significant time difference between (full) R2C and
# C2C DFT in Numpy.
preproc = self._preprocess(x)
# The actual call to the FFT library, out-of-place unfortunately
if self.halfcomplex:
out = np.fft.rfftn(preproc, axes=self.axes)
else:
if self.sign == '-':
out = np.fft.fftn(preproc, axes=self.axes)
else:
out = np.fft.ifftn(preproc, axes=self.axes)
# Numpy's FFT normalizes by 1 / prod(shape[axes]), we
# need to undo that
out *= np.prod(np.take(self.domain.shape, self.axes))
# Post-processing accounting for shift, scaling and interpolation
self._postprocess(out, out=out)
return out | 0.001531 |
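The final scaling mirrors numpy's convention that only the inverse transform is normalized; a standalone check of that convention (pure numpy, independent of this class):

import numpy as np

x = np.random.rand(4, 8)
# np.fft.ifftn divides by prod(shape); multiplying it back recovers the
# unnormalized inverse, i.e. conj(fftn(conj(x)))
lhs = np.fft.ifftn(x) * np.prod(x.shape)
rhs = np.conj(np.fft.fftn(np.conj(x)))
np.testing.assert_allclose(lhs, rhs)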
def pos(self):
"""
Lazy-loads the part of speech tag for this word
:getter: Returns the plain string value of the POS tag for the word
:type: str
"""
if self._pos is None:
poses = self._element.xpath('POS/text()')
if len(poses) > 0:
self._pos = poses[0]
return self._pos | 0.005435 |
def dec(data, **kwargs):
'''
Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
box_type = _get_config(**kwargs)['box_type']
if box_type == 'secretbox':
return secretbox_decrypt(data, **kwargs)
return sealedbox_decrypt(data, **kwargs) | 0.002732 |
def hmean(nums):
r"""Return harmonic mean.
The harmonic mean is defined as:
:math:`\frac{|nums|}{\sum\limits_{i}\frac{1}{nums_i}}`
Following the behavior of Wolfram|Alpha:
- If one of the values in nums is 0, return 0.
- If more than one value in nums is 0, return NaN.
Cf. https://en.wikipedia.org/wiki/Harmonic_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The harmonic mean of nums
Raises
------
AttributeError
hmean requires at least one value
Examples
--------
>>> hmean([1, 2, 3, 4])
1.9200000000000004
>>> hmean([1, 2])
1.3333333333333333
>>> hmean([0, 5, 1000])
0
"""
if len(nums) < 1:
raise AttributeError('hmean requires at least one value')
elif len(nums) == 1:
return nums[0]
else:
for i in range(1, len(nums)):
if nums[0] != nums[i]:
break
else:
return nums[0]
if 0 in nums:
if nums.count(0) > 1:
return float('nan')
return 0
return len(nums) / sum(1 / i for i in nums) | 0.000855 |
def basic_recover(self, requeue=False):
"""Redeliver unacknowledged messages
This method asks the broker to redeliver all unacknowledged
messages on a specified channel. Zero or more messages may be
redelivered. This method is only allowed on non-transacted
channels.
RULE:
The server MUST set the redelivered flag on all messages
that are resent.
RULE:
The server MUST raise a channel exception if this is
called on a transacted channel.
PARAMETERS:
requeue: boolean
requeue the message
If this field is False, the message will be redelivered
to the original recipient. If this field is True, the
server will attempt to requeue the message,
potentially then delivering it to an alternative
subscriber.
"""
args = AMQPWriter()
args.write_bit(requeue)
self._send_method((60, 110), args) | 0.001908 |
def add_env(url, saltenv):
'''
append `saltenv` to `url` as a query parameter to a 'salt://' url
'''
if not url.startswith('salt://'):
return url
    path, _ = parse(url)  # any saltenv already on the url is discarded
return create(path, saltenv) | 0.004329 |
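Expected behaviour, assuming parse/create are the salt:// url helpers from salt.utils.url (the output shown is illustrative):

add_env('salt://top.sls', 'dev')   # -> 'salt://top.sls?saltenv=dev'
add_env('/etc/hosts', 'dev')       # non-salt:// urls are returned unchanged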
def createNewTriples(win):
"""Add entries to the triples tables based on new images in the db"""
win.help("Building list of exposures to look for triples")
cols=('e.expnum', 'object',
'mjdate',
'uttime',
'elongation',
'filter',
'obs_iq_refccd','qso_status' )
header='%6s %-10s%-12s%10s%10s%10s%8s%10s' % cols
pointings=getNewTriples()
num_p=len(pointings)
for pointing in pointings:
pid=pointing[0]
mjd=pointing[1]
expnums=getExpnums(pointing=pid,night=mjd)
num_p=num_p-1
        while True:
### Loop over this pointing until keystroke gets us out
win.help("Select (space) members of triplets - %d remaining" % num_p )
## start with an empty list
explist=[]
choices=[]
current_date=''
for expnum in expnums:
info=getExpInfo(expnum[0])
row=()
                if str(info['triple']) != 'None':
                    continue
                if str(info['obs_iq_refccd']) == 'None':
                    info['obs_iq_refccd'] = -1.0
choices.append('%6d %10s %15s %10s %8.2f %10s %8.2f %10s' % (
int(info['e.expnum']),
str(info['object']),
str(info['mjdate']),
str(info['uttime']),
float(str(info['elongation'])),
str(info['filter']),
float(str(info['obs_iq_refccd'])),
str(info['qso_status'])
))
explist.append(expnum[0])
if len(choices)<3:
### we need to provide at least 3 choices,
### otherwise this isn't a triple (is it)
break
### win.list returns the user's choices as a list.
choice_list=win.list(header,choices)
### zero length list implies were done.
            if choice_list is None:
break
### is this actually a triple?
if len(choice_list)!=3:
win.help("Must have 3 members to make a tripple")
continue
### Create a new line in the triple table
sql = "INSERT INTO triples (id, pointing ) VALUES ( NULL, %s ) "
cfeps.execute(sql, ( pid, ) )
sql = "SELECT id FROM triples WHERE pointing=%s order by id desc"
cfeps.execute(sql, ( pid, ) )
ttt=cfeps.fetchall()
triple= ttt[0][0]
win.help(str(triple))
### record the members of this new triple.
sql = "INSERT INTO triple_members (triple, expnum) VALUES ( %s, %s)";
win.help(sql)
for exp in choice_list:
cfeps.execute(sql,(triple,explist[exp]))
return(0) | 0.026464 |
def send_start(remote, code, device=None, address=None):
"""
All parameters are passed to irsend. See the man page for irsend
for details about their usage.
Parameters
----------
remote: str
code: str
device: str
address: str
Notes
-----
No attempt is made to catch or handle errors. See the documentation
for subprocess.check_output to see the types of exceptions it may raise.
"""
args = ['send_start', remote, code]
_call(args, device, address) | 0.001946 |
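A usage sketch; the remote and code names below are hypothetical and must match your lircd configuration:

# start repeating KEY_VOLUMEUP on the remote named 'acme_tv' ...
send_start('acme_tv', 'KEY_VOLUMEUP')
# ... and a later send_stop with the same remote/code ends the repeat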
def add(name,
uid=None,
gid=None,
groups=None,
home=None,
shell=None,
fullname=None,
createhome=True,
**kwargs):
'''
Add a user to the minion
CLI Example:
.. code-block:: bash
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
'''
if info(name):
raise CommandExecutionError('User \'{0}\' already exists'.format(name))
if salt.utils.stringutils.contains_whitespace(name):
raise SaltInvocationError('Username cannot contain whitespace')
if uid is None:
uid = _first_avail_uid()
if gid is None:
gid = 20 # gid 20 == 'staff', the default group
if home is None:
home = '/Users/{0}'.format(name)
if shell is None:
shell = '/bin/bash'
if fullname is None:
fullname = ''
if not isinstance(uid, int):
raise SaltInvocationError('uid must be an integer')
if not isinstance(gid, int):
raise SaltInvocationError('gid must be an integer')
name_path = '/Users/{0}'.format(name)
_dscl([name_path, 'UniqueID', uid])
_dscl([name_path, 'PrimaryGroupID', gid])
_dscl([name_path, 'UserShell', shell])
_dscl([name_path, 'NFSHomeDirectory', home])
_dscl([name_path, 'RealName', fullname])
# Make sure home directory exists
if createhome:
__salt__['file.mkdir'](home, user=uid, group=gid)
# dscl buffers changes, sleep before setting group membership
time.sleep(1)
if groups:
chgroups(name, groups)
return True | 0.000638 |
def antenna1(self, context):
""" antenna1 data source """
lrow, urow = MS.uvw_row_extents(context)
antenna1 = self._manager.ordered_uvw_table.getcol(
MS.ANTENNA1, startrow=lrow, nrow=urow-lrow)
return antenna1.reshape(context.shape).astype(context.dtype) | 0.006689 |
def _format_response(self, request, response):
""" Format response using appropriate datamapper.
Take the devil response and turn it into django response, ready to
be returned to the client.
"""
res = datamapper.format(request, response, self)
# data is now formatted, let's check if the status_code is set
        if res.status_code == 0:
res.status_code = 200
# apply headers
self._add_resposne_headers(res, response)
return res | 0.003876 |
def synchronize(func):
""" Decorator for :meth:`synchronize`. """
@wraps(func)
def outer(self, *args, **kwargs):
@self.synchronize
def inner(self, *args, **kwargs):
return func(self, *args, **kwargs)
return inner(self, *args, **kwargs)
return outer | 0.0033 |
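A sketch of the intended call pattern: the decorated method's instance must expose a synchronize attribute (here a hypothetical lock-based one) that the wrapper resolves per call.

import threading

_synchronize = synchronize   # alias: the class below reuses the name

class Counter(object):
    def __init__(self):
        self._lock = threading.Lock()
        self.value = 0

    def synchronize(self, func):
        # per-instance decorator looked up by the wrapper via self.synchronize
        def locked(*args, **kwargs):
            with self._lock:
                return func(*args, **kwargs)
        return locked

    @_synchronize
    def increment(self):
        self.value += 1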
def callback(self):
"""Callback after this async pipeline finishes."""
if self.was_aborted:
return
mapreduce_id = self.outputs.job_id.value
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
if mapreduce_state.result_status != model.MapreduceState.RESULT_SUCCESS:
self.retry("Job %s had status %s" % (
mapreduce_id, mapreduce_state.result_status))
return
mapper_spec = mapreduce_state.mapreduce_spec.mapper
outputs = []
output_writer_class = mapper_spec.output_writer_class()
if (output_writer_class and
mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):
outputs = output_writer_class.get_filenames(mapreduce_state)
self.fill(self.outputs.result_status, mapreduce_state.result_status)
self.fill(self.outputs.counters, mapreduce_state.counters_map.to_dict())
self.complete(outputs) | 0.006601 |
def spec(self):
"""Return a SourceSpec to describe this source"""
from ambry_sources.sources import SourceSpec
d = self.dict
d['url'] = self.url
# Will get the URL twice; once as ref and once as URL, but the ref is ignored
return SourceSpec(**d) | 0.010135 |
def _GetRecordValue(self, record, value_entry):
"""Retrieves a specific value from the record.
Args:
record (pyesedb.record): ESE record.
value_entry (int): value entry.
Returns:
object: value.
Raises:
ValueError: if the value is not supported.
"""
column_type = record.get_column_type(value_entry)
long_value = None
if record.is_long_value(value_entry):
long_value = record.get_value_data_as_long_value(value_entry)
if record.is_multi_value(value_entry):
# TODO: implement
raise ValueError('Multi value support not implemented yet.')
if column_type == pyesedb.column_types.NULL:
return None
if column_type == pyesedb.column_types.BOOLEAN:
# TODO: implement
raise ValueError('Boolean value support not implemented yet.')
if column_type in self.INTEGER_COLUMN_TYPES:
if long_value:
raise ValueError('Long integer value not supported.')
return record.get_value_data_as_integer(value_entry)
if column_type in self.FLOATING_POINT_COLUMN_TYPES:
if long_value:
raise ValueError('Long floating point value not supported.')
return record.get_value_data_as_floating_point(value_entry)
if column_type in self.STRING_COLUMN_TYPES:
if long_value:
return long_value.get_data_as_string()
return record.get_value_data_as_string(value_entry)
if column_type == pyesedb.column_types.GUID:
# TODO: implement
raise ValueError('GUID value support not implemented yet.')
if long_value:
return long_value.get_data()
return record.get_value_data(value_entry) | 0.009685 |
def _print_installed_apps(self, controller):
"""Print out a list of installed sprockets applications
:param str controller: The name of the controller to get apps for
"""
print('\nInstalled Sprockets %s Apps\n' % controller.upper())
print("{0:<25} {1:>25}".format('Name', 'Module'))
        print('-' * 51)
for app in self._get_applications(controller):
print('{0:<25} {1:>25}'.format(app.name, '(%s)' % app.module_name))
print('') | 0.003868 |
def assert_rank_at_most(x, rank, data=None, summarize=None, message=None,
name=None):
"""Assert `x` has rank equal to `rank` or smaller.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_at_most(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_at_most".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank or lower.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
"""
with tf.compat.v2.name_scope(name or 'assert_rank_at_most'):
return tf.compat.v1.assert_less_equal(
tf.rank(x), rank, data=data, summarize=summarize, message=message) | 0.002625 |
def execute(self, conn, app="", release_version="", pset_hash="", output_label="",
global_tag='', transaction = False):
"""
returns id for a given application
"""
sql = self.sql
binds = {}
setAnd=False
if not app == "":
sql += " A.APP_NAME=:app_name"
binds["app_name"]=app
setAnd=True
if not release_version == "":
if setAnd : sql += " AND "
sql += " R.RELEASE_VERSION=:release_version"
binds["release_version"]=release_version
setAnd=True
if not pset_hash == "":
if setAnd : sql += " AND "
sql += " P.PSET_HASH=:pset_hash"
binds["pset_hash"]=pset_hash
setAnd=True
if not output_label == "":
if setAnd : sql += " AND "
sql += " O.OUTPUT_MODULE_LABEL=:output_module_label"
binds["output_module_label"]=output_label
setAnd=True
if not global_tag == "":
if setAnd : sql += " AND "
sql += " O.GLOBAL_TAG=:global_tag"
binds["global_tag"]=global_tag
if app == release_version == pset_hash == global_tag == "":
            dbsExceptionHandler('dbsException-invalid-input', "Either app_name, release_version, pset_hash or global_tag must be provided", self.logger.exception)
result = self.dbi.processData(sql, binds, conn, transaction)
plist = self.formatDict(result)
if len(plist) < 1: return -1
return plist[0]["output_mod_config_id"] | 0.043601 |
def killTask(self, driver, taskId):
"""
Kill parent task process and all its spawned children
"""
try:
pid = self.runningTasks[taskId]
pgid = os.getpgid(pid)
except KeyError:
pass
else:
os.killpg(pgid, signal.SIGKILL) | 0.00639 |
def expire_queues(self):
'''
Expires old queue_dict keys that have not been used in a long time.
Prevents slow memory build up when crawling lots of different domains
'''
curr_time = time.time()
for key in list(self.queue_dict):
diff = curr_time - self.queue_dict[key][1]
if diff > self.queue_timeout:
self.logger.debug("Expiring domain queue key " + key)
del self.queue_dict[key]
if key in self.queue_keys:
self.queue_keys.remove(key) | 0.003478 |
def clean_meta(meta, including_info=False, logger=None):
""" Clean meta dict. Optionally log changes using the given logger.
@param logger: If given, a callable accepting a string message.
@return: Set of keys removed from C{meta}.
"""
modified = set()
for key in meta.keys():
if [key] not in METAFILE_STD_KEYS:
if logger:
logger("Removing key %r..." % (key,))
del meta[key]
modified.add(key)
if including_info:
for key in meta["info"].keys():
if ["info", key] not in METAFILE_STD_KEYS:
if logger:
logger("Removing key %r..." % ("info." + key,))
del meta["info"][key]
modified.add("info." + key)
for idx, entry in enumerate(meta["info"].get("files", [])):
for key in entry.keys():
if ["info", "files", key] not in METAFILE_STD_KEYS:
if logger:
logger("Removing key %r from file #%d..." % (key, idx + 1))
del entry[key]
modified.add("info.files." + key)
# Remove crap that certain PHP software puts in paths
entry["path"] = [i for i in entry["path"] if i]
return modified | 0.001529 |
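A usage sketch with a hand-built metainfo dict; which keys survive depends on METAFILE_STD_KEYS, so the non-standard key below is an assumption:

messages = []
meta = {
    'announce': 'http://tracker.example.com/announce',
    'x-cross-seed': 'junk',                       # assumed non-standard key
    'info': {'name': 'example', 'piece length': 16384, 'pieces': ''},
}
removed = clean_meta(meta, including_info=True, logger=messages.append)
# removed would contain 'x-cross-seed'; messages holds the log lines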
def j0_1(a=1):
r"""Hankel transform pair J0_1 ([Ande75]_)."""
def lhs(x):
return x*np.exp(-a*x**2)
def rhs(b):
return np.exp(-b**2/(4*a))/(2*a)
return Ghosh('j0', lhs, rhs) | 0.004831 |
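A numerical spot-check of the pair (scipy assumed): the order-0 Hankel transform of the Gaussian lhs reproduces rhs.

import numpy as np
from scipy.integrate import quad
from scipy.special import j0

a, b = 1.0, 2.0
numeric, _ = quad(lambda x: x * np.exp(-a * x**2) * j0(b * x), 0, np.inf)
analytic = np.exp(-b**2 / (4 * a)) / (2 * a)
np.testing.assert_allclose(numeric, analytic, rtol=1e-6)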
def select_time(da, **indexer):
"""Select entries according to a time period.
Parameters
----------
da : xarray.DataArray
Input data.
**indexer : {dim: indexer, }, optional
Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values,
        month=1 to select January, or month=[6,7,8] to select summer months. If no indexer is given, all values are
considered.
Returns
-------
xr.DataArray
Selected input values.
"""
if not indexer:
selected = da
else:
key, val = indexer.popitem()
time_att = getattr(da.time.dt, key)
selected = da.sel(time=time_att.isin(val)).dropna(dim='time')
return selected | 0.003979 |
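A usage sketch with a synthetic daily series (xarray and pandas assumed):

import numpy as np
import pandas as pd
import xarray as xr

times = pd.date_range('2000-01-01', periods=366, freq='D')
da = xr.DataArray(np.arange(366.0), coords=[times], dims='time')
summer = select_time(da, month=[6, 7, 8])   # June through August only
winter = select_time(da, season='DJF')      # December-February values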
def sortedIndex(self, obj, iterator=lambda x: x):
"""
Use a comparator function to figure out the smallest index at which
an object should be inserted so as to maintain order.
Uses binary search.
"""
array = self.obj
value = iterator(obj)
low = 0
high = len(array)
while low < high:
mid = (low + high) >> 1
if iterator(array[mid]) < value:
low = mid + 1
else:
high = mid
return self._wrap(low) | 0.003643 |
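With the identity iterator the loop above is exactly bisect_left; a standalone comparison using only the standard library:

from bisect import bisect_left

def sorted_index(array, value):
    # same binary search as the method body, identity iterator
    low, high = 0, len(array)
    while low < high:
        mid = (low + high) >> 1
        if array[mid] < value:
            low = mid + 1
        else:
            high = mid
    return low

assert sorted_index([10, 20, 30, 40], 35) == bisect_left([10, 20, 30, 40], 35) == 3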
def register(self, resource=None, **meta):
""" Add resource to the API.
:param resource: Resource class for registration
:param **meta: Redefine Meta options for the resource
:return adrest.views.Resource: Generated resource.
"""
if resource is None:
def wrapper(resource):
return self.register(resource, **meta)
return wrapper
        # Must be a subclass of ResourceView
if not issubclass(resource, ResourceView):
raise AssertionError("%s not subclass of ResourceView" % resource)
# Cannot be abstract
if resource._meta.abstract:
raise AssertionError("Attempt register of abstract resource: %s."
% resource)
        # Factory for resources
meta = dict(self.meta, **meta)
meta['name'] = meta.get('name', resource._meta.name)
options = type('Meta', tuple(), meta)
params = dict(api=self, Meta=options, **meta)
params['__module__'] = '%s.%s' % (
self.prefix, self.str_version.replace('.', '_'))
params['__doc__'] = resource.__doc__
new_resource = type(
'%s%s' % (resource.__name__, len(self.resources)),
(resource,), params)
if self.resources.get(new_resource._meta.url_name):
logger.warning(
"A resource '%r' is replacing the existing record for '%s'",
new_resource, self.resources.get(new_resource._meta.url_name))
self.resources[new_resource._meta.url_name] = new_resource
return resource | 0.001229 |
def get_task_statuses(self):
"""
Get all tasks' statuses
:return: a dict which key is the task name and value is the :class:`odps.models.Instance.Task` object
:rtype: dict
"""
params = {'taskstatus': ''}
resp = self._client.get(self.resource(), params=params)
self.parse(self._client, resp, obj=self)
return dict([(task.name, task) for task in self._tasks]) | 0.006928 |
def match_via_squared_difference(image, template, raw_tolerance=1, sq_diff_tolerance=0.1):
""" Matchihng algorithm based on normalised cross correlation.
Using this matching prevents false positives occuring for bright patches in the image
"""
h, w = image.shape
th, tw = template.shape
# fft based convolution enables fast matching of large images
correlation = fftconvolve(image, template[::-1,::-1])
# trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height
correlation = correlation[th-1:h, tw-1:w]
    # find image regions which are potential matches
match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
    # bright spots in images can lead to false positives - the normalisation carried out here eliminates those
results = calculate_squared_differences(match_position_dict, correlation, template, sq_diff_tolerance=sq_diff_tolerance)
return results | 0.007752 |
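A usage sketch: search a random image for one of its own patches (the module helpers referenced above must be importable alongside the function):

import numpy as np

rng = np.random.RandomState(0)
image = rng.rand(100, 100)
template = image[20:36, 40:56].copy()   # 16x16 patch cropped from the image
matches = match_via_squared_difference(image, template)
# a perfect crop should be reported at (or near) row 20, column 40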
def _insert_uncompressed(collection_name, docs, check_keys,
safe, last_error_args, continue_on_error, opts):
"""Internal insert message helper."""
op_insert, max_bson_size = _insert(
collection_name, docs, check_keys, continue_on_error, opts)
rid, msg = __pack_message(2002, op_insert)
if safe:
rid, gle, _ = __last_error(collection_name, last_error_args)
return rid, msg + gle, max_bson_size
return rid, msg, max_bson_size | 0.004175 |