text | score |
---|---|
def call_operation(self, operation, **kwargs):
"""
A generic method to call any operation supported by the Lambda handler
"""
data = {'operation': operation}
data.update(kwargs)
return self.invoke(data) | 0.008 |
def _perturbation(self):
"""
Returns Gaussian perturbation
"""
if self.P>1:
scales = []
for term_i in range(self.n_terms):
_scales = SP.randn(self.diag[term_i].shape[0])
if self.offset[term_i]>0:
_scales = SP.concatenate((_scales,SP.zeros(1)))
scales.append(_scales)
scales = SP.concatenate(scales)
else:
scales = SP.randn(self.vd.getNumberScales())
return scales | 0.011364 |
def a10_allocate_ip_from_dhcp_range(self, subnet, interface_id, mac, port_id):
"""Search for an available IP.addr from unallocated nmodels.IPAllocationPool range.
If no addresses are available then an error is raised. Returns the address as a string.
This search is conducted by a difference of the nmodels.IPAllocationPool set_a
and the current IP allocations.
"""
subnet_id = subnet["id"]
network_id = subnet["network_id"]
iprange_result = self.get_ipallocationpool_by_subnet_id(subnet_id)
ip_in_use_list = [x.ip_address for x in self.get_ipallocations_by_subnet_id(subnet_id)]
range_begin, range_end = iprange_result.first_ip, iprange_result.last_ip
ip_address = IPHelpers.find_unused_ip(range_begin, range_end, ip_in_use_list)
if not ip_address:
msg = "Cannot allocate from subnet {0}".format(subnet)
LOG.error(msg)
# TODO(mdurrant) - Raise neutron exception
        raise Exception(msg)
mark_in_use = {
"ip_address": ip_address,
"network_id": network_id,
"port_id": port_id,
"subnet_id": subnet["id"]
}
self.create_ipallocation(mark_in_use)
return ip_address, subnet["cidr"], mark_in_use["port_id"] | 0.006061 |
def aggregate(self, out_file: str = None, select: np.ndarray = None, group_by: Union[str, np.ndarray] = "Clusters", aggr_by: str = "mean", aggr_ca_by: Dict[str, str] = None) -> np.ndarray:
"""
Aggregate the Loom file by applying aggregation functions to the main matrix as well as to the column attributes
Args:
out_file The name of the output Loom file (will be appended to if it exists)
select Bool array giving the columns to include (or None, to include all)
group_by The column attribute to group by, or an np.ndarray of integer group labels
aggr_by The aggregation function for the main matrix
aggr_ca_by A dictionary of aggregation functions for the column attributes (or None to skip)
Returns:
m Aggregated main matrix
Remarks:
aggr_by gives the aggregation function for the main matrix
        aggr_ca_by is a dictionary with column attributes as keys and aggregation functions as values
Aggregation functions can be any valid aggregation function from here: https://github.com/ml31415/numpy-groupies
In addition, you can specify:
"tally" to count the number of occurences of each value of a categorical attribute
"""
ca = {} # type: Dict[str, np.ndarray]
if select is not None:
raise ValueError("The 'select' argument is deprecated")
if isinstance(group_by, np.ndarray):
labels = group_by
else:
labels = (self.ca[group_by]).astype('int')
_, zero_strt_sort_noholes_lbls = np.unique(labels, return_inverse=True)
n_groups = len(set(labels))
if aggr_ca_by is not None:
for key in self.ca.keys():
if key not in aggr_ca_by:
continue
func = aggr_ca_by[key]
if func == "tally":
for val in set(self.ca[key]):
if np.issubdtype(type(val), np.str_):
valnew = val.replace("/", "-") # Slashes are not allowed in attribute names
valnew = valnew.replace(".", "_") # Nor are periods
ca[key + "_" + str(valnew)] = npg.aggregate(zero_strt_sort_noholes_lbls, (self.ca[key] == val).astype('int'), func="sum", fill_value=0)
elif func == "mode":
def mode(x): # type: ignore
return scipy.stats.mode(x)[0][0]
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=mode, fill_value=0).astype('str')
elif func == "mean":
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=0)
elif func == "first":
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=self.ca[key][0])
m = np.empty((self.shape[0], n_groups))
for (_, selection, view) in self.scan(axis=0, layers=[""]):
vals_aggr = npg.aggregate(zero_strt_sort_noholes_lbls, view[:, :], func=aggr_by, axis=1, fill_value=0)
m[selection, :] = vals_aggr
if out_file is not None:
loompy.create(out_file, m, self.ra, ca)
return m | 0.025044 |
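# Hedged standalone sketch of the relabelling trick used in aggregate() above:
# np.unique(..., return_inverse=True) maps arbitrary integer labels to contiguous
# zero-based group ids, which numpy_groupies then aggregates per group. The data
# below is made up purely for illustration.
import numpy as np
import numpy_groupies as npg

labels = np.array([7, 3, 7, 12, 3, 7])                   # e.g. cluster ids with holes
_, zero_based = np.unique(labels, return_inverse=True)   # -> [1, 0, 1, 2, 0, 1]
values = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
group_means = npg.aggregate(zero_based, values, func="mean", fill_value=0)
print(group_means)                                       # -> [3.5, 3.333..., 4.0]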
def sparse_var(X):
'''
    Compute the column-wise variance of a sparse matrix
    :param X: a scipy sparse matrix
    :return: 1-d numpy array of per-column variances
'''
Xc = X.copy()
Xc.data **= 2
return np.array(Xc.mean(axis=0) - np.power(X.mean(axis=0), 2))[0] | 0.005181 |
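# Minimal check of the identity Var[X] = E[X^2] - (E[X])^2 that sparse_var() above
# relies on; assumes scipy/numpy and the function defined above, with made-up data.
import numpy as np
from scipy.sparse import csr_matrix

X = csr_matrix(np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 0.0]]))
print(sparse_var(X))              # column-wise variance, approx [2.667, 1.556]
print(X.toarray().var(axis=0))    # dense reference producing the same values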
def _parse_metadata_filename(self, metadata):
"""
Parse the header metadata to pull out the filename and then store it under the key 'filename'
"""
index = metadata['Content-Disposition'].index('=')+1
metadata['filename'] = metadata['Content-Disposition'][index:].replace('"', '')
return metadata | 0.011628 |
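# Standalone sketch of the filename extraction above; the header value is made up.
# The stdlib email parser is shown as a more robust alternative for quoted parameters.
from email.message import Message

header = 'attachment; filename="report 2021.csv"'
manual = header[header.index('=') + 1:].replace('"', '')   # same logic as the method above
msg = Message()
msg['Content-Disposition'] = header
print(manual, '|', msg.get_filename())                     # both yield: report 2021.csv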
def delete_track(self, href=None):
"""Delete a track.
'href' the relative index of the track. May not be none.
Returns nothing.
    If the response status is not 204, throws an APIException."""
# Argument error checking.
assert href is not None
raw_result = self.delete(href)
if raw_result.status != 204:
raise APIException(raw_result.status, raw_result.json) | 0.004566 |
def List(self):
"""
Lists the keys and values.
"""
print()
for key in list(self.keys()):
print(key,'=',self[key])
print() | 0.022099 |
def heartbeat():
    """Call Heartbeat URL"""
    print("We got a call heartbeat notification\n")
    if request.method == 'POST':
        print(request.form)
    else:
        print(request.args)
    return "OK" | 0.004739 |
def create_table(self, table, exists_ok=False, retry=DEFAULT_RETRY):
"""API call: create a table via a PUT request
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.table.Table` to create.
If ``table`` is a reference, an empty table is created
with the specified ID. The dataset that the table belongs to
must already exist.
exists_ok (bool):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the table.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.cloud.bigquery.table.Table:
A new ``Table`` returned from the service.
"""
table = _table_arg_to_table(table, default_project=self.project)
path = "/projects/%s/datasets/%s/tables" % (table.project, table.dataset_id)
data = table.to_api_repr()
try:
api_response = self._call_api(retry, method="POST", path=path, data=data)
return Table.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_table(table.reference, retry=retry) | 0.002516 |
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute.
"""
path = os.path.normcase(os.path.abspath(path))
if _drive_re.match(path):
path = path[0] + '|' + path[2:]
url = urllib.quote(path)
url = url.replace(os.path.sep, '/')
url = url.lstrip('/')
return 'file:///' + url | 0.002857 |
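# A Python 3 sketch of the same conversion, assuming only the standard library;
# pathlib handles quoting, drive letters and path separators itself.
from pathlib import Path

def path_to_url_py3(path):
    """Convert a filesystem path to an absolute file:// URL."""
    return Path(path).resolve().as_uri()

print(path_to_url_py3("."))   # e.g. file:///home/user/project on POSIX systems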
def playlist_songs_add(
self,
songs,
playlist,
*,
after=None,
before=None,
index=None,
position=None
):
"""Add songs to a playlist.
Note:
* Provide no optional arguments to add to end.
* Provide playlist song dicts for ``after`` and/or ``before``.
* Provide a zero-based ``index``.
* Provide a one-based ``position``.
Songs are inserted *at* given index or position.
It's also possible to add to the end by using
``len(songs)`` for index or ``len(songs) + 1`` for position.
Parameters:
songs (list): A list of song dicts.
playlist (dict): A playlist dict.
after (dict, Optional): A playlist song dict ``songs`` will follow.
before (dict, Optional): A playlist song dict ``songs`` will precede.
index (int, Optional): The zero-based index position to insert ``songs``.
position (int, Optional): The one-based position to insert ``songs``.
Returns:
dict: Playlist dict including songs.
"""
playlist_songs = self.playlist_songs(playlist)
prev, next_ = get_ple_prev_next(
playlist_songs,
after=after,
before=before,
index=index,
position=position
)
songs_len = len(songs)
for i, song in enumerate(songs):
if 'storeId' in song:
song_id = song['storeId']
elif 'trackId' in song:
song_id = song['trackId']
else:
song_id = song['id']
mutation = mc_calls.PlaylistEntriesBatch.create(
song_id, playlist['id'],
preceding_entry_id=prev.get('id'),
following_entry_id=next_.get('id')
)
response = self._call(mc_calls.PlaylistEntriesBatch, mutation)
result = response.body['mutate_response'][0]
# TODO: Proper exception on failure.
if result['response_code'] != 'OK':
break
if i < songs_len - 1:
while True:
prev = self.playlist_song(result['id'])
if prev:
break
return self.playlist(playlist['id'], include_songs=True) | 0.032822 |
def output_reference(self, name):
"""Return a reference to the given output for use in an input
of a next Step.
For a Step named `echo` that has an output called `echoed`, the
reference `echo/echoed` is returned.
Args:
name (str): the name of the Step output
Raises:
ValueError: The name provided is not a valid output name for this
Step.
"""
if name not in self.output_names:
raise ValueError('Invalid output "{}"'.format(name))
return Reference(step_name=self.name_in_workflow, output_name=name) | 0.00319 |
def init_file_mapping_store():
""" init_file_mapping_store: creates log to keep track of downloaded files
Args: None
Returns: None
"""
# Make storage directory for restore files if it doesn't already exist
path = os.path.join(RESTORE_DIRECTORY, FILE_STORE_LOCATION)
if not os.path.exists(path):
os.makedirs(path) | 0.002809 |
def _adjust_boundaries(self, boundary_indices, text_file, real_wave_mfcc, sync_root, force_aba_auto=False, leaf_level=False):
"""
Adjust boundaries as requested by the user.
Return the computed time map, that is,
a list of pairs ``[start_time, end_time]``,
of length equal to number of fragments + 2,
where the two extra elements are for
the HEAD (first) and TAIL (last).
"""
# boundary_indices contains the boundary indices in the all_mfcc of real_wave_mfcc
# starting with the (head-1st fragment) and ending with (-1th fragment-tail)
aba_parameters = self.task.configuration.aba_parameters()
if force_aba_auto:
self.log(u"Forced running algorithm: 'auto'")
aba_parameters["algorithm"] = (AdjustBoundaryAlgorithm.AUTO, [])
# note that the other aba settings (nonspeech and nozero)
# remain as specified by the user
self.log([u"ABA parameters: %s", aba_parameters])
aba = AdjustBoundaryAlgorithm(rconf=self.rconf, logger=self.logger)
aba.adjust(
aba_parameters=aba_parameters,
real_wave_mfcc=real_wave_mfcc,
boundary_indices=boundary_indices,
text_file=text_file,
allow_arbitrary_shift=leaf_level
)
aba.append_fragment_list_to_sync_root(sync_root=sync_root) | 0.003566 |
def B_(self):
r''':math:`\underline{B}=\langle V_{n+1},M_lAM_rU\rangle`.
This property is obtained from :math:`C` if the operator is
self-adjoint. Otherwise, the inner products have to be formed
explicitly.'''
(n_, n) = self.H.shape
ls = self.linear_system
if self._B_ is None or self._B_.shape[1] < n_:
# compute B_
if ls.self_adjoint:
self._B_ = self.C.T.conj()
if n_ > n:
self._B_ = numpy.r_[self._B_,
utils.inner(self.V[:, [-1]],
self.projection.AU,
ip_B=ls.ip_B)]
else:
self._B_ = utils.inner(self.V, self.projection.AU,
ip_B=ls.ip_B)
return self._B_ | 0.002205 |
def load_ini(self, ini_file):
""" Load the contents from the ini file
Args:
ini_file (str): The file from which the settings should be loaded
"""
if ini_file and not os.path.exists(ini_file):
self.log.critical(f"Settings file specified but not found. {ini_file}")
sys.exit(1)
if not ini_file:
ini_file = f"{self.cwd}/settings.ini"
if os.path.exists(ini_file):
config = configparser.RawConfigParser(allow_no_value=True)
config.read(ini_file)
for key, value in self.spec.items():
entry = None
if value['type'] == str:
entry = config.get("settings", option=key.lower(), fallback=None)
elif value['type'] == bool:
entry = config.getboolean("settings", option=key.lower(), fallback=None)
elif value['type'] == int:
entry = config.getint("settings", option=key.lower(), fallback=None)
elif value['type'] == float:
entry = config.getfloat("settings", option=key.lower(), fallback=None)
elif value['type'] in [list, dict]:
entries = config.get("settings", option=key.lower(), fallback=None)
if entries:
try:
entry = json.loads(entries)
except json.decoder.JSONDecodeError as _err: #pragma: no cover
self.log.critical(f"Error parsing json from ini file. {entries}")
sys.exit(1)
if entry is not None:
setattr(self, key.upper(), entry) | 0.006853 |
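# Self-contained illustration of the list handling in load_ini() above: JSON stored
# as an ini value and decoded after configparser reads it. Section and keys are made up.
import configparser
import json

config = configparser.RawConfigParser(allow_no_value=True)
config.read_string('[settings]\nretries = 3\nhosts = ["a.example", "b.example"]\n')
retries = config.getint("settings", option="retries", fallback=None)
hosts = json.loads(config.get("settings", option="hosts", fallback="[]"))
print(retries, hosts)   # -> 3 ['a.example', 'b.example']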
def rsa_private_key_pkcs1_to_pkcs8(pkcs1_key):
"""Convert a PKCS1-encoded RSA private key to PKCS8."""
algorithm = RsaAlgorithmIdentifier()
algorithm["rsaEncryption"] = RSA_ENCRYPTION_ASN1_OID
pkcs8_key = PKCS8PrivateKey()
pkcs8_key["version"] = 0
pkcs8_key["privateKeyAlgorithm"] = algorithm
pkcs8_key["privateKey"] = pkcs1_key
return encoder.encode(pkcs8_key) | 0.002532 |
def _run_config_cmds(self, commands, server):
"""Execute/sends a CAPI (Command API) command to EOS.
    In this method, the list of commands is wrapped with prefix and
    postfix commands to make it understandable by EOS.
:param commands : List of command to be executed on EOS.
:param server: Server endpoint on the Arista switch to be configured
"""
command_start = ['enable', 'configure']
command_end = ['exit']
full_command = command_start + commands + command_end
self._run_eos_cmds(full_command, server) | 0.003436 |
def os_function_mapper(map):
"""
When called with an open connection, this function uses the
conn.guess_os() function to guess the operating system
of the connected host.
It then uses the given map to look up a function name that corresponds
to the operating system, and calls it. Example::
def ios_xr(job, host, conn):
pass # Do something.
def junos(job, host, conn):
pass # Do something else.
def shell(job, host, conn):
pass # Do something else.
Exscript.util.start.quickrun('myhost', os_function_mapper(globals()))
An exception is raised if a matching function is not found in the map.
:type conn: Exscript.protocols.Protocol
:param conn: The open connection.
:type map: dict
:param map: A dictionary mapping operating system name to a function.
:type args: list
:param args: Passed on to the called function.
:type kwargs: dict
:param kwargs: Passed on to the called function.
:rtype: object
:return: The return value of the called function.
"""
def decorated(job, host, conn, *args, **kwargs):
os = conn.guess_os()
func = map.get(os)
if func is None:
raise Exception('No handler for %s found.' % os)
return func(job, host, conn, *args, **kwargs)
return decorated | 0.000729 |
def get_block_info(self, block):
"""
Args:
block: block number (eg: 223212)
block hash (eg: 0000000000000000210b10d620600dc1cc2380bb58eb2408f9767eb792ed31fa)
word "last" - this will always return the latest block
word "first" - this will always return the first block
Returns:
basic block data
"""
url = '{}/block/info/{}'.format(self._url, block)
return self.make_request(url) | 0.006024 |
def dump_emails(part):
"""Show the sent emails' tested parts, to aid in debugging."""
print("Sent emails:")
for email in mail.outbox:
print(getattr(email, part)) | 0.005495 |
def workerTypeLastModified(self, *args, **kwargs):
"""
Get Worker Type Last Modified Time
This method is provided to allow workers to see when they were
last modified. The value provided through UserData can be
compared against this value to see if changes have been made
If the worker type definition has not been changed, the date
should be identical as it is the same stored value.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-last-modified.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["workerTypeLastModified"], *args, **kwargs) | 0.005666 |
def cmd_follow(self, args):
'''control following of vehicle'''
if len(args) < 2:
print("map follow 0|1")
return
follow = int(args[1])
self.map.set_follow(follow) | 0.009217 |
def parse_exchange(self):
"""Yield tuples of exchange compounds.
Each exchange compound is a tuple of compound, reaction ID, lower and
upper flux limits.
"""
if 'media' in self._model:
if 'exchange' in self._model:
raise ParseError('Both "media" and "exchange" are specified')
logger.warning(
'The "media" key is deprecated! Please use "exchange" instead:'
' https://psamm.readthedocs.io/en/stable/file_format.html')
exchange_list = self._model['media']
else:
exchange_list = self._model.get('exchange')
extracellular = self.extracellular_compartment
if exchange_list is not None:
if not isinstance(exchange_list, list):
raise ParseError('Expected "exchange" to be a list')
for exchange_compound in parse_exchange_list(
self._context, exchange_list, extracellular):
compound, reaction_id, lower, upper = exchange_compound
if compound.compartment is None:
compound = compound.in_compartment(extracellular)
yield compound, reaction_id, lower, upper | 0.001616 |
def select_rect(action, action_space, select_add, screen, screen2):
"""Select units within a rectangle."""
select = spatial(action, action_space).unit_selection_rect
out_rect = select.selection_screen_coord.add()
screen_rect = point.Rect(screen, screen2)
screen_rect.tl.assign_to(out_rect.p0)
screen_rect.br.assign_to(out_rect.p1)
select.selection_add = bool(select_add) | 0.020833 |
def get_uri_list(self, **kwargs):
"""
Returns a list of Uris to index
"""
index_status_filter = """
optional {{ ?s dcterm:modified ?modTime }} .
optional {{ ?s kds:esIndexTime ?time }} .
optional {{ ?s kds:esIndexError ?error }}
filter (
!(bound(?time)) ||
?time<?modTime ||
(bound(?error) && ?time < {idx_start_time}))
""".format(idx_start_time=self.idx_start_time.sparql)
items_query_template = """
SELECT DISTINCT ?s ?es_id
{{
VALUES ?rdftypes {{\n\t\t{rdf_types} }} .
?s a ?rdftypes .
BIND(SHA1(STR(?s)) as ?es_id) .
{status_filter}
}}
{order_by}
"""
status_filter = index_status_filter \
if not kwargs.get("no_status") else ""
order_by = kwargs.get("order_by", "")
sparql = items_query_template.format(
rdf_types="\n\t\t".join(self.rdf_types),
status_filter=status_filter,
order_by=order_by)
results = [(Uri(item['s']['value']), item['es_id']['value'],)
for item in self.tstore_conn.query(sparql=sparql)]
return results | 0.002206 |
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError as e:
raise ValueError('Unable to add handler %r: %s' % (h, e)) | 0.006231 |
def InitFromNotification(self, notification, is_pending=False):
"""Initializes this object from an existing notification.
Args:
notification: A rdfvalues.flows.Notification object.
is_pending: Indicates whether the user has already seen this notification
or not.
Returns:
The current instance.
"""
self.timestamp = notification.timestamp
self.message = notification.message
self.subject = str(notification.subject)
self.is_pending = is_pending
reference_type_enum = ApiNotificationReference.Type
# Please see the comments to aff4_objects.GRRUser.Notify implementation
# for the details of notification.type format. Short summary:
# notification.type may be one of legacy values (i.e. "ViewObject") or
# have a format of "[legacy value]:[new-style notification type]", i.e.
# "ViewObject:TYPE_CLIENT_INTERROGATED".
legacy_type = None
if ":" in notification.type:
legacy_type, new_type = notification.type.split(":", 2)
self.notification_type = new_type
else:
legacy_type = notification.type
# TODO(user): refactor notifications, so that we send a meaningful
# notification from the start, so that we don't have to do the
# bridging/conversion/guessing here.
components = self._GetUrnComponents(notification)
if legacy_type == "Discovery":
self.reference.type = reference_type_enum.CLIENT
self.reference.client = ApiNotificationClientReference(
client_id=components[0])
elif legacy_type == "ViewObject":
if len(components) >= 2 and components[0] == "hunts":
self.reference.type = reference_type_enum.HUNT
self.reference.hunt.hunt_id = components[1]
elif len(components) >= 2 and components[0] == "cron":
self.reference.type = reference_type_enum.CRON
self.reference.cron.cron_job_id = components[1]
elif len(components) >= 3 and components[1] == "flows":
self.reference.type = reference_type_enum.FLOW
self.reference.flow.flow_id = components[2]
self.reference.flow.client_id = components[0]
elif len(components) == 1 and rdf_client.ClientURN.Validate(
components[0]):
self.reference.type = reference_type_enum.CLIENT
self.reference.client.client_id = components[0]
else:
if notification.subject:
path = notification.subject.Path()
for prefix in itervalues(rdf_paths.PathSpec.AFF4_PREFIXES):
part = "/%s%s" % (components[0], prefix)
if path.startswith(part):
self.reference.type = reference_type_enum.VFS
self.reference.vfs.client_id = components[0]
self.reference.vfs.vfs_path = (prefix +
path[len(part):]).lstrip("/")
break
if self.reference.type != reference_type_enum.VFS:
self.reference.type = reference_type_enum.UNKNOWN
self.reference.unknown.subject_urn = notification.subject
elif legacy_type == "FlowStatus":
if not components or not rdf_client.ClientURN.Validate(components[0]):
self.reference.type = reference_type_enum.UNKNOWN
self.reference.unknown.subject_urn = notification.subject
else:
self.reference.type = reference_type_enum.FLOW
self.reference.flow.flow_id = notification.source.Basename()
self.reference.flow.client_id = components[0]
# TODO(user): refactor GrantAccess notification so that we don't have
# to infer approval type from the URN.
elif legacy_type == "GrantAccess":
if rdf_client.ClientURN.Validate(components[1]):
self.reference.type = reference_type_enum.CLIENT_APPROVAL
self.reference.client_approval.client_id = components[1]
self.reference.client_approval.approval_id = components[-1]
self.reference.client_approval.username = components[-2]
elif components[1] == "hunts":
self.reference.type = reference_type_enum.HUNT_APPROVAL
self.reference.hunt_approval.hunt_id = components[2]
self.reference.hunt_approval.approval_id = components[-1]
self.reference.hunt_approval.username = components[-2]
elif components[1] == "cron":
self.reference.type = reference_type_enum.CRON_JOB_APPROVAL
self.reference.cron_job_approval.cron_job_id = components[2]
self.reference.cron_job_approval.approval_id = components[-1]
self.reference.cron_job_approval.username = components[-2]
else:
self.reference.type = reference_type_enum.UNKNOWN
self.reference.unknown.subject_urn = notification.subject
self.reference.unknown.source_urn = notification.source
return self | 0.005887 |
def _make_dynamic_magic(self,magic):
"""Return a function `fun` that will execute `magic` on active frontend.
Parameters
----------
magic : string
string that will be executed as is when the returned function is called
Returns
-------
fun : function
function with no parameters, when called will execute `magic` on the
current active frontend at call time
See Also
--------
populate_all_magic_menu : generate the "All Magics..." menu
Notes
-----
`fun` executes `magic` in active frontend at the moment it is triggered,
not the active frontend at the moment it was created.
This function is mostly used to create the "All Magics..." Menu at run time.
"""
# need two level nested function to be sure to pass magic
# to active frontend **at run time**.
def inner_dynamic_magic():
self.active_frontend.execute(magic)
inner_dynamic_magic.__name__ = "dynamics_magic_s"
return inner_dynamic_magic | 0.007207 |
def _cache_credentials(self, source, credentials, connect=False):
"""Save a set of authentication credentials.
The credentials are used to login a socket whenever one is created.
If `connect` is True, verify the credentials on the server first.
"""
# Don't let other threads affect this call's data.
all_credentials = self.__all_credentials.copy()
if source in all_credentials:
# Nothing to do if we already have these credentials.
if credentials == all_credentials[source]:
return
raise OperationFailure('Another user is already authenticated '
'to this database. You must logout first.')
if connect:
server = self._get_topology().select_server(
writable_preferred_server_selector)
# get_socket() logs out of the database if logged in with old
# credentials, and logs in with new ones.
with server.get_socket(all_credentials) as sock_info:
sock_info.authenticate(credentials)
# If several threads run _cache_credentials at once, last one wins.
self.__all_credentials[source] = credentials | 0.001612 |
def _request(self, method, url, headers=None, params=None, **aio_kwargs):
"""Setup Authorization Header.."""
access_token = params.pop(self.access_token_key, None)
if access_token:
headers['Authorization'] = "Bearer %s" % access_token
return super(DiscordClient, self)._request(
method, url, headers=headers, params=params, **aio_kwargs) | 0.005089 |
def to(self, unit):
"""
convert between units. Returns a new nparray object with the new units
"""
if not _has_astropy:
raise ImportError("astropy must be installed for unit/quantity support")
if self.unit is None:
raise ValueError("no units currently set")
if not is_unit_or_unitstring(unit)[0]:
raise ValueError("unit not recognized")
mult_factor = self.unit.to(unit)
copy = self.copy() * mult_factor
copy.unit = unit
return copy | 0.005445 |
def auto_migrate_storage_system(*, persistent_storage_system=None, new_persistent_storage_system=None, data_item_uuids=None, deletions: typing.List[uuid.UUID] = None, utilized_deletions: typing.Set[uuid.UUID] = None, ignore_older_files: bool = True):
"""Migrate items from the storage system to the object context.
Files in data_item_uuids have already been loaded and are ignored (not migrated).
Files in deletes have been deleted in object context and are ignored (not migrated) and then added
to the utilized deletions list.
Data items will have persistent_object_context set upon return, but caller will need to call finish_reading
on each of the data items.
"""
storage_handlers = persistent_storage_system.find_data_items()
ReaderInfo = collections.namedtuple("ReaderInfo", ["properties", "changed_ref", "large_format", "storage_handler", "identifier"])
reader_info_list = list()
for storage_handler in storage_handlers:
try:
large_format = isinstance(storage_handler, HDF5Handler.HDF5Handler)
properties = Migration.transform_to_latest(storage_handler.read_properties())
reader_info = ReaderInfo(properties, [False], large_format, storage_handler, storage_handler.reference)
reader_info_list.append(reader_info)
except Exception as e:
logging.debug("Error reading %s", storage_handler.reference)
import traceback
traceback.print_exc()
traceback.print_stack()
library_storage_properties = persistent_storage_system.library_storage_properties
for deletion in copy.deepcopy(library_storage_properties.get("data_item_deletions", list())):
if not deletion in deletions:
deletions.append(deletion)
preliminary_library_updates = dict()
library_updates = dict()
if not ignore_older_files:
Migration.migrate_to_latest(reader_info_list, preliminary_library_updates)
good_reader_info_list = list()
count = len(reader_info_list)
for index, reader_info in enumerate(reader_info_list):
storage_handler = reader_info.storage_handler
properties = reader_info.properties
try:
version = properties.get("version", 0)
if version == DataItem.DataItem.writer_version:
data_item_uuid = uuid.UUID(properties["uuid"])
if not data_item_uuid in data_item_uuids:
if str(data_item_uuid) in deletions:
utilized_deletions.add(data_item_uuid)
else:
auto_migrate_data_item(reader_info, persistent_storage_system, new_persistent_storage_system, index, count)
good_reader_info_list.append(reader_info)
data_item_uuids.add(data_item_uuid)
library_update = preliminary_library_updates.get(data_item_uuid)
if library_update:
library_updates[data_item_uuid] = library_update
except Exception as e:
logging.debug("Error reading %s", storage_handler.reference)
import traceback
traceback.print_exc()
traceback.print_stack()
return good_reader_info_list, library_updates | 0.004537 |
def email(self, domains: Union[tuple, list] = None) -> str:
"""Generate a random email.
:param domains: List of custom domains for emails.
:type domains: list or tuple
:return: Email address.
:Example:
[email protected]
"""
if not domains:
domains = EMAIL_DOMAINS
domain = self.random.choice(domains)
name = self.username(template='ld')
return '{name}{domain}'.format(
name=name,
domain=domain,
) | 0.003724 |
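# Hedged standalone sketch of the same idea using only the standard library; the
# domain list and the letters-plus-digits username shape are stand-ins for the
# provider's EMAIL_DOMAINS and username(template='ld').
import random
import string

def random_email(domains=('@example.com', '@example.org')):
    name = ''.join(random.choices(string.ascii_lowercase, k=8)) + str(random.randint(10, 99))
    return '{name}{domain}'.format(name=name, domain=random.choice(domains))

print(random_email())   # e.g. qwhnzkta42@example.org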
def create_turnover_tear_sheet(factor_data, turnover_periods=None):
"""
Creates a tear sheet for analyzing the turnover properties of a factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
turnover_periods : sequence[string], optional
Periods to compute turnover analysis on. By default periods in
        'factor_data' are used but custom periods can be provided instead. This
can be useful when periods in 'factor_data' are not multiples of the
frequency at which factor values are computed i.e. the periods
are 2h and 4h and the factor is computed daily and so values like
['1D', '2D'] could be used instead
"""
if turnover_periods is None:
turnover_periods = utils.get_forward_returns_columns(
factor_data.columns)
quantile_factor = factor_data['factor_quantile']
quantile_turnover = \
{p: pd.concat([perf.quantile_turnover(quantile_factor, q, p)
for q in range(1, int(quantile_factor.max()) + 1)],
axis=1)
for p in turnover_periods}
autocorrelation = pd.concat(
[perf.factor_rank_autocorrelation(factor_data, period) for period in
turnover_periods], axis=1)
plotting.plot_turnover_table(autocorrelation, quantile_turnover)
fr_cols = len(turnover_periods)
columns_wide = 1
rows_when_wide = (((fr_cols - 1) // 1) + 1)
vertical_sections = fr_cols + 3 * rows_when_wide + 2 * fr_cols
gf = GridFigure(rows=vertical_sections, cols=columns_wide)
for period in turnover_periods:
if quantile_turnover[period].isnull().all().all():
continue
plotting.plot_top_bottom_quantile_turnover(quantile_turnover[period],
period=period,
ax=gf.next_row())
for period in autocorrelation:
if autocorrelation[period].isnull().all():
continue
plotting.plot_factor_rank_auto_correlation(autocorrelation[period],
period=period,
ax=gf.next_row())
plt.show()
gf.close() | 0.000382 |
def expand_include(filename):
"""
Expand the content of a file into a string.
If @include directives are found in the config, they are expanded by
this function. In case recursion is detected or a RunTimeError is
thrown, ``None`` is returned.
"""
open_files = set()
def _expand_include_rec(filename):
if filename in open_files:
raise RuntimeError('Recursive include statement detected for '
'file: ' + filename)
else:
open_files.add(filename)
with open(filename) as open_file:
for line in open_file:
line_stripped = line.strip().replace("//", "#")
if line_stripped.startswith('@include '):
inc_to_clean = line_stripped.split(None, 1)[1]
inc_filename = inc_to_clean.replace('"'," ").strip()
for included_line in _expand_include_rec(inc_filename):
yield included_line
else:
yield line
open_files.remove(filename)
try:
lines = []
for line in _expand_include_rec(filename):
lines.append(line)
return ''.join(lines)
except RuntimeError:
return None | 0.002132 |
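# Usage sketch for expand_include() above, assuming it is in scope; two temporary
# files are created so the @include directive can actually be resolved.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    child = os.path.join(tmp, "child.conf")
    parent = os.path.join(tmp, "parent.conf")
    with open(child, "w") as handle:
        handle.write("nested = true\n")
    with open(parent, "w") as handle:
        handle.write('@include "{}"\nkey = value\n'.format(child))
    print(expand_include(parent))   # -> nested = true / key = value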
def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None,
warn_on_cast=True, add_newaxis_on_deficient_ndim=False):
"""Typecheck the size, shape and dtype of a numpy array, with optional
casting.
Parameters
----------
val : {np.ndaraay, None}
The array to check
dtype : {nd.dtype, str}
The dtype you'd like the array to have
ndim : int
The number of dimensions you'd like the array to have
name : str
name of the array. This is used when throwing exceptions, so that
we can describe to the user which array is messed up.
length : int, optional
How long should the array be?
can_be_none : bool
Is ``val == None`` acceptable?
shape : tuple, optional
What should be shape of the array be? If the provided tuple has
Nones in it, those will be semantically interpreted as matching
any length in that dimension. So, for example, using the shape
spec ``(None, None, 3)`` will ensure that the last dimension is of
length three without constraining the first two dimensions
warn_on_cast : bool, default=True
Raise a warning when the dtypes don't match and a cast is done.
    add_newaxis_on_deficient_ndim : bool, default=False
        Add a new axis to the beginning of the array if the number of
dimensions is deficient by one compared to your specification. For
instance, if you're trying to get out an array of ``ndim == 3``,
but the user provides an array of ``shape == (10, 10)``, a new axis will
be created with length 1 in front, so that the return value is of
shape ``(1, 10, 10)``.
Notes
-----
The returned value will always be C-contiguous.
Returns
-------
typechecked_val : np.ndarray, None
If `val=None` and `can_be_none=True`, then this will return None.
Otherwise, it will return val (or a copy of val). If the dtype wasn't right,
it'll be casted to the right shape. If the array was not C-contiguous, it'll
be copied as well.
"""
if can_be_none and val is None:
return None
if not isinstance(val, np.ndarray):
# special case: if the user is looking for a 1d array, and
# they request newaxis upconversion, and provided a scalar
# then we should reshape the scalar to be a 1d length-1 array
if add_newaxis_on_deficient_ndim and ndim == 1 and np.isscalar(val):
val = np.array([val])
else:
raise TypeError(("%s must be numpy array. "
" You supplied type %s" % (name, type(val))))
if warn_on_cast and val.dtype != dtype:
warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype),
TypeCastPerformanceWarning)
if not val.ndim == ndim:
if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim:
val = val[np.newaxis, ...]
else:
raise ValueError(("%s must be ndim %s. "
"You supplied %s" % (name, ndim, val.ndim)))
val = np.ascontiguousarray(val, dtype=dtype)
if length is not None and len(val) != length:
raise ValueError(("%s must be length %s. "
"You supplied %s" % (name, length, len(val))))
if shape is not None:
# the shape specified given by the user can look like (None, None 3)
# which indicates that ANY length is accepted in dimension 0 or
# dimension 1
sentenel = object()
error = ValueError(("%s must be shape %s. You supplied "
"%s" % (name, str(shape).replace('None', 'Any'), val.shape)))
for a, b in zip_longest(val.shape, shape, fillvalue=sentenel):
if a is sentenel or b is sentenel:
# if the sentenel was reached, it means that the ndim didn't
# match or something. this really shouldn't happen
raise error
if b is None:
# if the user's shape spec has a None in it, it matches anything
continue
if a != b:
# check for equality
raise error
return val | 0.001637 |
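# Usage sketch for ensure_type() above, assuming numpy and the module's other imports
# are available. A scalar is promoted to a length-1 array via add_newaxis_on_deficient_ndim,
# and the dtype already matches so no cast warning is emitted.
xyz = ensure_type(3.14, dtype=np.float64, ndim=1, name='xyz',
                  add_newaxis_on_deficient_ndim=True)
print(xyz, xyz.shape)   # -> [3.14] (1,)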
def schemaValidCtxtGetParserCtxt(self):
"""allow access to the parser context of the schema validation
context """
ret = libxml2mod.xmlSchemaValidCtxtGetParserCtxt(self._o)
if ret is None:raise parserError('xmlSchemaValidCtxtGetParserCtxt() failed')
__tmp = parserCtxt(_obj=ret)
return __tmp | 0.01462 |
def read_history_file(self, filename=None):
u'''Load a readline history file. The default filename is ~/.history.'''
if filename is None:
filename = self.mode._history.history_filename
log(u"read_history_file from %s"%ensure_unicode(filename))
self.mode._history.read_history_file(filename) | 0.014925 |
def balanceSheetDF(symbol, token='', version=''):
'''Pulls balance sheet data. Available quarterly (4 quarters) and annually (4 years)
https://iexcloud.io/docs/api/#balance-sheet
Updates at 8am, 9am UTC daily
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
val = balanceSheet(symbol, token, version)
df = pd.io.json.json_normalize(val, 'balancesheet', 'symbol')
_toDatetime(df)
_reindex(df, 'reportDate')
return df | 0.003466 |
async def delete_cas(self, key, *, index):
"""Deletes the Key with check-and-set semantics.
Parameters:
key (str): Key to delete
index (ObjectIndex): Index ID
Response:
bool: ``True`` on success
The Key will only be deleted if its current modify index matches the
supplied Index.
"""
index = extract_attr(index, keys=["ModifyIndex", "Index"])
response = await self._discard(key, cas=index)
return response.body is True | 0.003788 |
def _match_registers(self, query):
"""Tries to match in status registers
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None
"""
if query in self._status_registers:
register = self._status_registers[query]
response = register.value
logger.debug('Found response in status register: %s',
repr(response))
register.clear()
return response | 0.00369 |
def violinplot(x=None, y=None, data=None, bw=0.2, scale='width',
inner=None, ax=None, **kwargs):
"""Wrapper around Seaborn's Violinplot specifically for [0, 1] ranged data
What's different:
- bw = 0.2: Sets bandwidth to be small and the same between datasets
- scale = 'width': Sets the width of all violinplots to be the same
- inner = None: Don't plot a boxplot or points inside the violinplot
"""
if ax is None:
ax = plt.gca()
sns.violinplot(x, y, data=data, bw=bw, scale=scale, inner=inner, ax=ax,
**kwargs)
ax.set(ylim=(0, 1), yticks=(0, 0.5, 1))
return ax | 0.00155 |
def hostcmd_push(base_path, project_name, engine_name, vars_files=None, config_file=None, **kwargs):
"""
Push images to a registry. Requires authenticating with the registry prior to starting
the push. If your engine's config file does not already contain an authorization for the
registry, pass username and/or password. If you exclude password, you will be prompted.
"""
assert_initialized(base_path, config_file)
config = get_config(base_path, vars_files=vars_files, engine_name=engine_name, project_name=project_name,
config_file=config_file)
engine_obj = load_engine(['LOGIN', 'PUSH'],
engine_name, config.project_name,
config['services'], **kwargs)
logger.debug('PROJECT NAME', project_name=config.project_name)
push_images(base_path,
config.image_namespace,
engine_obj,
config,
save_conductor=config.save_conductor,
**kwargs) | 0.005797 |
def extract_packet(self):
"""
Extract packet from buffer
"""
packet_size = velbus.MINIMUM_MESSAGE_SIZE + \
(self.buffer[3] & 0x0F)
packet = self.buffer[0:packet_size]
return packet | 0.008333 |
def terminate_all(self):
"""Terminate all worker processes."""
for worker in self._workers:
worker.terminate()
# for thread in self._threads:
# try:
# thread.terminate()
# thread.wait()
# except Exception:
# pass
self._queue_workers = deque() | 0.005571 |
def _get_ignored_version():
"""
:return: Most recently ignored API version
"""
if os.path.exists(filepath):
with open(filepath) as data_file:
data = json.load(data_file)
version = data.get('version')
else:
version = None
return version | 0.003344 |
def remove_check(self, func, *, call_once=False):
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
Parameters
-----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
l = self._check_once if call_once else self._checks
try:
l.remove(func)
except ValueError:
pass | 0.004545 |
def is_noncopyable(class_, already_visited_cls_vars=None):
"""
Checks if class is non copyable
Args:
class_ (declarations.class_t): the class to be checked
already_visited_cls_vars (list): optional list of vars that should not
be checked a second time, to prevent infinite recursions.
In general you can ignore this argument, it is mainly used during
recursive calls of is_noncopyable() done by pygccxml.
Returns:
bool: if the class is non copyable
"""
logger = utils.loggers.cxx_parser
class_decl = class_traits.get_declaration(class_)
true_header = "is_noncopyable(TRUE) - %s - " % class_.decl_string
if is_union(class_):
return False
if class_decl.is_abstract:
logger.debug(true_header + "abstract client")
return True
# if class has public, user defined copy constructor, than this class is
# copyable
copy_ = find_copy_constructor(class_decl)
if copy_ and copy_.access_type == 'public' and not copy_.is_artificial:
return False
if already_visited_cls_vars is None:
already_visited_cls_vars = []
for base_desc in class_decl.recursive_bases:
assert isinstance(base_desc, class_declaration.hierarchy_info_t)
if base_desc.related_class.decl_string in \
('::boost::noncopyable', '::boost::noncopyable_::noncopyable'):
logger.debug(true_header + "derives from boost::noncopyable")
return True
if not has_copy_constructor(base_desc.related_class):
base_copy_ = find_copy_constructor(base_desc.related_class)
if base_copy_ and base_copy_.access_type == 'private':
logger.debug(
true_header +
"there is private copy constructor")
return True
elif __is_noncopyable_single(
base_desc.related_class, already_visited_cls_vars):
logger.debug(
true_header +
"__is_noncopyable_single returned True")
return True
if __is_noncopyable_single(
base_desc.related_class, already_visited_cls_vars):
logger.debug(
true_header +
"__is_noncopyable_single returned True")
return True
if not has_copy_constructor(class_decl):
logger.debug(true_header + "does not have trivial copy constructor")
return True
elif not has_public_constructor(class_decl):
logger.debug(true_header + "does not have a public constructor")
return True
elif has_destructor(class_decl) and not has_public_destructor(class_decl):
logger.debug(true_header + "has private destructor")
return True
return __is_noncopyable_single(class_decl, already_visited_cls_vars) | 0.000345 |
def check_installation(cur_file):
"""Warn user if running cleverhans from a different directory than tutorial."""
cur_dir = os.path.split(os.path.dirname(os.path.abspath(cur_file)))[0]
ch_dir = os.path.split(cleverhans.__path__[0])[0]
if cur_dir != ch_dir:
warnings.warn("It appears that you have at least two versions of "
"cleverhans installed, one at %s and one at"
" %s. You are running the tutorial script from the "
"former but python imported the library module from the "
"latter. This may cause errors, for example if the tutorial"
" version is newer than the library version and attempts to"
" call new features." % (cur_dir, ch_dir)) | 0.007853 |
def coordinates_to_array(
log,
ra,
dec):
"""*Convert a single value RA, DEC or list of RA and DEC to numpy arrays*
**Key Arguments:**
- ``ra`` -- list, numpy array or single ra value
- ``dec`` --list, numpy array or single dec value
- ``log`` -- logger
**Return:**
- ``raArray`` -- input RAs as a numpy array of decimal degree values
- ``decArray`` -- input DECs as a numpy array of decimal degree values
**Usage:**
.. todo::
add usage info
create a sublime snippet for usage
.. code-block:: python
ra, dec = coordinates_to_array(
log=log,
ra=ra,
dec=dec
)
"""
log.info('starting the ``coordinates_to_array`` function')
if isinstance(ra, np.ndarray) and isinstance(dec, np.ndarray):
return ra, dec
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=log
)
# CONVERT RA AND DEC TO NUMPY ARRAYS
if isinstance(ra, float):
pass
elif isinstance(ra, str):
try:
ra = float(ra)
except:
ra = converter.ra_sexegesimal_to_decimal(ra=ra)
elif isinstance(ra, list):
try:
ra = np.array(ra).astype(np.float)
except:
raList = []
raList[:] = [converter.ra_sexegesimal_to_decimal(ra=r) for r in ra]
ra = raList
if isinstance(dec, float):
pass
elif isinstance(dec, str):
try:
dec = float(dec)
except:
dec = converter.dec_sexegesimal_to_decimal(dec=dec)
elif isinstance(dec, list):
try:
dec = np.array(dec).astype(np.float)
except:
decList = []
decList[:] = [
converter.dec_sexegesimal_to_decimal(dec=d) for d in dec]
dec = decList
raArray = np.array(ra, dtype='f8', ndmin=1, copy=False)
decArray = np.array(dec, dtype='f8', ndmin=1, copy=False)
log.info('completed the ``coordinates_to_array`` function')
return raArray, decArray | 0.002323 |
def target_socket(self, config):
""" This method overrides :meth:`.WNetworkNativeTransport.target_socket` method. Do the same thing as
basic method do, but also checks that the result address is IPv4 multicast address.
:param config: beacon configuration
:return: WIPV4SocketInfo
"""
target = WUDPNetworkNativeTransport.target_socket(self, config)
if WNetworkIPV4.is_multicast(target.address()) is False:
raise ValueError('IP multicast address not RFC compliant')
return target | 0.026157 |
def customtype(self):
"""If this variable is a user-derivedy type, return the CustomType instance that
is its kind.
"""
result = None
if self.is_custom:
#Look for the module that declares this variable's kind in its public list.
self.dependency()
if self._kind_module is not None:
if self.kind.lower() in self._kind_module.types:
result = self._kind_module.types[self.kind.lower()]
return result | 0.00969 |
def to_rio(self):
"""Converts the colormap to a rasterio colormap.
"""
self.colors = (((self.colors * 1.0 - self.colors.min()) /
(self.colors.max() - self.colors.min())) * 255)
return dict(zip(self.values, tuple(map(tuple, self.colors)))) | 0.006803 |
def set_python(self, value):
"""Validate using cursor for consistency between direct set of values vs modification of cursor values"""
if not isinstance(value, (list, type(None))):
raise ValidationError(
self.record,
"Field '{}' must be set to a list, not '{}'".format(
self.name,
value.__class__
)
)
value = value or []
self.cursor._validate_list(value)
return super(ListField, self).set_python(value) | 0.005415 |
def add_fields(self, log_record, record, message_dict):
"""
Override this method to implement custom logic for adding fields.
"""
for field in self._required_fields:
log_record[field] = record.__dict__.get(field)
log_record.update(message_dict)
merge_record_extra(record, log_record, reserved=self._skip_fields)
if self.timestamp:
key = self.timestamp if type(self.timestamp) == str else 'timestamp'
log_record[key] = datetime.utcnow() | 0.005682 |
def n_executions(self):
"""
Queries and returns the number of past task executions.
"""
pipeline = self.tiger.connection.pipeline()
pipeline.exists(self.tiger._key('task', self.id))
pipeline.llen(self.tiger._key('task', self.id, 'executions'))
exists, n_executions = pipeline.execute()
if not exists:
raise TaskNotFound('Task {} not found.'.format(
self.id
))
return n_executions | 0.004073 |
def loop_until_closed(self, suppress_warning=False):
''' Execute a blocking loop that runs and executes event callbacks
until the connection is closed (e.g. by hitting Ctrl-C).
While this method can be used to run Bokeh application code "outside"
the Bokeh server, this practice is HIGHLY DISCOURAGED for any real
        use case. This function is intended to facilitate testing ONLY.
'''
suppress_warning # shut up flake
from bokeh.util.deprecation import deprecated
deprecated("ClientSession.loop_until_closed is deprecated, and will be removed in an eventual 2.0 release. "
"Run Bokeh applications directly on a Bokeh server instead. See:\n\n"
" https//docs.bokeh.org/en/latest/docs/user_guide/server.html\n")
self._connection.loop_until_closed() | 0.006912 |
def head(self, url):
'''head request, typically used for status code retrieval, etc.
'''
bot.debug('HEAD %s' %url)
return self._call(url, func=requests.head) | 0.011561 |
def check_valid_ip_or_cidr(val, return_as_cidr=False):
"""
Checks that the value is a valid IP address or a valid CIDR.
Returns the specified value.
If 'return_as_cidr' is set then the return value will always be in the form
of a CIDR, even if a plain IP address was specified.
"""
is_ip = True
if "/" in val:
ip_check(val, netmask_expected=True)
is_ip = False
else:
ip_check(val, netmask_expected=False)
if return_as_cidr and is_ip:
# Convert a plain IP to a CIDR
if val == "0.0.0.0":
# Special case for the default route
val = "0.0.0.0/0"
else:
val = "%s/32" % val
try:
ipaddress.IPv4Network(unicode(val))
except Exception as e:
raise ArgsError("Not a valid network: %s" % str(e))
return val | 0.001175 |
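# Hedged Python 3 sketch of the same normalisation using only the ipaddress module;
# strict=False mirrors the permissive "plain IP to CIDR" behaviour above.
import ipaddress

def to_cidr(val):
    if "/" not in val:
        val = "0.0.0.0/0" if val == "0.0.0.0" else val + "/32"
    return str(ipaddress.IPv4Network(val, strict=False))

print(to_cidr("10.1.2.3"), to_cidr("10.1.0.0/16"), to_cidr("0.0.0.0"))
# -> 10.1.2.3/32 10.1.0.0/16 0.0.0.0/0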
def mass_3d(self, R, Rs, rho0, r_core):
"""
:param R: projected distance
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
"""
Rs = float(Rs)
b = r_core * Rs ** -1
c = R * Rs ** -1
M0 = 4*np.pi*Rs**3 * rho0
return M0 * (1+b**2) ** -1 * (0.5*np.log(1+c**2) + b**2*np.log(c*b**-1 + 1) - b*np.arctan(c)) | 0.007042 |
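# Quick numeric evaluation of the enclosed-mass expression above, assuming numpy;
# the parameter values are arbitrary and in code units, purely for illustration.
import numpy as np

Rs, rho0, r_core, R = 20.0, 1.0e7, 5.0, 40.0
b, c = r_core / Rs, R / Rs
M0 = 4 * np.pi * Rs**3 * rho0
mass = M0 / (1 + b**2) * (0.5 * np.log(1 + c**2) + b**2 * np.log(c / b + 1) - b * np.arctan(c))
print('{:.3e}'.format(mass))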
def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores | 0.012048 |
def solveConsMarkov(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac,
MrkvArray,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Solves a single period consumption-saving problem with risky income and
stochastic transitions between discrete states, in a Markov fashion. Has
identical inputs as solveConsIndShock, except for a discrete
Markov transitionrule MrkvArray. Markov states can differ in their interest
factor, permanent growth factor, and income distribution, so the inputs Rfree,
PermGroFac, and IncomeDstn are arrays or lists specifying those values in each
(succeeding) Markov state.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn_list : [[np.array]]
A length N list of income distributions in each succeeding Markov
state. Each income distribution contains three arrays of floats,
representing a discrete approximation to the income process at the
beginning of the succeeding period. Order: event probabilities,
permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree_list : np.array
Risk free interest factor on end-of-period assets for each Markov
state in the succeeding period.
    PermGroFac_list : float
Expected permanent income growth factor at the end of this period
for each Markov state in the succeeding period.
MrkvArray : numpy.array
An NxN array representing a Markov transition matrix between discrete
states. The i,j-th element of MrkvArray is the probability of
moving from state i in period t to state j in period t+1.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marg-
inal value function vPfunc, a minimum acceptable level of normalized
market resources mNrmMin, normalized human wealth hNrm, and bounding
MPCs MPCmin and MPCmax. It might also have a value function vFunc
and marginal marginal value function vPPfunc. All of these attributes
are lists or arrays, with elements corresponding to the current
Markov state. E.g. solution.cFunc[0] is the consumption function
when in the i=0 Markov state this period.
'''
solver = ConsMarkovSolver(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,MrkvArray,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
solution_now = solver.solve()
return solution_now | 0.008 |
def get_dict_from_response(response):
"""Check for errors in the response and return the resulting JSON."""
if getattr(response, '_resp') and response._resp.code > 400:
raise OAuthResponseError(
'Application mis-configuration in Globus', None, response
)
return response.data | 0.003086 |
def sse(mean, estimator):
"""
Description:
Calculates the Sum of Squared Errors (SSE) of
an estimation on flat numpy ndarrays.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray)
"""
return np.sum((np.asarray(estimator) - np.asarray(mean)) ** 2, axis=0) | 0.00271 |
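# Tiny worked example for sse() above, assuming numpy and the function are in scope;
# the squared errors sum column-wise to [0.5, 1.0] for these made-up arrays.
import numpy as np

mean = np.array([[1.0, 2.0], [3.0, 4.0]])
estimator = np.array([[1.5, 3.0], [2.5, 4.0]])
print(sse(mean, estimator))   # -> [0.5 1.]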
def get_project_details(self, project):
""" Get the project details. """
result = self.get_project(project.pid)
if result is None:
result = {}
return result | 0.01 |
def find(cls, session, resource_id, include=None):
"""Retrieve a single resource.
This should only be called from sub-classes.
Args:
session(Session): The session to find the resource in
resource_id: The ``id`` for the resource to look up
Keyword Args:
include: Resource classes to include
Returns:
Resource: An instance of a resource, or throws a
:class:`NotFoundError` if the resource can not be found.
"""
url = session._build_url(cls._resource_path(), resource_id)
params = build_request_include(include, None)
process = cls._mk_one(session, include=include)
return session.get(url, CB.json(200, process), params=params) | 0.002581 |
def __args_check(self, envelope, target, modification_code):
""" Method checks arguments, that are specified to the
:meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode`
methods
:param envelope: same as envelope in :meth:`.WMessengerFixedModificationLayer.encode` and \
:meth:`.WMessengerFixedModificationLayer.decode` methods
:param target: same as target in :meth:`.WMessengerFixedModificationLayer.encode` and \
:meth:`.WMessengerFixedModificationLayer.decode` methods
:param modification_code: same as modification_code in \
:meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode` \
methods
:return: None
"""
if target is None:
raise RuntimeError('"target" argument must be specified for this layer')
if modification_code is None:
raise RuntimeError('"modification_code" argument must be specified for this layer')
if isinstance(target, WMessengerFixedModificationLayer.Target) is False:
raise TypeError('Invalid "target" argument')
if isinstance(envelope, WMessengerTextEnvelope) is True:
if isinstance(modification_code, str) is False:
raise TypeError('Invalid "modification_code" argument for specified envelope')
elif isinstance(modification_code, bytes) is False:
raise TypeError('Invalid "modification_code" argument for specified envelope') | 0.022825 |
def _add_sj_index_commands(fq1, ref_file, gtf_file):
"""
    newer versions of STAR can generate splice junction databases on the fly
this is preferable since we can tailor it to the read lengths
"""
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd | 0.002217 |
def register_processes(self, process_schemas, user, force=False, verbosity=1):
"""Read and register processors."""
log_processors = []
log_templates = []
for p in process_schemas:
# TODO: Remove this when all processes are migrated to the
# new syntax.
if 'flow_collection' in p:
if 'entity' in p:
self.stderr.write(
"Skip processor {}: only one of 'flow_collection' and 'entity' fields "
"allowed".format(p['slug'])
)
continue
p['entity'] = {'type': p.pop('flow_collection')}
if p['type'][-1] != ':':
p['type'] += ':'
if 'category' in p and not p['category'].endswith(':'):
p['category'] += ':'
for field in ['input', 'output']:
for schema, _, _ in iterate_schema({}, p[field] if field in p else {}):
if not schema['type'][-1].endswith(':'):
schema['type'] += ':'
# TODO: Check if schemas validate with our JSON meta schema and Processor model docs.
if not self.valid(p, PROCESSOR_SCHEMA):
continue
if 'entity' in p:
if 'type' not in p['entity']:
self.stderr.write(
"Skip process {}: 'entity.type' required if 'entity' defined".format(p['slug'])
)
continue
p['entity_type'] = p['entity']['type']
p['entity_descriptor_schema'] = p['entity'].get('descriptor_schema', p['entity_type'])
p['entity_input'] = p['entity'].get('input', None)
p.pop('entity')
if not DescriptorSchema.objects.filter(slug=p['entity_descriptor_schema']).exists():
self.stderr.write(
"Skip processor {}: Unknown descriptor schema '{}' used in 'entity' "
"field.".format(p['slug'], p['entity_descriptor_schema'])
)
continue
if 'persistence' in p:
persistence_mapping = {
'RAW': Process.PERSISTENCE_RAW,
'CACHED': Process.PERSISTENCE_CACHED,
'TEMP': Process.PERSISTENCE_TEMP,
}
p['persistence'] = persistence_mapping[p['persistence']]
if 'scheduling_class' in p:
scheduling_class_mapping = {
'interactive': Process.SCHEDULING_CLASS_INTERACTIVE,
'batch': Process.SCHEDULING_CLASS_BATCH
}
p['scheduling_class'] = scheduling_class_mapping[p['scheduling_class']]
if 'input' in p:
p['input_schema'] = p.pop('input')
if 'output' in p:
p['output_schema'] = p.pop('output')
slug = p['slug']
if 'run' in p:
# Set default language to 'bash' if not set.
p['run'].setdefault('language', 'bash')
# Transform output schema using the execution engine.
try:
execution_engine = manager.get_execution_engine(p['run']['language'])
extra_output_schema = execution_engine.get_output_schema(p)
if extra_output_schema:
p.setdefault('output_schema', []).extend(extra_output_schema)
except InvalidEngineError:
self.stderr.write("Skip processor {}: execution engine '{}' not supported".format(
slug, p['run']['language']
))
continue
# Validate if container image is allowed based on the configured pattern.
# NOTE: This validation happens here and is not deferred to executors because the idea
# is that this will be moved to a "container" requirement independent of the
# executor.
if hasattr(settings, 'FLOW_CONTAINER_VALIDATE_IMAGE'):
try:
container_image = dict_dot(p, 'requirements.executor.docker.image')
if not re.match(settings.FLOW_CONTAINER_VALIDATE_IMAGE, container_image):
self.stderr.write("Skip processor {}: container image does not match '{}'".format(
slug, settings.FLOW_CONTAINER_VALIDATE_IMAGE,
))
continue
except KeyError:
pass
version = p['version']
int_version = convert_version_string_to_int(version, VERSION_NUMBER_BITS)
# `latest version` is returned as `int` so it has to be compared to `int_version`
latest_version = Process.objects.filter(slug=slug).aggregate(Max('version'))['version__max']
if latest_version is not None and latest_version > int_version:
self.stderr.write("Skip processor {}: newer version installed".format(slug))
continue
previous_process_qs = Process.objects.filter(slug=slug)
if previous_process_qs.exists():
previous_process = previous_process_qs.latest()
else:
previous_process = None
process_query = Process.objects.filter(slug=slug, version=version)
if process_query.exists():
if not force:
if verbosity > 0:
self.stdout.write("Skip processor {}: same version installed".format(slug))
continue
process_query.update(**p)
log_processors.append("Updated {}".format(slug))
else:
process = Process.objects.create(contributor=user, **p)
assign_contributor_permissions(process)
if previous_process:
copy_permissions(previous_process, process)
log_processors.append("Inserted {}".format(slug))
if verbosity > 0:
if log_processors:
self.stdout.write("Processor Updates:")
for log in log_processors:
self.stdout.write(" {}".format(log))
if log_templates:
self.stdout.write("Default Template Updates:")
for log in log_templates:
self.stdout.write(" {}".format(log)) | 0.003787 |
def sed(self, photon_energy, distance=1 * u.kpc, seed=None):
"""Spectral energy distribution at a given distance from the source
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default).
"""
sed = super(InverseCompton, self).sed(photon_energy, distance=distance)
if seed is not None:
if distance != 0:
out_unit = "erg/(cm2 s)"
else:
out_unit = "erg/s"
sed = (
self.flux(photon_energy, distance=distance, seed=seed)
* photon_energy ** 2.0
).to(out_unit)
return sed | 0.001797 |
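A brief usage sketch, assuming the method above belongs to a naima-style InverseCompton radiative model; the particle distribution, seed photon field names, distance, and all numeric values below are illustrative assumptions rather than values taken from the source.

import numpy as np
import astropy.units as u
from naima.models import ExponentialCutoffPowerLaw, InverseCompton

# Illustrative electron distribution and IC model (all parameters are assumptions).
electrons = ExponentialCutoffPowerLaw(1e36 / u.eV, 1 * u.TeV, 2.5, 50 * u.TeV)
ic = InverseCompton(electrons, seed_photon_fields=["CMB", "FIR"])

energies = np.logspace(-1, 2, 30) * u.TeV
total_sed = ic.sed(energies, distance=2 * u.kpc)            # sum over all seed fields
cmb_sed = ic.sed(energies, distance=2 * u.kpc, seed="CMB")  # contribution of a single seed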
def update(self, **kwargs):
'''Update the attributes of a JObject.
Parameters
----------
kwargs
Keyword arguments of the form `attribute=new_value`
Examples
--------
>>> J = jams.JObject(foo=5)
>>> J.dumps()
'{"foo": 5}'
>>> J.update(bar='baz')
>>> J.dumps()
'{"foo": 5, "bar": "baz"}'
'''
for name, value in six.iteritems(kwargs):
setattr(self, name, value) | 0.004032 |
def run_init_tables(*args):
'''
    Initialize all database tables.
'''
print('--')
create_table(TabPost)
create_table(TabTag)
create_table(TabMember)
create_table(TabWiki)
create_table(TabLink)
create_table(TabEntity)
create_table(TabPostHist)
create_table(TabWikiHist)
create_table(TabCollect)
create_table(TabPost2Tag)
create_table(TabRel)
create_table(TabEvaluation)
create_table(TabUsage)
create_table(TabReply)
create_table(TabUser2Reply)
create_table(TabRating)
create_table(TabEntity2User)
create_table(TabLog) | 0.001692 |
def is_lock_pending(
end_state: NettingChannelEndState,
secrethash: SecretHash,
) -> bool:
"""True if the `secrethash` corresponds to a lock that is pending to be claimed
and didn't expire.
"""
return (
secrethash in end_state.secrethashes_to_lockedlocks or
secrethash in end_state.secrethashes_to_unlockedlocks or
secrethash in end_state.secrethashes_to_onchain_unlockedlocks
) | 0.004566 |
def _sum_by_samples(seqs_freq, samples_order):
"""
Sum sequences of a metacluster by samples.
"""
    n = len(next(iter(seqs_freq.values())).freq)
    y = np.array([0] * n)
    for s in seqs_freq:
exp = [seqs_freq[s].freq[sam] for sam in samples_order]
y = list(np.array(exp) + y)
return y | 0.002793 |
def spacing(self, spacing):
"""Set the spacing in each axial direction. Pass a length three tuple of
floats"""
dx, dy, dz = spacing[0], spacing[1], spacing[2]
self.SetSpacing(dx, dy, dz)
self.Modified() | 0.012397 |
async def deserialize(data: dict):
"""
Builds a Proof object with defined attributes.
Attributes are provided by a previous call to the serialize function.
:param data:
Example:
name = "proof name"
requested_attrs = [{"name": "age", "restrictions": [{"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766" } ] }, { "name":"name", "restrictions": [ { "schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]}]
proof = await Proof.create(source_id, name, requested_attrs)
data = proof.serialize()
proof2 = await Proof.deserialize(data)
:return: Proof Object
"""
return await Proof._deserialize("vcx_proof_deserialize",
json.dumps(data),
data.get('data').get('source_id')) | 0.001696 |
def split_all(reference, sep):
"""
Splits a given string at a given separator or list of separators.
:param reference: The reference to split.
:param sep: Separator string or list of separator strings.
:return: A list of split strings
"""
parts = partition_all(reference, sep)
return [p for p in parts if p not in sep] | 0.002849 |
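A small illustrative call of the helper above; it assumes the module's partition_all helper splits while keeping the separators, as the docstring implies, so the hypothetical input and expected output below are assumptions.

# Hypothetical usage of split_all with two separators.
parts = split_all("refs/heads/feature.branch", ["/", "."])
# Expected under the stated assumption: ['refs', 'heads', 'feature', 'branch']
print(parts)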
def create_or_update_group(self, name, policies=None, mount_point=DEFAULT_MOUNT_POINT):
"""
Create or update LDAP group policies.
Supported methods:
POST: /auth/{mount_point}/groups/{name}. Produces: 204 (empty body)
:param name: The name of the LDAP group
:type name: str | unicode
:param policies: List of policies associated with the group. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type policies: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the create_or_update_group request.
:rtype: requests.Response
"""
if policies is None:
policies = []
if not isinstance(policies, list):
error_msg = '"policies" argument must be an instance of list or None, "{policies_type}" provided.'.format(
policies_type=type(policies),
)
raise exceptions.ParamValidationError(error_msg)
params = {
'policies': ','.join(policies),
}
api_path = '/v1/auth/{mount_point}/groups/{name}'.format(
mount_point=mount_point,
name=name,
)
return self._adapter.post(
url=api_path,
json=params,
) | 0.003549 |
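A short usage sketch, assuming the method above is exposed on an hvac-style client as client.auth.ldap; the Vault address, token, group name, and policy names are placeholders.

import hvac

client = hvac.Client(url="https://vault.example.com:8200", token="<vault-token>")
# Map the LDAP group "devops" to two Vault policies (names are placeholders).
client.auth.ldap.create_or_update_group(
    name="devops",
    policies=["default", "devops-read"],
)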
async def _run(self, data_id, script):
"""Execute the script and save results."""
self.data_id = data_id
# Fetch data instance to get any executor requirements.
self.process = PROCESS
requirements = self.process['requirements']
self.requirements = requirements.get('executor', {}).get(self.name, {}) # pylint: disable=no-member
self.resources = requirements.get('resources', {})
logger.debug("Preparing output files for Data with id {}".format(data_id))
os.chdir(EXECUTOR_SETTINGS['DATA_DIR'])
try:
log_file = self._create_file('stdout.txt')
json_file = self._create_file('jsonout.txt')
except FileExistsError:
logger.error("Stdout or jsonout out file already exists.")
# Looks like executor was already ran for this Data object,
# so don't raise the error to prevent setting status to error.
await self._send_manager_command(ExecutorProtocol.ABORT, expect_reply=False)
return
proc_pid = await self.start()
await self.update_data_status(
status=DATA_META['STATUS_PROCESSING'],
process_pid=proc_pid
)
# Run process and handle intermediate results
logger.info("Running program for Data with id {}".format(data_id))
logger.debug("The program for Data with id {} is: \n{}".format(data_id, script))
await self.run_script(script)
spawn_processes = []
output = {}
process_error, process_warning, process_info = [], [], []
process_progress, process_rc = 0, 0
# read process output
try:
stdout = self.get_stdout()
while True:
line = await stdout.readline()
logger.debug("Process's output: {}".format(line.strip()))
if not line:
break
line = line.decode('utf-8')
try:
if line.strip().startswith('run'):
# Save process and spawn if no errors
log_file.write(line)
log_file.flush()
for obj in iterjson(line[3:].strip()):
spawn_processes.append(obj)
elif line.strip().startswith('export'):
file_name = line[6:].strip()
export_folder = SETTINGS['FLOW_EXECUTOR']['UPLOAD_DIR']
unique_name = 'export_{}'.format(uuid.uuid4().hex)
export_path = os.path.join(export_folder, unique_name)
self.exported_files_mapper[self.data_id][file_name] = unique_name
shutil.move(file_name, export_path)
else:
# If JSON, save to MongoDB
updates = {}
for obj in iterjson(line):
for key, val in obj.items():
if key.startswith('proc.'):
if key == 'proc.error':
process_error.append(val)
if not process_rc:
process_rc = 1
updates['process_rc'] = process_rc
updates['process_error'] = process_error
updates['status'] = DATA_META['STATUS_ERROR']
elif key == 'proc.warning':
process_warning.append(val)
updates['process_warning'] = process_warning
elif key == 'proc.info':
process_info.append(val)
updates['process_info'] = process_info
elif key == 'proc.rc':
process_rc = int(val)
updates['process_rc'] = process_rc
if process_rc != 0:
updates['status'] = DATA_META['STATUS_ERROR']
elif key == 'proc.progress':
process_progress = int(float(val) * 100)
updates['process_progress'] = process_progress
else:
output[key] = val
updates['output'] = output
if updates:
await self.update_data_status(**updates)
# Process meta fields are collected in listener, so we can clear them.
process_error, process_warning, process_info = [], [], []
if process_rc > 0:
log_file.close()
json_file.close()
await self._send_manager_command(ExecutorProtocol.FINISH, extra_fields={
ExecutorProtocol.FINISH_PROCESS_RC: process_rc
})
return
# Debug output
# Not referenced in Data object
json_file.write(line)
json_file.flush()
except ValueError as ex:
# Ignore if not JSON
log_file.write(line)
log_file.flush()
except MemoryError as ex:
logger.error("Out of memory:\n\n{}".format(ex))
except IOError as ex:
# TODO: if ex.errno == 28: no more free space
raise ex
finally:
# Store results
log_file.close()
json_file.close()
return_code = await self.end()
if process_rc < return_code:
process_rc = return_code
# send a notification to the executor listener that we're done
finish_fields = {
ExecutorProtocol.FINISH_PROCESS_RC: process_rc
}
if spawn_processes and process_rc == 0:
finish_fields[ExecutorProtocol.FINISH_SPAWN_PROCESSES] = spawn_processes
finish_fields[ExecutorProtocol.FINISH_EXPORTED_FILES] = self.exported_files_mapper
return finish_fields | 0.002724 |
def GetArtifactPathDependencies(rdf_artifact):
"""Return a set of knowledgebase path dependencies.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
"""
deps = set()
for source in rdf_artifact.sources:
for arg, value in iteritems(source.attributes):
paths = []
if arg in ["path", "query"]:
paths.append(value)
if arg == "key_value_pairs":
# This is a REGISTRY_VALUE {key:blah, value:blah} dict.
paths.extend([x["key"] for x in value])
if arg in ["keys", "paths", "path_list", "content_regex_list"]:
paths.extend(value)
for path in paths:
for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
deps.add(match.group()[2:-2]) # Strip off %%.
deps.update(GetArtifactParserDependencies(rdf_artifact))
return deps | 0.013158 |
def stop(self):
""" Try to gracefully stop the greenlet synchronously
Stop isn't expected to re-raise greenlet _run exception
(use self.greenlet.get() for that),
but it should raise any stop-time exception """
if self._stop_event.ready():
return
self._stop_event.set()
self._global_send_event.set()
for retrier in self._address_to_retrier.values():
if retrier:
retrier.notify()
self._client.set_presence_state(UserPresence.OFFLINE.value)
self._client.stop_listener_thread() # stop sync_thread, wait client's greenlets
# wait own greenlets, no need to get on them, exceptions should be raised in _run()
gevent.wait(self.greenlets + [r.greenlet for r in self._address_to_retrier.values()])
# Ensure keep-alive http connections are closed
self._client.api.session.close()
self.log.debug('Matrix stopped', config=self._config)
del self.log | 0.00495 |
def request(self, method, params=None):
"""Send a JSON RPC request to the client.
Args:
method (str): The method name of the message to send
params (any): The payload of the message
Returns:
Future that will resolve once a response has been received
"""
msg_id = self._id_generator()
log.debug('Sending request with id %s: %s %s', msg_id, method, params)
message = {
'jsonrpc': JSONRPC_VERSION,
'id': msg_id,
'method': method,
}
if params is not None:
message['params'] = params
request_future = futures.Future()
request_future.add_done_callback(self._cancel_callback(msg_id))
self._server_request_futures[msg_id] = request_future
self._consumer(message)
return request_future | 0.002278 |
def request_doi_status_by_filename(self, file_name, data_type='result'):
"""
        This method retrieves the status of a DOI request.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
        contents - retrieve the XML submitted by the publisher
result - retrieve a JSON with the status of the submission
"""
endpoint = self.get_endpoint('submissionDownload')
params = {
'usr': self.api_user,
'pwd': self.api_key,
'file_name': file_name,
'type': data_type
}
result = self.do_http_request(
'get',
endpoint,
data=params,
timeout=10,
custom_header=str(self.etiquette)
)
return result | 0.002442 |
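A hedged usage sketch; the depositor constructor shape, credentials, and file name below are assumptions for illustration only.

# All names and values are placeholders / assumptions.
depositor = Depositor(prefix="10.1590", api_user="my_user", api_key="my_secret")
response = depositor.request_doi_status_by_filename("deposit_1234.xml", data_type="result")
print(response.status_code)
print(response.text)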
def recur(obj, type_func_tuple_list=()):
    '''Recursively process an object, applying a registered handler when its type matches.'''
    for obj_type, func in type_func_tuple_list:
        if type(obj) is obj_type:
            return func(obj)
    # by default, we recurse into lists, tuples and dicts
    if isinstance(obj, list) or isinstance(obj, tuple):
        n_obj = []
        for i in obj:
            n_obj.append(recur(i, type_func_tuple_list))
        return n_obj if isinstance(obj, list) else tuple(n_obj)
    elif isinstance(obj, dict):
        n_obj = {}
        for k, v in obj.items():
            n_obj[k] = recur(v, type_func_tuple_list)
        return n_obj
    return obj | 0.00335 |
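A self-contained example of the helper above; the handler list and input data are invented for illustration.

# Round every float encountered during recursion.
handlers = ((float, lambda v: round(v, 2)),)

data = {"scores": [1.2345, 2.6789], "meta": ("ok", 3.14159)}
print(recur(data, handlers))
# -> {'scores': [1.23, 2.68], 'meta': ('ok', 3.14)}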
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults['form'] = self.add_form
defaults.update(kwargs)
return super(SettingsAdmin, self).get_form(request, obj, **defaults) | 0.006289 |
def add_analysis_dataset(self, group_name, dataset_name, data, attrs=None):
""" Add a dataset to the specified group.
:param group_name: The path of the group the dataset will be added to,
relative to the "Analyses" group.
:param dataset_name: The name of the new dataset.
:param data: A numpy array representing the data to be written.
:param attrs: A dictionary of attributes to be added to the dataset.
        :raises KeyError: If the dataset is being added to a non-existent group or
if file is not open for writing.
"""
self.assert_writeable()
group_path = 'Analyses/{}'.format(group_name)
if group_path not in self.handle:
msg = 'Dataset cannot be added to non-existent group: Analyses/{} in {}'
raise KeyError(msg.format(group_name, self.filename))
sanitized_data = _sanitize_data_for_writing(data)
if np.shape(sanitized_data) == (): # We can't compress scalar datasets
self.handle[group_path].create_dataset(dataset_name,
data=sanitized_data)
else:
self.handle[group_path].create_dataset(dataset_name,
data=sanitized_data,
compression='gzip')
if attrs is not None:
path = '{}/{}'.format(group_path, dataset_name)
self._add_attributes(path, attrs) | 0.002644 |
def extract_pairs(abed, bbed, groups):
"""
    Called by fromgroups(); extracts pairs specific to a pair of species.
"""
agenome = op.basename(abed.filename).split(".")[0]
bgenome = op.basename(bbed.filename).split(".")[0]
aorder = abed.order
border = bbed.order
pairsfile = "{0}.{1}.pairs".format(agenome, bgenome)
fw = open(pairsfile, "w")
is_self = abed.filename == bbed.filename
npairs = 0
for group in groups:
        pairs_iter = combinations(group, 2) if is_self \
            else product(group, repeat=2)
        for a, b in pairs_iter:
if a not in aorder or b not in border:
continue
print("\t".join((a, b)), file=fw)
npairs += 1
logging.debug("File `{0}` written with {1} pairs.".format(pairsfile, npairs)) | 0.002451 |
def graph_from_edges(edges):
""" Constructs an undirected multigraph from a list containing data on
weighted edges.
Parameters
----------
edges : list
List of tuples each containing first node, second node, weight, key.
Returns
-------
    M : :class:`networkx.classes.multigraph.MultiGraph`
"""
M = nx.MultiGraph()
for e in edges:
n0, n1, weight, key = e
M.add_edge(n0, n1, weight=weight, key=key)
return M | 0.002079 |
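A brief usage sketch of the function above; the edge tuples are invented for illustration.

import networkx as nx

# Each tuple: (first node, second node, weight, key); parallel edges differ by key.
edges = [
    ("bus1", "bus2", 0.4, "line_a"),
    ("bus1", "bus2", 0.7, "line_b"),
    ("bus2", "bus3", 1.1, "line_c"),
]
M = graph_from_edges(edges)
print(M.number_of_edges())                    # 3
print(M["bus1"]["bus2"]["line_a"]["weight"])  # 0.4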
def setup(self):
""" Setups the controller by reading/setting position for all motors. """
self._init_vrep_streaming()
# Init lifo for temperature spoofing
for m in self.motors:
m.__dict__['_load_fifo'] = deque(200 * [1], maxlen=200)
self.update() | 0.009967 |
def convert_numeric_id_to_id36(numeric_id):
"""Convert an integer into its base36 string representation.
This method has been cleaned up slightly to improve readability. For more
info see:
https://github.com/reddit/reddit/blob/master/r2/r2/lib/utils/_utils.pyx
https://www.reddit.com/r/redditdev/comments/n624n/submission_ids_question/
https://en.wikipedia.org/wiki/Base36
"""
# base36 allows negative numbers, but reddit does not
if not isinstance(numeric_id, six.integer_types) or numeric_id < 0:
raise ValueError("must supply a positive int/long")
# Alphabet used for base 36 conversion
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
alphabet_len = len(alphabet)
# Temp assign
current_number = numeric_id
base36 = []
# Current_number must be greater than alphabet length to while/divmod
if 0 <= current_number < alphabet_len:
return alphabet[current_number]
# Break up into chunks
while current_number != 0:
current_number, rem = divmod(current_number, alphabet_len)
base36.append(alphabet[rem])
# String is built in reverse order
return ''.join(reversed(base36)) | 0.000838 |
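A quick sanity check of the conversion above; the inputs are chosen for illustration, and the last line verifies the result against Python's built-in base-36 parsing.

print(convert_numeric_id_to_id36(0))      # '0'
print(convert_numeric_id_to_id36(35))     # 'z'
print(convert_numeric_id_to_id36(12345))  # '9ix'
assert int(convert_numeric_id_to_id36(12345), 36) == 12345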
def commit_docs(*, added, removed):
"""
Commit the docs to the current branch
Assumes that :func:`setup_GitHub_push`, which sets up the ``doctr_remote``
remote, has been run.
Returns True if changes were committed and False if no changes were
committed.
"""
TRAVIS_BUILD_NUMBER = os.environ.get("TRAVIS_BUILD_NUMBER", "<unknown>")
TRAVIS_BRANCH = os.environ.get("TRAVIS_BRANCH", "<unknown>")
TRAVIS_COMMIT = os.environ.get("TRAVIS_COMMIT", "<unknown>")
TRAVIS_REPO_SLUG = os.environ.get("TRAVIS_REPO_SLUG", "<unknown>")
TRAVIS_JOB_WEB_URL = os.environ.get("TRAVIS_JOB_WEB_URL", "<unknown>")
TRAVIS_TAG = os.environ.get("TRAVIS_TAG", "")
branch = "tag" if TRAVIS_TAG else "branch"
DOCTR_COMMAND = ' '.join(map(shlex.quote, sys.argv))
if added:
run(['git', 'add', *added])
if removed:
run(['git', 'rm', *removed])
commit_message = """\
Update docs after building Travis build {TRAVIS_BUILD_NUMBER} of
{TRAVIS_REPO_SLUG}
The docs were built from the {branch} '{TRAVIS_BRANCH}' against the commit
{TRAVIS_COMMIT}.
The Travis build that generated this commit is at
{TRAVIS_JOB_WEB_URL}.
The doctr command that was run is
{DOCTR_COMMAND}
""".format(
branch=branch,
TRAVIS_BUILD_NUMBER=TRAVIS_BUILD_NUMBER,
TRAVIS_BRANCH=TRAVIS_BRANCH,
TRAVIS_COMMIT=TRAVIS_COMMIT,
TRAVIS_REPO_SLUG=TRAVIS_REPO_SLUG,
TRAVIS_JOB_WEB_URL=TRAVIS_JOB_WEB_URL,
DOCTR_COMMAND=DOCTR_COMMAND,
)
# Only commit if there were changes
if run(['git', 'diff-index', '--exit-code', '--cached', '--quiet', 'HEAD', '--'], exit=False) != 0:
print("Committing")
run(['git', 'commit', '-am', commit_message])
return True
return False | 0.005679 |
def get_edge_list(self):
"""Get the list of Edge instances.
This method returns the list of Edge instances
composing the graph.
"""
edge_objs = list()
for obj_dict_list in self.obj_dict['edges'].values():
edge_objs.extend([
Edge(obj_dict=obj_d)
for obj_d
in obj_dict_list])
return edge_objs | 0.00489 |
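A short usage sketch, assuming the method above belongs to a pydot-style Graph class; the toy graph is invented for illustration.

import pydot

g = pydot.Dot(graph_type="graph")
g.add_edge(pydot.Edge("a", "b"))
g.add_edge(pydot.Edge("b", "c"))
for edge in g.get_edge_list():
    print(edge.get_source(), "--", edge.get_destination())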
def _stop_vibration_win(self):
"""Stop the vibration."""
xinput_set_state = self.manager.xinput.XInputSetState
xinput_set_state.argtypes = [
ctypes.c_uint, ctypes.POINTER(XinputVibration)]
xinput_set_state.restype = ctypes.c_uint
stop_vibration = ctypes.byref(XinputVibration(0, 0))
xinput_set_state(self.__device_number, stop_vibration) | 0.005038 |
def categories_to_fetch(self, categories_to_fetch):
"""Sets the categories_to_fetch of this GCPConfiguration.
A list of Google Cloud Platform (GCP) services (such as ComputeEngine, PubSub, etc) from which to pull metrics. Allowable values are APPENGINE, BIGQUERY, BIGTABLE, CLOUDFUNCTIONS, CLOUDIOT, CLOUDSQL, CLOUDTASKS, COMPUTE, CONTAINER, DATAFLOW, DATASTORE, FIREBASEDATABASE, FIREBASEHOSTING, LOGGING, ML, PUBSUB, ROUTER, SPANNER, STORAGE, VPN # noqa: E501
:param categories_to_fetch: The categories_to_fetch of this GCPConfiguration. # noqa: E501
:type: list[str]
"""
allowed_values = ["APPENGINE", "BIGQUERY", "BIGTABLE", "CLOUDFUNCTIONS", "CLOUDIOT", "CLOUDSQL", "CLOUDTASKS", "COMPUTE", "CONTAINER", "DATAFLOW", "DATAPROC", "DATASTORE", "FIREBASEDATABASE", "FIREBASEHOSTING", "INTERCONNECT", "LOADBALANCING", "LOGGING", "ML", "MONITORING", "PUBSUB", "REDIS", "ROUTER", "SERVICERUNTIME", "SPANNER", "STORAGE", "TPU", "VPN"] # noqa: E501
if not set(categories_to_fetch).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `categories_to_fetch` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(categories_to_fetch) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._categories_to_fetch = categories_to_fetch | 0.001385 |
def hist_1d_index(x, shape):
"""
Fast 1d histogram of 1D indices with C++ inner loop optimization.
    It is more than two orders of magnitude faster than np.histogram().
    The indices are given as coordinates and must fit into a histogram of the given shape.
Parameters
----------
x : array like
shape : tuple
tuple with x dimensions: (x,)
Returns
-------
np.ndarray with given shape
"""
if len(shape) != 1:
raise InvalidInputError('The shape has to describe a 1-d histogram')
# change memory alignment for c++ library
x = np.ascontiguousarray(x.astype(np.int32))
result = np.zeros(shape=shape, dtype=np.uint32)
analysis_functions.hist_1d(x, shape[0], result)
return result | 0.00267 |
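Because the function above delegates to a compiled analysis_functions helper, a pure-NumPy cross-check of the intended semantics (an assumption about behaviour, not the library's API) can be written with np.bincount:

import numpy as np

x = np.array([0, 2, 2, 5, 9], dtype=np.int32)
shape = (10,)
# Reference result to compare against hist_1d_index(x, shape).
reference = np.bincount(x, minlength=shape[0]).astype(np.uint32)
print(reference)  # [1 0 2 0 0 1 0 0 0 1]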
def register_user_type(self, keyspace, user_type, klass):
"""
Registers a class to use to represent a particular user-defined type.
Query parameters for this user-defined type will be assumed to be
instances of `klass`. Result sets for this user-defined type will
be instances of `klass`. If no class is registered for a user-defined
type, a namedtuple will be used for result sets, and non-prepared
statements may not encode parameters for this type correctly.
`keyspace` is the name of the keyspace that the UDT is defined in.
`user_type` is the string name of the UDT to register the mapping
for.
`klass` should be a class with attributes whose names match the
        fields of the user-defined type. The constructor must accept kwargs
for each of the fields in the UDT.
This method should only be called after the type has been created
within Cassandra.
Example::
cluster = Cluster(protocol_version=3)
session = cluster.connect()
session.set_keyspace('mykeyspace')
session.execute("CREATE TYPE address (street text, zipcode int)")
session.execute("CREATE TABLE users (id int PRIMARY KEY, location address)")
# create a class to map to the "address" UDT
class Address(object):
def __init__(self, street, zipcode):
self.street = street
self.zipcode = zipcode
cluster.register_user_type('mykeyspace', 'address', Address)
# insert a row using an instance of Address
session.execute("INSERT INTO users (id, location) VALUES (%s, %s)",
(0, Address("123 Main St.", 78723)))
# results will include Address instances
results = session.execute("SELECT * FROM users")
row = results[0]
print row.id, row.location.street, row.location.zipcode
"""
if self.protocol_version < 3:
log.warning("User Type serialization is only supported in native protocol version 3+ (%d in use). "
"CQL encoding for simple statements will still work, but named tuples will "
"be returned when reading type %s.%s.", self.protocol_version, keyspace, user_type)
self._user_types[keyspace][user_type] = klass
for session in tuple(self.sessions):
session.user_type_registered(keyspace, user_type, klass)
UserType.evict_udt_class(keyspace, user_type) | 0.002294 |
def _fit_tfa_inner(
self,
data,
R,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci):
"""Fit TFA model, the inner loop part
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data of a subject
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
TFA
Returns the instance itself.
"""
nfeature = data.shape[0]
nsample = data.shape[1]
feature_indices =\
np.random.choice(nfeature, self.max_num_voxel, replace=False)
sample_features = np.zeros(nfeature).astype(bool)
sample_features[feature_indices] = True
samples_indices =\
np.random.choice(nsample, self.max_num_tr, replace=False)
curr_data = np.zeros((self.max_num_voxel, self.max_num_tr))\
.astype(float)
curr_data = data[feature_indices]
curr_data = curr_data[:, samples_indices].copy()
curr_R = R[feature_indices].copy()
centers = self.get_centers(self.local_prior)
widths = self.get_widths(self.local_prior)
unique_R, inds = self.get_unique_R(curr_R)
F = self.get_factors(unique_R, inds, centers, widths)
W = self.get_weights(curr_data, F)
self.local_posterior_, self.total_cost = self._estimate_centers_widths(
unique_R, inds, curr_data, W, centers, widths,
template_centers, template_centers_mean_cov,
template_widths, template_widths_mean_var_reci)
return self | 0.000934 |
def on_widget__motion_notify_event(self, widget, event):
'''
        Called when the mouse pointer is moved within the drawing area.
.. versionchanged:: 0.11
Do not trigger `route-electrode-added` event if `ALT` key is
pressed.
'''
if self.canvas is None:
# Canvas has not been initialized. Nothing to do.
return
elif event.is_hint:
pointer = event.window.get_pointer()
x, y, mod_type = pointer
else:
x = event.x
y = event.y
shape = self.canvas.find_shape(x, y)
# Grab focus to [enable notification on key press/release events][1].
#
# [1]: http://mailman.daa.com.au/cgi-bin/pipermail/pygtk/2003-August/005770.html
self.widget.grab_focus()
if shape != self.last_hovered:
if self.last_hovered is not None:
# Leaving shape
self.emit('electrode-mouseout', {'electrode_id':
self.last_hovered,
'event': event.copy()})
self.last_hovered = None
elif shape is not None:
# Entering shape
self.last_hovered = shape
if self._route is not None:
if self._route.append(shape) and not (event.get_state() &
gtk.gdk.MOD1_MASK):
# `<Alt>` key was not held down.
self.emit('route-electrode-added', shape)
self.emit('electrode-mouseover', {'electrode_id':
self.last_hovered,
'event': event.copy()}) | 0.00164 |