text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
"""Call up to three different functions for heuristic ensemble clustering
(namely CSPA, HGPA and MCLA) then select as the definitive
consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated with each
partition from the ensemble.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
        in a given round are tagged by a NaN.
hdf5_file_name : file object or string, optional (default = None)
The handle or name of an HDF5 file where any array needed
for consensus_clustering and too large to fit into memory
is to be stored. Created if not specified at input.
verbose : Boolean, optional (default = False)
        Specifies whether messages reporting the status of the many functions
        subsequently called by 'cluster_ensembles' will be displayed
on the standard output.
N_clusters_max : int, optional
        The number of clusters into which to partition the samples for the
        consensus clustering. This defaults to the highest number of clusters
encountered in the sets of independent clusterings on subsamples
of the data-set (i.e. the maximum of the entries in "cluster_runs").
Returns
-------
cluster_ensemble : array of shape (n_samples,)
For the final ensemble clustering, this vector contains the
cluster IDs of each sample in the whole data-set.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
if hdf5_file_name is None:
hdf5_file_name = './Cluster_Ensembles.h5'
fileh = tables.open_file(hdf5_file_name, 'w')
fileh.create_group(fileh.root, 'consensus_group')
fileh.close()
cluster_ensemble = []
score = np.empty(0)
if cluster_runs.shape[1] > 10000:
consensus_functions = [HGPA, MCLA]
function_names = ['HGPA', 'MCLA']
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"due to a rather large number of cells in your data-set, "
"using only 'HyperGraph Partitioning Algorithm' (HGPA) "
"and 'Meta-CLustering Algorithm' (MCLA) "
"as ensemble consensus functions.\n")
else:
consensus_functions = [CSPA, HGPA, MCLA]
function_names = ['CSPA', 'HGPA', 'MCLA']
hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
for i in range(len(consensus_functions)):
cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"{0} at {1}.".format(function_names[i], score[i]))
print('*****')
return cluster_ensemble[np.argmax(score)] | 0.006019 |
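A minimal sketch of the cluster_runs layout the docstring above describes, using made-up toy values (three partitions of five samples, with NaN marking samples not selected in a given run):
import numpy as np

# Each row is one clustering of the same five samples; NaN marks a sample
# that was not selected for that particular run.
cluster_runs = np.array([
    [0, 0, 1, 1, 2],
    [1, 1, 0, 0, np.nan],
    [0, np.nan, 1, 1, 1],
])

# Per the docstring, N_clusters_max defaults to the largest entry seen here.
print(int(np.nanmax(cluster_runs)))  # 2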
def _association_types(self):
"""Retrieve Custom Indicator Associations types from the ThreatConnect API."""
# Dynamically create custom indicator class
r = self.session.get('/v2/types/associationTypes')
# check for bad status code and response that is not JSON
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
warn = u'Custom Indicators Associations are not supported.'
self.log.warning(warn)
return
# validate successful API results
data = r.json()
if data.get('status') != 'Success':
warn = u'Bad Status: Custom Indicators Associations are not supported.'
self.log.warning(warn)
return
try:
# Association Type Name is not a unique value at this time, but should be.
for association in data.get('data', {}).get('associationType', []):
self._indicator_associations_types_data[association.get('name')] = association
except Exception as e:
self.handle_error(200, [e]) | 0.006364 |
def filter_all_contents(value: ecore.EPackage, type_):
"""Returns `eAllContents(type_)`."""
return (c for c in value.eAllContents() if isinstance(c, type_)) | 0.011628 |
def error_string(mqtt_errno):
"""Return the error string associated with an mqtt error number."""
if mqtt_errno == MQTT_ERR_SUCCESS:
return "No error."
elif mqtt_errno == MQTT_ERR_NOMEM:
return "Out of memory."
elif mqtt_errno == MQTT_ERR_PROTOCOL:
return "A network protocol error occurred when communicating with the broker."
elif mqtt_errno == MQTT_ERR_INVAL:
return "Invalid function arguments provided."
elif mqtt_errno == MQTT_ERR_NO_CONN:
return "The client is not currently connected."
elif mqtt_errno == MQTT_ERR_CONN_REFUSED:
return "The connection was refused."
elif mqtt_errno == MQTT_ERR_NOT_FOUND:
return "Message not found (internal error)."
elif mqtt_errno == MQTT_ERR_CONN_LOST:
return "The connection was lost."
elif mqtt_errno == MQTT_ERR_TLS:
return "A TLS error occurred."
elif mqtt_errno == MQTT_ERR_PAYLOAD_SIZE:
return "Payload too large."
elif mqtt_errno == MQTT_ERR_NOT_SUPPORTED:
return "This feature is not supported."
elif mqtt_errno == MQTT_ERR_AUTH:
return "Authorisation failed."
elif mqtt_errno == MQTT_ERR_ACL_DENIED:
return "Access denied by ACL."
elif mqtt_errno == MQTT_ERR_UNKNOWN:
return "Unknown error."
elif mqtt_errno == MQTT_ERR_ERRNO:
return "Error defined by errno."
else:
return "Unknown error." | 0.001392 |
def create(self, image_path, size=1024, sudo=False):
        '''create will create a new image
Parameters
==========
image_path: full path to image
        size: image size in MiB, default is 1024 MiB
        filesystem: supported file systems ext3/ext4 (default: ext3)
'''
from spython.utils import check_install
check_install()
cmd = self.init_command('image.create')
cmd = cmd + ['--size', str(size), image_path ]
output = self.run_command(cmd,sudo=sudo)
self.println(output)
if not os.path.exists(image_path):
bot.exit("Could not create image %s" %image_path)
return image_path | 0.010495 |
def bind_objects(self, *objects):
"""Bind one or more objects"""
self.control.bind_keys(objects)
self.objects += objects | 0.013605 |
def create(name, launch_config_name, availability_zones, min_size, max_size,
desired_capacity=None, load_balancers=None, default_cooldown=None,
health_check_type=None, health_check_period=None,
placement_group=None, vpc_zone_identifier=None, tags=None,
termination_policies=None, suspended_processes=None,
scaling_policies=None, scheduled_actions=None, region=None,
notification_arn=None, notification_types=None,
key=None, keyid=None, profile=None):
'''
Create an autoscale group.
CLI example::
        salt myminion boto_asg.create myasg mylc '["us-east-1a", "us-east-1e"]' 1 10 load_balancers='["myelb", "myelb2"]' tags='[{"key": "Name", "value": "myasg", "propagate_at_launch": True}]'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(availability_zones, six.string_types):
availability_zones = salt.utils.json.loads(availability_zones)
if isinstance(load_balancers, six.string_types):
load_balancers = salt.utils.json.loads(load_balancers)
if isinstance(vpc_zone_identifier, six.string_types):
vpc_zone_identifier = salt.utils.json.loads(vpc_zone_identifier)
if isinstance(tags, six.string_types):
tags = salt.utils.json.loads(tags)
# Make a list of tag objects from the dict.
_tags = []
if tags:
for tag in tags:
try:
                key = tag['key']
except KeyError:
log.error('Tag missing key.')
return False
try:
                value = tag['value']
except KeyError:
log.error('Tag missing value.')
return False
propagate_at_launch = tag.get('propagate_at_launch', False)
_tag = autoscale.Tag(key=key, value=value, resource_id=name,
propagate_at_launch=propagate_at_launch)
_tags.append(_tag)
if isinstance(termination_policies, six.string_types):
termination_policies = salt.utils.json.loads(termination_policies)
if isinstance(suspended_processes, six.string_types):
suspended_processes = salt.utils.json.loads(suspended_processes)
if isinstance(scheduled_actions, six.string_types):
scheduled_actions = salt.utils.json.loads(scheduled_actions)
retries = 30
while True:
try:
_asg = autoscale.AutoScalingGroup(
name=name, launch_config=launch_config_name,
availability_zones=availability_zones,
min_size=min_size, max_size=max_size,
desired_capacity=desired_capacity, load_balancers=load_balancers,
default_cooldown=default_cooldown,
health_check_type=health_check_type,
health_check_period=health_check_period,
placement_group=placement_group, tags=_tags,
vpc_zone_identifier=vpc_zone_identifier,
termination_policies=termination_policies,
suspended_processes=suspended_processes)
conn.create_auto_scaling_group(_asg)
# create scaling policies
_create_scaling_policies(conn, name, scaling_policies)
# create scheduled actions
_create_scheduled_actions(conn, name, scheduled_actions)
# create notifications
if notification_arn and notification_types:
conn.put_notification_configuration(_asg, notification_arn, notification_types)
log.info('Created ASG %s', name)
return True
except boto.exception.BotoServerError as e:
if retries and e.code == 'Throttling':
log.debug('Throttled by AWS API, retrying in 5 seconds...')
time.sleep(5)
retries -= 1
continue
log.error(e)
            log.error('Failed to create ASG %s', name)
return False | 0.000992 |
def get_form_layout(self, process_id, wit_ref_name):
"""GetFormLayout.
[Preview API] Gets the form layout.
:param str process_id: The ID of the process.
:param str wit_ref_name: The reference name of the work item type.
:rtype: :class:`<FormLayout> <azure.devops.v5_0.work_item_tracking_process.models.FormLayout>`
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if wit_ref_name is not None:
route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
response = self._send(http_method='GET',
location_id='fa8646eb-43cd-4b71-9564-40106fd63e40',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('FormLayout', response) | 0.006283 |
def pick_best_methods(stochastic):
"""
Picks the StepMethods best suited to handle
a stochastic variable.
"""
    # Keep track of most competent method
max_competence = 0
# Empty set of appropriate StepMethods
best_candidates = set([])
# Loop over StepMethodRegistry
for method in StepMethodRegistry:
# Parse method and its associated competence
try:
competence = method.competence(stochastic)
except:
competence = 0
# If better than current best method, promote it
if competence > max_competence:
best_candidates = set([method])
max_competence = competence
# If same competence, add it to the set of best methods
elif competence == max_competence:
best_candidates.add(method)
if max_competence <= 0:
raise ValueError(
'Maximum competence reported for stochastic %s is <= 0... you may need to write a custom step method class.' %
stochastic.__name__)
# print_(s.__name__ + ': ', best_candidates, ' ', max_competence)
return best_candidates | 0.002625 |
def projects(self):
"""Get a list of project Resources from the server visible to the current authenticated user.
:rtype: List[Project]
"""
r_json = self._get_json('project')
projects = [Project(
self._options, self._session, raw_project_json) for raw_project_json in r_json]
return projects | 0.011331 |
def _env(self, lines):
'''env will parse a list of environment lines and simply remove any
blank lines, or those with export. Dockerfiles don't usually
have exports.
Parameters
==========
lines: A list of environment pair lines.
'''
        environ = [x for x in lines if x.strip() and not x.startswith('export')]  # drop blanks and exports
self.environ += environ | 0.009685 |
def set_composition(self, composition_id):
"""Sets the composition.
arg: composition_id (osid.id.Id): a composition
raise: InvalidArgument - ``composition_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``composition_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_composition_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(composition_id):
raise errors.InvalidArgument()
self._my_map['compositionId'] = str(composition_id) | 0.004043 |
def pre_validate(self, form):
'''Calls preprocessors before pre_validation'''
for preprocessor in self._preprocessors:
preprocessor(form, self)
super(FieldHelper, self).pre_validate(form) | 0.008969 |
def _read_buffers(header, buffers, mesh_kwargs):
"""
Given a list of binary data and a layout, return the
kwargs to create a scene object.
Parameters
-----------
header : dict
With GLTF keys
buffers : list of bytes
Stored data
    mesh_kwargs : dict
Kwargs for mesh constructors
Returns
-----------
kwargs : dict
Can be passed to load_kwargs for a trimesh.Scene
"""
# split buffer data into buffer views
views = [None] * len(header["bufferViews"])
for i, view in enumerate(header["bufferViews"]):
if "byteOffset" in view:
start = view["byteOffset"]
else:
start = 0
end = start + view["byteLength"]
views[i] = buffers[view["buffer"]][start:end]
assert len(views[i]) == view["byteLength"]
# load data from buffers and bufferviews into numpy arrays
# using the layout described by accessors
access = []
for a in header["accessors"]:
data = views[a["bufferView"]]
dtype = _types[a["componentType"]]
shape = _shapes[a["type"]]
# is the accessor offset in a buffer
if "byteOffset" in a:
start = a["byteOffset"]
else:
start = 0
# basically the number of columns
per_count = np.abs(np.product(shape))
# length is the number of bytes per item times total
length = np.dtype(dtype).itemsize * a["count"] * per_count
end = start + length
array = np.frombuffer(
data[start:end], dtype=dtype).reshape(shape)
assert len(array) == a["count"]
access.append(array)
# load images and textures into material objects
materials = _parse_materials(header, views)
mesh_prim = collections.defaultdict(list)
# load data from accessors into Trimesh objects
meshes = collections.OrderedDict()
for index, m in enumerate(header["meshes"]):
metadata = {}
try:
# try loading units from the GLTF extra
metadata['units'] = str(m["extras"]["units"])
except BaseException:
# GLTF spec indicates the default units are meters
metadata['units'] = 'meters'
for j, p in enumerate(m["primitives"]):
# if we don't have a triangular mesh continue
# if not specified assume it is a mesh
if "mode" in p and p["mode"] != 4:
continue
# store those units
kwargs = {"metadata": {}}
kwargs.update(mesh_kwargs)
kwargs["metadata"].update(metadata)
# get faces from accessors and reshape
kwargs["faces"] = access[p["indices"]].reshape((-1, 3))
# get vertices from accessors
kwargs["vertices"] = access[p["attributes"]["POSITION"]]
# do we have UV coordinates
if "material" in p:
if materials is None:
log.warning('no materials! `pip install pillow`')
else:
uv = None
if "TEXCOORD_0" in p["attributes"]:
                        # flip UVs top to bottom to move origin to lower-left:
# https://github.com/KhronosGroup/glTF/issues/1021
uv = access[p["attributes"]["TEXCOORD_0"]].copy()
uv[:, 1] = 1.0 - uv[:, 1]
# create a texture visual
kwargs["visual"] = visual.texture.TextureVisuals(
uv=uv, material=materials[p["material"]])
            # create a unique mesh name per-primitive
if "name" in m:
name = m["name"]
else:
name = "GLTF_geometry"
            # each primitive gets its own Trimesh object
if len(m["primitives"]) > 1:
name += "_{}".format(j)
meshes[name] = kwargs
mesh_prim[index].append(name)
# make it easier to reference nodes
nodes = header["nodes"]
# nodes are referenced by index
# save their string names if they have one
# node index (int) : name (str)
names = {}
for i, n in enumerate(nodes):
if "name" in n:
names[i] = n["name"]
else:
names[i] = str(i)
# make sure we have a unique base frame name
base_frame = "world"
if base_frame in names:
base_frame = str(int(np.random.random() * 1e10))
names[base_frame] = base_frame
# visited, kwargs for scene.graph.update
graph = collections.deque()
# unvisited, pairs of node indexes
queue = collections.deque()
# start the traversal from the base frame to the roots
for root in header["scenes"][header["scene"]]["nodes"]:
# add transform from base frame to these root nodes
queue.append([base_frame, root])
# go through the nodes tree to populate
# kwargs for scene graph loader
while len(queue) > 0:
# (int, int) pair of node indexes
a, b = queue.pop()
# dict of child node
# parent = nodes[a]
child = nodes[b]
# add edges of children to be processed
if "children" in child:
queue.extend([[b, i] for i in child["children"]])
# kwargs to be passed to scene.graph.update
kwargs = {"frame_from": names[a], "frame_to": names[b]}
# grab matrix from child
# parent -> child relationships have matrix stored in child
# for the transform from parent to child
if "matrix" in child:
kwargs["matrix"] = (
np.array(child["matrix"],
dtype=np.float64).reshape((4, 4)).T
)
else:
# if no matrix set identity
kwargs["matrix"] = np.eye(4)
# Now apply keyword translations
# GLTF applies these in order: T * R * S
if "translation" in child:
kwargs["matrix"] = np.dot(
kwargs["matrix"],
transformations.translation_matrix(child["translation"]),
)
if "rotation" in child:
# GLTF rotations are stored as (4,) XYZW unit quaternions
            # we need to re-order to our quaternion style, WXYZ
quat = np.reshape(child["rotation"], 4)[[3, 0, 1, 2]]
# add the rotation to the matrix
kwargs["matrix"] = np.dot(
kwargs["matrix"], transformations.quaternion_matrix(quat)
)
# append the nodes for connectivity without the mesh
graph.append(kwargs.copy())
if "mesh" in child:
            # append a new node per-geometry instance
geometries = mesh_prim[child["mesh"]]
for name in geometries:
kwargs["geometry"] = name
kwargs["frame_to"] = "{}_{}".format(
name, util.unique_id(
length=6, increment=len(graph)).upper()
)
# append the edge with the mesh frame
graph.append(kwargs.copy())
# kwargs to be loaded
result = {
"class": "Scene",
"geometry": meshes,
"graph": graph,
"base_frame": base_frame,
}
return result | 0.000137 |
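A minimal self-contained sketch of the accessor-decoding step used above, with toy buffer data rather than a real glTF file; 5126 is glTF's float32 component type and "VEC3" maps to a (-1, 3) shape:
import numpy as np

# Three VEC3 float32 vertices packed into raw bytes, as a bufferView would
# expose them to an accessor.
raw = np.arange(9, dtype=np.float32).tobytes()
accessor = {"componentType": 5126, "type": "VEC3", "count": 3, "byteOffset": 0}

dtype = {5126: np.float32}[accessor["componentType"]]
shape = {"VEC3": (-1, 3)}[accessor["type"]]
start = accessor.get("byteOffset", 0)
length = np.dtype(dtype).itemsize * accessor["count"] * 3
vertices = np.frombuffer(raw[start:start + length], dtype=dtype).reshape(shape)
assert len(vertices) == accessor["count"]
print(vertices)  # [[0. 1. 2.] [3. 4. 5.] [6. 7. 8.]]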
def _get(self, ndef_message, timeout=1.0):
"""Get an NDEF message from the server. Temporarily connects
to the default SNEP server if the client is not yet connected.
"""
if not self.socket:
try:
self.connect('urn:nfc:sn:snep')
except nfc.llcp.ConnectRefused:
return None
else:
self.release_connection = True
else:
self.release_connection = False
try:
snep_request = b'\x10\x01'
snep_request += struct.pack('>L', 4 + len(str(ndef_message)))
snep_request += struct.pack('>L', self.acceptable_length)
snep_request += str(ndef_message)
if send_request(self.socket, snep_request, self.send_miu):
response = recv_response(
self.socket, self.acceptable_length, timeout)
if response is not None:
if response[1] != 0x81:
raise SnepError(response[1])
return response[6:]
finally:
if self.release_connection:
self.close() | 0.001714 |
def host(name, ip4=True, ip6=True, **kwargs):
'''
Return a list of addresses for name
ip6:
Return IPv6 addresses
ip4:
Return IPv4 addresses
the rest is passed on to lookup()
'''
res = {}
if ip6:
ip6 = lookup(name, 'AAAA', **kwargs)
if ip6:
res['ip6'] = ip6
if ip4:
ip4 = lookup(name, 'A', **kwargs)
if ip4:
res['ip4'] = ip4
return res | 0.002227 |
def get_qpimage_raw(self, idx=0):
"""Return QPImage without background correction"""
qpi = qpimage.QPImage(h5file=self.path,
h5mode="r",
h5dtype=self.as_type,
).copy()
# Remove previously performed background correction
qpi.set_bg_data(None)
# Force meta data
for key in self.meta_data:
qpi[key] = self.meta_data[key]
# set identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi | 0.003546 |
def variable_names(self):
"""
Returns the names of all environment variables.
:return: the names of the variables
:rtype: list
"""
result = []
names = javabridge.call(self.jobject, "getVariableNames", "()Ljava/util/Set;")
for name in javabridge.iterate_collection(names):
result.append(javabridge.to_string(name))
return result | 0.007282 |
def _init_mythril_dir() -> str:
"""
Initializes the mythril dir and config.ini file
:return: The mythril dir's path
"""
try:
mythril_dir = os.environ["MYTHRIL_DIR"]
except KeyError:
mythril_dir = os.path.join(os.path.expanduser("~"), ".mythril")
if not os.path.exists(mythril_dir):
# Initialize data directory
log.info("Creating mythril data directory")
os.mkdir(mythril_dir)
db_path = str(Path(mythril_dir) / "signatures.db")
if not os.path.exists(db_path):
# if the default mythril dir doesn't contain a signature DB
# initialize it with the default one from the project root
asset_dir = Path(__file__).parent.parent / "support" / "assets"
copyfile(str(asset_dir / "signatures.db"), db_path)
return mythril_dir | 0.002212 |
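A minimal sketch of the same environment-variable-with-fallback pattern using os.environ.get; the directory name is reused from the function above purely for illustration:
import os

# Equivalent to the try/except KeyError above: prefer $MYTHRIL_DIR,
# otherwise fall back to ~/.mythril.
mythril_dir = os.environ.get(
    "MYTHRIL_DIR", os.path.join(os.path.expanduser("~"), ".mythril"))
print(mythril_dir)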
def conforms(element, etype, namespace: Dict[str, Any]) -> bool:
""" Determine whether element conforms to etype
:param element: Element to test for conformance
:param etype: Type to test against
:param namespace: Namespace to use to resolve forward references
:return:
"""
etype = proc_forward(etype, namespace)
if is_union(etype):
return union_conforms(element, etype, namespace, conforms)
else:
return element_conforms(element, etype) | 0.002041 |
def _clean_bindings(self, bindings):
"""
Remove all of the expressions from bindings
:param bindings: The bindings to clean
:type bindings: list
:return: The cleaned bindings
:rtype: list
"""
return list(filter(lambda b: not isinstance(b, QueryExpression), bindings)) | 0.009009 |
def _checksum_paths():
"""Returns dict {'dataset_name': 'path/to/checksums/file'}."""
dataset2path = {}
for dir_path in _CHECKSUM_DIRS:
for fname in _list_dir(dir_path):
if not fname.endswith(_CHECKSUM_SUFFIX):
continue
fpath = os.path.join(dir_path, fname)
dataset_name = fname[:-len(_CHECKSUM_SUFFIX)]
dataset2path[dataset_name] = fpath
return dataset2path | 0.022388 |
def convert_wide_to_long(wide_data,
ind_vars,
alt_specific_vars,
availability_vars,
obs_id_col,
choice_col,
new_alt_id_name=None):
"""
Will convert a cross-sectional dataframe of discrete choice data from wide
format to long format.
Parameters
----------
wide_data : pandas dataframe.
Contains one row for each observation. Should have the specified
`[obs_id_col, choice_col] + availability_vars.values()` columns.
ind_vars : list of strings.
Each element should be a column heading in `wide_data` that denotes a
variable that varies across observations but not across alternatives.
alt_specific_vars : dict.
Each key should be a string that will be a column heading of the
returned, long format dataframe. Each value should be a dictionary
where the inner key is the alternative id and the value is the column
heading in wide data that specifies the value of the outer key for the
associated alternative. The variables denoted by the outer key should
vary across individuals and across some or all alternatives.
availability_vars : dict.
There should be one key value pair for each alternative that is
observed in the dataset. Each key should be the alternative id for the
alternative, and the value should be the column heading in `wide_data`
that denotes (using ones and zeros) whether an alternative is
available/unavailable, respectively, for a given observation.
Alternative id's, i.e. the keys, must be integers.
obs_id_col : str.
Denotes the column in `wide_data` that contains the observation ID
values for each row.
choice_col : str.
Denotes the column in `wide_data` that contains a one if the
alternative pertaining to the given row was the observed outcome for
the observation pertaining to the given row and a zero otherwise.
new_alt_id_name : str, optional.
If not None, should be a string. This string will be used as the column
heading for the alternative id column in the returned 'long' format
dataframe. If not passed, this column will be called `'alt_id'`.
Default == None.
Returns
-------
final_long_df : pandas dataframe.
Will contain one row for each available alternative for each
observation. Will contain an observation id column of the same name as
`obs_id_col`. Will also contain a choice column of the same name as
`choice_col`. Will also contain an alternative id column called
`alt_id` if `new_alt_id_col == None`, or `new_alt_id` otherwise. Will
contain one column per variable in `ind_vars`. Will contain one column
per key in `alt_specific_vars`.
"""
##########
# Check that all columns of wide_data are being
# used in the conversion to long format
##########
all_alt_specific_cols = []
for var_dict in alt_specific_vars.values():
all_alt_specific_cols.extend(var_dict.values())
vars_accounted_for = set(ind_vars +
                             # convert to list explicitly to support
# both python 2 and 3
list(availability_vars.values()) +
[obs_id_col, choice_col] +
all_alt_specific_cols)
num_vars_accounted_for = len(vars_accounted_for)
ensure_all_columns_are_used(num_vars_accounted_for,
wide_data,
data_title='wide_data')
##########
# Check that all columns one wishes to use are actually in wide_data
##########
ensure_columns_are_in_dataframe(ind_vars,
wide_data,
col_title='ind_vars',
data_title='wide_data')
ensure_columns_are_in_dataframe(availability_vars.values(),
wide_data,
col_title='availability_vars',
data_title='wide_data')
for new_column in alt_specific_vars:
for alt_id in alt_specific_vars[new_column]:
old_column = alt_specific_vars[new_column][alt_id]
ensure_columns_are_in_dataframe([old_column],
wide_data,
col_title="alt_specific_vars",
data_title='wide_data')
ensure_columns_are_in_dataframe([choice_col, obs_id_col],
wide_data,
col_title='[choice_col, obs_id_col]',
data_title='wide_data')
##########
# Check the integrity of the various columns present in wide_data
##########
# Make sure the observation id's are unique (i.e. one per row)
ensure_unique_obs_ids_in_wide_data(obs_id_col, wide_data)
# Make sure there are no blank values in the choice column
check_wide_data_for_blank_choices(choice_col, wide_data)
##########
# Check that the user-provided alternative ids are observed
# in the realized choices.
##########
ensure_all_wide_alt_ids_are_chosen(choice_col,
alt_specific_vars,
availability_vars,
wide_data)
##########
# Check that the realized choices are all in the
# user-provided alternative ids
##########
ensure_chosen_alternatives_are_in_user_alt_ids(choice_col,
wide_data,
availability_vars)
##########
# Make sure each observation chose a personally available alternative.
##########
ensure_each_wide_obs_chose_an_available_alternative(obs_id_col,
choice_col,
availability_vars,
wide_data)
##########
# Figure out how many rows/columns should be in the long format dataframe
##########
# Note that the number of rows in long format is the
# number of available alternatives across all observations
sorted_alt_ids = np.sort(wide_data[choice_col].unique())
sorted_availability_cols = [availability_vars[x] for x in sorted_alt_ids]
num_rows = wide_data[sorted_availability_cols].sum(axis=0).sum()
#####
    # Calculate the needed number of columns
#####
# For each observation, there is at least one column-- the observation id,
num_cols = 1
# We should also have one alternative id column
num_cols += 1
# We should also have one column to record the choice of each observation
num_cols += 1
# We should also have one column for each individual specific variable
num_cols += len(ind_vars)
# We should also have one column for each alternative specific variable,
num_cols += len(alt_specific_vars.keys())
##########
# Create the columns of the new dataframe
##########
#####
# Create the observation id column,
#####
# Determine the various availability values for each observation
wide_availability_values = wide_data[list(
availability_vars.values())].values
new_obs_id_col = (wide_availability_values *
wide_data[obs_id_col].values[:, None]).ravel()
# Make sure the observation id column has an integer data type
new_obs_id_col = new_obs_id_col.astype(int)
#####
# Create the independent variable columns. Store them in a list.
#####
new_ind_var_cols = []
for var in ind_vars:
new_ind_var_cols.append((wide_availability_values *
wide_data[var].values[:, None]).ravel())
#####
# Create the choice column in the long data format
#####
wide_choice_data = (wide_data[choice_col].values[:, None] ==
sorted_alt_ids[None, :])
new_choice_col = wide_choice_data.ravel()
# Make sure the choice column has an integer data type
new_choice_col = new_choice_col.astype(int)
#####
# Create the alternative id column
#####
new_alt_id_col = (wide_availability_values *
sorted_alt_ids[None, :]).ravel().astype(int)
# Make sure the alternative id column has an integer data type
new_alt_id_col = new_alt_id_col.astype(int)
#####
# Create the alternative specific and subset
# alternative specific variable columns
#####
# For each alternative specific variable, create a wide format array.
# Then unravel that array to have the long format column for the
# alternative specific variable. Store all the long format columns
# in a list
new_alt_specific_cols = []
for new_col in alt_specific_vars:
new_wide_alt_specific_cols = []
for alt_id in sorted_alt_ids:
# This will extract the correct values for the alternatives over
# which the alternative specific variables vary
if alt_id in alt_specific_vars[new_col]:
rel_wide_column = alt_specific_vars[new_col][alt_id]
new_col_vals = wide_data[rel_wide_column].values[:, None]
new_wide_alt_specific_cols.append(new_col_vals)
# This will create placeholder zeros for the alternatives that
# the alternative specific variables do not vary over
else:
new_wide_alt_specific_cols.append(np.zeros(
(wide_data.shape[0], 1)))
concatenated_long_column = np.concatenate(new_wide_alt_specific_cols,
axis=1).ravel()
new_alt_specific_cols.append(concatenated_long_column)
##########
    # Construct the final long format dataframe to be returned
##########
# Identify rows that correspond to unavailable alternatives
availability_condition = wide_availability_values.ravel() != 0
# Figure out the names of all of the columns in the final
# dataframe
alt_id_column_name = ("alt_id" if new_alt_id_name is None
else new_alt_id_name)
final_long_columns = ([obs_id_col,
alt_id_column_name,
choice_col] +
ind_vars +
list(alt_specific_vars.keys()))
# Create a 'record array' of the final dataframe's columns
# Note that record arrays are constructed from a list of 1D
# arrays hence the array unpacking performed below for
# new_ind_var_cols and new_alt_specific_cols
all_arrays = ([new_obs_id_col,
new_alt_id_col,
new_choice_col] +
new_ind_var_cols +
new_alt_specific_cols)
# Be sure to remove rows corresponding to unavailable alternatives
# When creating the record array.
df_recs = np.rec.fromarrays([all_arrays[pos][availability_condition]
for pos in range(len(all_arrays))],
names=final_long_columns)
# Create the final dataframe
final_long_df = pd.DataFrame.from_records(df_recs)
##########
# Make sure one has the correct number of rows and columns in
# the final dataframe
##########
try:
assert final_long_df.shape == (num_rows, num_cols)
except AssertionError:
msg_1 = "There is an error with the dataframe that will be returned."
msg_2 = "The shape of the dataframe should be {}".format((num_rows,
num_cols))
msg_3 = "Instead, the returned dataframe will have shape: {}"
total_msg = "\n".join([msg_1, msg_2, msg_3])
warnings.warn(total_msg.format(final_long_df.shape))
    # Return the long format dataframe
return final_long_df | 0.000081 |
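A minimal sketch of the core wide-to-long trick used above, with toy data rather than this library's API: multiplying the availability matrix by per-observation values and ravelling yields the long-format columns, and zero-availability rows are dropped afterwards:
import numpy as np

# Two observations, three alternatives; 1 = alternative available.
availability = np.array([[1, 1, 0],
                         [1, 1, 1]])
obs_ids = np.array([101, 102])
alt_ids = np.array([1, 2, 3])

long_obs_id = (availability * obs_ids[:, None]).ravel()
long_alt_id = (availability * alt_ids[None, :]).ravel()
keep = availability.ravel() != 0  # drop unavailable alternatives

print(long_obs_id[keep])  # [101 101 102 102 102]
print(long_alt_id[keep])  # [1 2 1 2 3]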
def init_logger(self):
"""Init logger."""
if not self.result_logger:
if not os.path.exists(self.local_dir):
os.makedirs(self.local_dir)
if not self.logdir:
self.logdir = tempfile.mkdtemp(
prefix="{}_{}".format(
str(self)[:MAX_LEN_IDENTIFIER], date_str()),
dir=self.local_dir)
elif not os.path.exists(self.logdir):
os.makedirs(self.logdir)
self.result_logger = UnifiedLogger(
self.config,
self.logdir,
upload_uri=self.upload_dir,
loggers=self.loggers,
sync_function=self.sync_function) | 0.002695 |
def unprovision_vdp_overlay_networks(self, net_uuid, lvid, vdp_vlan, oui):
"""Unprovisions a overlay type network configured using VDP.
:param net_uuid: the uuid of the network associated with this vlan.
:lvid: Local VLAN ID
:vdp_vlan: VDP VLAN ID
:oui: OUI Parameters
"""
# check validity
if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
LOG.error("Cannot unprovision VDP Overlay network for"
" net-id=%(net_uuid)s - Invalid ",
{'net_uuid': net_uuid})
return
LOG.info('unprovision_vdp_overlay_networks: add_flow for '
'Local Vlan %(local_vlan)s VDP VLAN %(vdp_vlan)s',
{'local_vlan': lvid, 'vdp_vlan': vdp_vlan})
self.program_vm_ovs_flows(lvid, vdp_vlan, 0) | 0.002389 |
def resample_run(res, rstate=None, return_idx=False):
"""
Probes **sampling uncertainties** on a nested sampling run using bootstrap
resampling techniques to generate a *realization* of the (expected) prior
volume(s) associated with each sample (dead point). This effectively
splits a nested sampling run with `K` particles (live points) into a
series of `K` "strands" (i.e. runs with a single live point) which are then
bootstrapped to construct a new "resampled" run. Companion function to
:meth:`jitter_run` and :meth:`simulate_run`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
return_idx : bool, optional
Whether to return the list of resampled indices used to construct
the new run. Default is `False`.
Returns
-------
new_res : :class:`~dynesty.results.Results` instance
A new :class:`~dynesty.results.Results` instance with corresponding
samples and weights based on our "bootstrapped" samples and
(expected) prior volumes.
"""
if rstate is None:
rstate = np.random
# Check whether the final set of live points were added to the
# run.
nsamps = len(res.ncall)
try:
# Check if the number of live points explicitly changes.
samples_n = res.samples_n
samples_batch = res.samples_batch
batch_bounds = res.batch_bounds
added_final_live = True
except:
# If the number of live points is constant, compute `samples_n` and
# set up the `added_final_live` flag.
nlive = res.nlive
niter = res.niter
if nsamps == niter:
samples_n = np.ones(niter, dtype='int') * nlive
added_final_live = False
elif nsamps == (niter + nlive):
samples_n = np.append(np.ones(niter, dtype='int') * nlive,
np.arange(1, nlive + 1)[::-1])
added_final_live = True
else:
raise ValueError("Final number of samples differs from number of "
"iterations and number of live points.")
samples_batch = np.zeros(len(samples_n), dtype='int')
batch_bounds = np.array([(-np.inf, np.inf)])
batch_llmin = batch_bounds[:, 0]
# Identify unique particles that make up each strand.
ids = np.unique(res.samples_id)
# Split the set of strands into two groups: a "baseline" group that
# contains points initially sampled from the prior, which gives information
# on the evidence, and an "add-on" group, which gives additional
# information conditioned on our baseline strands.
base_ids = []
addon_ids = []
for i in ids:
sbatch = samples_batch[res.samples_id == i]
if np.any(batch_llmin[sbatch] == -np.inf):
base_ids.append(i)
else:
addon_ids.append(i)
nbase, nadd = len(base_ids), len(addon_ids)
base_ids, addon_ids = np.array(base_ids), np.array(addon_ids)
# Resample strands.
if nbase > 0 and nadd > 0:
live_idx = np.append(base_ids[rstate.randint(0, nbase, size=nbase)],
addon_ids[rstate.randint(0, nadd, size=nadd)])
elif nbase > 0:
live_idx = base_ids[rstate.randint(0, nbase, size=nbase)]
elif nadd > 0:
raise ValueError("The provided `Results` does not include any points "
"initially sampled from the prior!")
else:
raise ValueError("The provided `Results` does not appear to have "
"any particles!")
# Find corresponding indices within the original run.
samp_idx = np.arange(len(res.ncall))
samp_idx = np.concatenate([samp_idx[res.samples_id == idx]
for idx in live_idx])
# Derive new sample size.
nsamps = len(samp_idx)
# Sort the loglikelihoods (there will be duplicates).
logls = res.logl[samp_idx]
idx_sort = np.argsort(logls)
samp_idx = samp_idx[idx_sort]
logl = res.logl[samp_idx]
if added_final_live:
# Compute the effective number of live points for each sample.
samp_n = np.zeros(nsamps, dtype='int')
uidxs, uidxs_n = np.unique(live_idx, return_counts=True)
for uidx, uidx_n in zip(uidxs, uidxs_n):
sel = (res.samples_id == uidx) # selection flag
sbatch = samples_batch[sel][0] # corresponding batch ID
lower = batch_llmin[sbatch] # lower bound
upper = max(res.logl[sel]) # upper bound
# Add number of live points between endpoints equal to number of
# times the strand has been resampled.
samp_n[(logl > lower) & (logl < upper)] += uidx_n
# At the endpoint, divide up the final set of points into `uidx_n`
# (roughly) equal chunks and have live points decrease across them.
endsel = (logl == upper)
endsel_n = np.count_nonzero(endsel)
chunk = endsel_n / uidx_n # define our chunk
counters = np.array(np.arange(endsel_n) / chunk, dtype='int')
nlive_end = counters[::-1] + 1 # decreasing number of live points
samp_n[endsel] += nlive_end # add live point sequence
else:
# If we didn't add the final set of live points, the run has a constant
# number of live points and can simply be re-ordered.
samp_n = samples_n[samp_idx]
# Assign log(volume) to samples.
logvol = np.cumsum(np.log(samp_n / (samp_n + 1.)))
# Computing weights using quadratic estimator.
h = 0.
logz = -1.e300
loglstar = -1.e300
logzvar = 0.
logvols_pad = np.concatenate(([0.], logvol))
logdvols = misc.logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
axis=1, b=np.c_[np.ones(nsamps),
-np.ones(nsamps)])
logdvols += math.log(0.5)
dlvs = logvols_pad[:-1] - logvols_pad[1:]
saved_logwt, saved_logz, saved_logzvar, saved_h = [], [], [], []
for i in range(nsamps):
loglstar_new = logl[i]
logdvol, dlv = logdvols[i], dlvs[i]
logwt = np.logaddexp(loglstar_new, loglstar) + logdvol
logz_new = np.logaddexp(logz, logwt)
lzterm = (math.exp(loglstar - logz_new) * loglstar +
math.exp(loglstar_new - logz_new) * loglstar_new)
h_new = (math.exp(logdvol) * lzterm +
math.exp(logz - logz_new) * (h + logz) -
logz_new)
dh = h_new - h
h = h_new
logz = logz_new
logzvar += dh * dlv
loglstar = loglstar_new
saved_logwt.append(logwt)
saved_logz.append(logz)
saved_logzvar.append(logzvar)
saved_h.append(h)
# Compute sampling efficiency.
eff = 100. * len(res.ncall[samp_idx]) / sum(res.ncall[samp_idx])
# Copy results.
new_res = Results([item for item in res.items()])
# Overwrite items with our new estimates.
new_res.niter = len(res.ncall[samp_idx])
new_res.ncall = res.ncall[samp_idx]
new_res.eff = eff
new_res.samples = res.samples[samp_idx]
new_res.samples_id = res.samples_id[samp_idx]
new_res.samples_it = res.samples_it[samp_idx]
new_res.samples_u = res.samples_u[samp_idx]
new_res.samples_n = samp_n
new_res.logwt = np.array(saved_logwt)
new_res.logl = logl
new_res.logvol = logvol
new_res.logz = np.array(saved_logz)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
new_res.logzerr = np.sqrt(np.array(saved_logzvar))
new_res.h = np.array(saved_h)
if return_idx:
return new_res, samp_idx
else:
return new_res | 0.000252 |
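A minimal numerical sketch of the prior-volume assignment used above, assuming a constant number of live points (toy values, not a real nested sampling run): with samp_n equal to K at every iteration, the cumulative sum gives log X_i = i * log(K / (K + 1)).
import numpy as np

nlive, niter = 100, 5
samp_n = np.full(niter, nlive)
# Expected log prior volume after each dead point, as computed in the
# function above.
logvol = np.cumsum(np.log(samp_n / (samp_n + 1.0)))
print(logvol)  # ~ [-0.00995 -0.0199 -0.0299 -0.0398 -0.0498]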
def shutdown(self, exitcode=0, exitmsg=None):
'''
If sub-classed, run any shutdown operations on this method.
:param exitcode
:param exitmsg
'''
self.action_log_info('Shutting down')
if hasattr(self, 'minion') and hasattr(self.minion, 'destroy'):
self.minion.destroy()
super(Minion, self).shutdown(
exitcode, ('The Salt {0} is shutdown. {1}'.format(
self.__class__.__name__, (exitmsg or '')).strip())) | 0.003953 |
def build(outname, wcsname, refimage, undistort=False,
applycoeffs=False, coeffsfile=None, **wcspars):
""" Core functionality to create a WCS instance from a reference image WCS,
user supplied parameters or user adjusted reference WCS.
The distortion information can either be read in as part of the reference
image WCS or given in 'coeffsfile'.
Parameters
----------
outname : string
filename of output WCS
wcsname : string
WCSNAME ID for generated WCS
refimage : string
filename of image with source WCS used as basis for output WCS
undistort : bool
Create an undistorted WCS?
applycoeffs : bool
Apply coefficients from refimage to generate undistorted WCS?
coeffsfile : string
If specified, read distortion coeffs from separate file
"""
    # Ensure that the User WCS parameters have values for all the parameters,
# even if that value is 'None'
user_wcs_pars = convert_user_pars(wcspars)
userwcs = wcspars['userwcs']
"""
Use cases to document the logic required to interpret the parameters
WCS generation based on refimage/userwcs parameters
-------------------------------------------------------------
refimage == None, userwcs == False:
*NO WCS specified*
=> print a WARNING message and return without doing anything
refimage == None, userwcs == True:
=> Create WCS without a distortion model entirely from user parameters*
refimage != None, userwcs == False:
=> No user WCS parameters specified
=> Simply use refimage WCS as specified
refimage != None, userwcs == True:
=> Update refimage WCS with user specified values*
Apply distortion and generate final headerlet using processed WCS
-----------------------------------------------------------------
refimage == None, userwcs == True:
*Output WCS generated entirely from user supplied parameters*
Case 1: applycoeffs == False, undistort == True/False (ignored)
=> no distortion model to interpret
=> generate undistorted headerlet with no distortion model
Case 2: applycoeffs == True/False, undistort == True
=> ignore any user specified distortion model
=> generate undistorted headerlet with no distortion model
Case 3: applycoeffs == True, undistort == False
=> WCS from scratch combined with distortion model from another image
=> generate headerlet with distortion model
refimage != None, userwcs == True/False:
*Output WCS generated from reference image possibly modified by user parameters*
Case 4: applycoeffs == False, undistort == True
=> If refimage has distortion, remove it
=> generate undistorted headerlet with no distortion model
Case 5: applycoeffs == False, undistort == False
=> Leave refimage distortion model (if any) unmodified
=> generate a headerlet using same distortion model (if any) as refimage
Case 6: applycoeffs == True, undistort == False
=> Update refimage with distortion model with user-specified model
=> generate a headerlet with a distortion model
Case 7: applycoeffs == True, undistort == True
=> ignore user specified distortion model and undistort WCS
=> generate a headerlet without a distortion model
"""
### Build WCS from refimage and/or user pars
if util.is_blank(refimage) and not userwcs:
print('WARNING: No WCS specified... No WCS created!')
return
customwcs = None
if util.is_blank(refimage) and userwcs:
# create HSTWCS object from user parameters
complete_wcs = True
for key in user_wcs_pars:
if util.is_blank(user_wcs_pars[key]):
complete_wcs = False
break
if complete_wcs:
customwcs = wcs_functions.build_hstwcs(user_wcs_pars['crval1'],user_wcs_pars['crval2'],
user_wcs_pars['crpix1'],user_wcs_pars['crpix2'],
user_wcs_pars['naxis1'],user_wcs_pars['naxis2'],
user_wcs_pars['pscale'],user_wcs_pars['orientat'])
else:
print('WARNING: Not enough WCS information provided by user!')
raise ValueError
if not util.is_blank(refimage):
refwcs = stwcs.wcsutil.HSTWCS(refimage)
else:
refwcs = customwcs
### Apply distortion model (if any) to update WCS
if applycoeffs and not util.is_blank(coeffsfile):
if not util.is_blank(refimage):
replace_model(refwcs, coeffsfile)
else:
if not undistort:
add_model(refwcs,coeffsfile)
# Only working with custom WCS from user, no distortion
# so apply model to WCS, including modifying the CD matrix
apply_model(refwcs)
### Create undistorted WCS, if requested
if undistort:
outwcs = undistortWCS(refwcs)
else:
outwcs = refwcs
if userwcs:
# replace (some/all?) WCS values from refimage with user WCS values
# by running 'updatewcs' functions on input WCS
outwcs = mergewcs(outwcs,customwcs,user_wcs_pars)
### Create the final headerlet and write it out, if specified
if not util.is_blank(refimage):
template = refimage
elif not util.is_blank(coeffsfile):
template = coeffsfile
else:
template = None
# create default WCSNAME if None was given
wcsname = create_WCSname(wcsname)
print('Creating final headerlet with name ',wcsname,' using template ',template)
outhdr = generate_headerlet(outwcs,template,wcsname,outname=outname)
# synchronize this new WCS with the rest of the chips in the image
for ext in outhdr:
if 'extname' in ext.header and ext.header['extname'] == 'SIPWCS':
ext_wcs = wcsutil.HSTWCS(ext)
stwcs.updatewcs.makewcs.MakeWCS.updateWCS(ext_wcs,outwcs)
return outwcs | 0.00454 |
def recurse_tree(path, excludes, opts):
"""
Look for every file in the directory tree and create the corresponding
ReST files.
"""
# use absolute path for root, as relative paths like '../../foo' cause
# 'if "/." in root ...' to filter out *all* modules otherwise
path = os.path.abspath(path)
    # check if the base directory is a package and get its name
if INIT in os.listdir(path):
package_name = path.split(os.path.sep)[-1]
else:
package_name = None
toc = []
tree = os.walk(path, False)
for root, subs, files in tree:
# keep only the Python script files
py_files = sorted([f for f in files if os.path.splitext(f)[1] == '.py'])
if INIT in py_files:
py_files.remove(INIT)
py_files.insert(0, INIT)
# remove hidden ('.') and private ('_') directories
subs = sorted([sub for sub in subs if sub[0] not in ['.', '_']])
# check if there are valid files to process
# TODO: could add check for windows hidden files
if "/." in root or "/_" in root \
or not py_files \
or is_excluded(root, excludes):
continue
if INIT in py_files:
# we are in package ...
if (# ... with subpackage(s)
subs
or
# ... with some module(s)
len(py_files) > 1
or
# ... with a not-to-be-skipped INIT file
not shall_skip(os.path.join(root, INIT))
):
subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '.')
create_package_file(root, package_name, subroot, py_files, opts, subs)
toc.append(makename(package_name, subroot))
elif root == path:
# if we are at the root level, we don't require it to be a package
for py_file in py_files:
if not shall_skip(os.path.join(path, py_file)):
module = os.path.splitext(py_file)[0]
create_module_file(package_name, module, opts)
toc.append(makename(package_name, module))
# create the module's index
if not opts.notoc:
create_modules_toc_file(package_name, toc, opts) | 0.003478 |
def get_xblock_settings(self, default=None):
"""
        Gets XBlock-specific settings for the current XBlock
Returns default if settings service is not available.
Parameters:
default - default value to be used in two cases:
* No settings service is available
* As a `default` parameter to `SettingsService.get_settings_bucket`
"""
settings_service = self.runtime.service(self, "settings")
if settings_service:
return settings_service.get_settings_bucket(self, default=default)
return default | 0.004886 |
def dumps(obj, *args, **kwargs):
''' Typeless dump an object to json string '''
return json.dumps(obj, *args, cls=TypelessSONEncoder, ensure_ascii=False, **kwargs) | 0.011696 |
def update(self, version, reason=None):
"""
Modify the datamodel's manifest
:param version: New version of the manifest
        :param reason: Optional reason for the update (e.g. "Update from x.y.z")
"""
_check_version_format(version)
return self.collection.update({'_id': 'manifest'}, {
'$set': {'version': version},
'$push': {'history': {
'timestamp': datetime.utcnow(), 'version': version,
'reason': reason}}
}) | 0.003795 |
def list_nodes_full(mask='mask[id, hostname, primaryIpAddress, \
primaryBackendIpAddress, processorPhysicalCoreAmount, memoryCount]',
call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
ret = {}
conn = get_conn(service='SoftLayer_Account')
response = conn.getHardware(mask=mask)
for node in response:
ret[node['hostname']] = node
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret | 0.006051 |
def iter_chunks(self):
"""
Generate a |_Chunk| subclass instance for each chunk in this parser's
PNG stream, in the order encountered in the stream.
"""
for chunk_type, offset in self._iter_chunk_offsets():
chunk = _ChunkFactory(chunk_type, self._stream_rdr, offset)
yield chunk | 0.005848 |
def create(provider, instances, opts=None, **kwargs):
'''
Create an instance using Salt Cloud
CLI Example:
.. code-block:: bash
salt-run cloud.create my-ec2-config myinstance \
image=ami-1624987f size='t1.micro' ssh_username=ec2-user \
securitygroup=default delvol_on_destroy=True
'''
client = _get_client()
if isinstance(opts, dict):
client.opts.update(opts)
info = client.create(provider, instances, **salt.utils.args.clean_kwargs(**kwargs))
return info | 0.003745 |
def listify(obj, ignore=(list, tuple, type(None))):
''' Wraps all non-list or tuple objects in a list; provides a simple way
to accept flexible arguments. '''
return obj if isinstance(obj, ignore) else [obj] | 0.004566 |
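A usage sketch, assuming the listify defined above is in scope:
assert listify(3) == [3]
assert listify('abc') == ['abc']
assert listify([1, 2]) == [1, 2]      # lists pass through unchanged
assert listify((1, 2)) == (1, 2)      # tuples are also left as-is
assert listify(None) is None          # NoneType is ignored by default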
def get_covariance(datargs, outargs, vargs, datvar, outvar):
"""
Get covariance matrix.
:param datargs: data arguments
:param outargs: output arguments
:param vargs: variable arguments
:param datvar: variance of data arguments
:param outvar: variance of output arguments
:return: covariance
"""
# number of formula arguments that are not constant
argn = len(vargs)
# number of observations must be the same for all vargs
nobs = 1
c = []
    # FIXME: can just loop over vargs, don't need indices I think, do we?
for m in xrange(argn):
a = vargs[m] # get the variable formula arg in vargs at idx=m
try:
a = datargs[a] # get the calculation data arg
except (KeyError, TypeError):
a = outargs[a] # get the calculation output arg
if not isinstance(a, basestring):
# calculation arg might be sequence (arg, idx, [unit])
a = a[0] # if a is a sequence, get just the arg from a[0]
LOGGER.debug('using output variance key: %r', a)
avar = outvar[a] # get variance from output registry
else:
if not isinstance(a, basestring):
# calculation arg might be sequence (arg, idx, [unit])
a = a[0] # if a is a sequence, get just the arg from a[0]
LOGGER.debug('using data variance key: %r', a)
avar = datvar[a] # get variance from data registry
d = []
# avar is a dictionary with the variance of "a" vs all other vargs
for n in xrange(argn):
# FIXME: just get all of the calculation args one time
b = vargs[n] # get the variable formula arg in vargs at idx=n
try:
b = datargs[b] # get the calculation data arg
except (KeyError, TypeError):
b = outargs[b] # get variance from output registry
if not isinstance(b, basestring):
# calculation arg might be sequence (arg, idx, [unit])
b = b[0] # if a is a sequence, get just the arg from b[0]
LOGGER.debug('using variance key: %r', b)
d.append(avar.get(b, 0.0)) # add covariance to sequence
# figure out number of observations from longest covariance
# only works if nobs is in one of the covariance
# fails if nobs > 1, but covariance shape doesn't have nobs!!!
# eg: if variance for data is uniform for all observations!!!
try:
nobs = max(nobs, len(d[-1]))
except (TypeError, ValueError):
LOGGER.debug('c of %s vs %s = %g', a, b, d[-1])
LOGGER.debug('d:\n%r', d)
c.append(d)
# covariance matrix is initially zeros
cov = np.zeros((nobs, argn, argn))
# loop over arguments in both directions, fill in covariance
for m in xrange(argn):
d = c.pop()
LOGGER.debug('pop row %d:\n%r', argn-1-m, d)
for n in xrange(argn):
LOGGER.debug('pop col %d:\n%r', argn - 1 - n, d[-1])
cov[:, argn-1-m, argn-1-n] = d.pop()
if nobs == 1:
cov = cov.squeeze() # squeeze out any extra dimensions
LOGGER.debug('covariance:\n%r', cov)
return cov | 0.00056 |
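A minimal standalone sketch of the matrix assembly performed above, with made-up numbers: pairwise covariance values for each pair of arguments are placed into a (nobs, argn, argn) array.
import numpy as np

vargs = ['x', 'y']
nobs = 3
# Covariance of each argument against every other, one value per observation.
pairwise = {
    'x': {'x': np.full(nobs, 0.04), 'y': np.zeros(nobs)},
    'y': {'x': np.zeros(nobs), 'y': np.full(nobs, 0.09)},
}

argn = len(vargs)
cov = np.zeros((nobs, argn, argn))
for m, a in enumerate(vargs):
    for n, b in enumerate(vargs):
        cov[:, m, n] = pairwise[a].get(b, 0.0)
print(cov[0])  # [[0.04 0.  ] [0.   0.09]]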
def init_relation(self, models, relation):
"""
Initialize the relation on a set of models.
:type models: list
:type relation: str
"""
for model in models:
model.set_relation(
relation, Result(self._related.new_collection(), self, model)
)
return models | 0.005714 |
def columnCount(self, qindex=QModelIndex()):
"""Array column number"""
if self.total_cols <= self.cols_loaded:
return self.total_cols
else:
return self.cols_loaded | 0.009259 |
def validate(self):
"""
Check if all mandatory keys exist in :attr:`metainfo` and are of expected
types
The necessary values are documented here:
| http://bittorrent.org/beps/bep_0003.html
| https://wiki.theory.org/index.php/BitTorrentSpecification#Metainfo_File_Structure
Note that ``announce`` is not considered mandatory because clients can
find peers via DHT.
:raises MetainfoError: if :attr:`metainfo` would not generate a valid
torrent file or magnet link
"""
md = self.metainfo
info = md['info']
# Check values shared by singlefile and multifile torrents
utils.assert_type(md, ('info', 'name'), (str,), must_exist=True)
utils.assert_type(md, ('info', 'piece length'), (int,), must_exist=True)
utils.assert_type(md, ('info', 'pieces'), (bytes, bytearray), must_exist=True)
if 'length' in info and 'files' in info:
raise error.MetainfoError("['info'] includes both 'length' and 'files'")
elif 'length' in info:
# Validate info as singlefile torrent
utils.assert_type(md, ('info', 'length'), (int, float), must_exist=True)
utils.assert_type(md, ('info', 'md5sum'), (str,), must_exist=False, check=utils.is_md5sum)
if self.path is not None:
# Check if filepath actually points to a file
if not os.path.isfile(self.path):
raise error.MetainfoError(f"Metainfo includes {self.path} as file, but it is not a file")
# Check if size matches
if os.path.getsize(self.path) != info['length']:
raise error.MetainfoError(f"Mismatching file sizes in metainfo ({info['length']})"
f" and local file system ({os.path.getsize(self.path)}): "
f"{self.path!r}")
elif 'files' in info:
# Validate info as multifile torrent
utils.assert_type(md, ('info', 'files'), (list,), must_exist=True)
for i,fileinfo in enumerate(info['files']):
utils.assert_type(md, ('info', 'files', i, 'length'), (int, float), must_exist=True)
utils.assert_type(md, ('info', 'files', i, 'path'), (list,), must_exist=True)
utils.assert_type(md, ('info', 'files', i, 'md5sum'), (str,), must_exist=False,
check=utils.is_md5sum)
if self.path is not None:
# Check if filepath actually points to a directory
if not os.path.isdir(self.path):
raise error.MetainfoError(f"Metainfo includes {self.path} as directory, but it is not a directory")
for i,fileinfo in enumerate(info['files']):
for j,item in enumerate(fileinfo['path']):
utils.assert_type(md, ('info', 'files', i, 'path', j), (str,))
filepath = os.path.join(self.path, os.path.join(*fileinfo['path']))
# Check if filepath exists and is a file
if not os.path.exists(filepath):
raise error.MetainfoError(f"Metainfo inclues file that doesn't exist: {filepath!r}")
if not os.path.isfile(filepath):
raise error.MetainfoError(f"Metainfo inclues non-file: {filepath!r}")
# Check if sizes match
if os.path.getsize(filepath) != fileinfo['length']:
raise error.MetainfoError(f"Mismatching file sizes in metainfo ({fileinfo['length']})"
f" and local file system ({os.path.getsize(filepath)}): "
f"{filepath!r}")
else:
raise error.MetainfoError("Missing 'length' or 'files' in metainfo") | 0.006791 |
def build_url(self, data):
"""This method occurs after dumping the data into the class.
Args:
data (dict): dictionary of all the query values
Returns:
data (dict): ordered dict of all the values
"""
query_part_one = []
query_part_two = []
keys_to_be_removed = []
for key, value in data.items():
if key not in ['version', 'restApi', 'resourcePath']:
if key == 'mapArea':
query_part_one.append(','.join(str(val) for val in value))
keys_to_be_removed.append(key)
elif key == 'includeLocationCodes':
query_part_one.append(value)
keys_to_be_removed.append(key)
else:
if isinstance(value, list):
value = ','.join(str(val) for val in value)
query_part_two.append('{0}={1}'.format(key, value))
keys_to_be_removed.append(key)
for k in keys_to_be_removed:
del data[k]
data['query'] = '{0}?{1}'.format('/'.join(query_part_one),
'&'.join(query_part_two))
return data | 0.001606 |
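A minimal sketch of the two-part query assembly performed above, with toy values rather than the real service parameters: path-style values are joined with '/', keyword values with '&', and the result is formatted as '<path>?<query>'.
path_parts = ['37.55,-122.35,37.56,-122.34', 'true']
kv_parts = ['key=EXAMPLE_KEY', 'maxResults=5']
query = '{0}?{1}'.format('/'.join(path_parts), '&'.join(kv_parts))
print(query)  # 37.55,-122.35,37.56,-122.34/true?key=EXAMPLE_KEY&maxResults=5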
def _construct_config(self, basedir, port, name=None, isreplset=False):
"""Construct command line strings for a config server."""
if isreplset:
return self._construct_replset(basedir=basedir, portstart=port,
name=name,
num_nodes=list(range(
self.args['config'])),
arbiter=False, extra='--configsvr')
else:
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'),
port, replset=None, extra='--configsvr') | 0.002538 |
def info(cache_dir=CACHE_DIR, product=DEFAULT_PRODUCT):
"""Show info about the product cache.
:param cache_dir: Root of the DEM cache folder.
:param product: DEM product choice.
"""
datasource_root, _ = ensure_setup(cache_dir, product)
util.check_call_make(datasource_root, targets=['info']) | 0.003165 |
def _check_args(logZ, f, x, samples, weights):
""" Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.
Parameters
----------
f, x, samples, weights:
see arguments for :func:`fgivenx.drivers.compute_samples`
"""
# convert to arrays
if logZ is None:
logZ = [0]
f = [f]
samples = [samples]
weights = [weights]
# logZ
logZ = numpy.array(logZ, dtype='double')
    if len(logZ.shape) != 1:
raise ValueError("logZ should be a 1D array")
# x
x = numpy.array(x, dtype='double')
    if len(x.shape) != 1:
raise ValueError("x should be a 1D array")
# f
if len(logZ) != len(f):
raise ValueError("len(logZ) = %i != len(f)= %i"
% (len(logZ), len(f)))
for func in f:
if not callable(func):
raise ValueError("first argument f must be function"
"(or list of functions) of two variables")
# samples
if len(logZ) != len(samples):
raise ValueError("len(logZ) = %i != len(samples)= %i"
% (len(logZ), len(samples)))
samples = [numpy.array(s, dtype='double') for s in samples]
for s in samples:
        if len(s.shape) != 2:
raise ValueError("each set of samples should be a 2D array")
# weights
if len(logZ) != len(weights):
raise ValueError("len(logZ) = %i != len(weights)= %i"
% (len(logZ), len(weights)))
weights = [numpy.array(w, dtype='double') if w is not None
else numpy.ones(len(s), dtype='double')
for w, s in zip(weights, samples)]
for w, s in zip(weights, samples):
        if len(w.shape) != 1:
            raise ValueError("each set of weights should be a 1D array")
        if len(w) != len(s):
            raise ValueError("len(w) = %i != len(s) = %i" % (len(w), len(s)))
return logZ, f, x, samples, weights | 0.000505 |
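A minimal usage sketch of the checker above, assuming `_check_args` is in scope; the input values are illustrative, and passing `logZ=None` exercises the branch that wraps `f`, `samples` and `weights` into one-element lists.

import numpy

# Illustrative single-run inputs (hypothetical values).
f = lambda x, theta: theta[0] * x + theta[1]   # a callable of two variables
x = numpy.linspace(0, 1, 50)                   # 1D array of x values
samples = numpy.random.rand(200, 2)            # 200 samples of 2 parameters
weights = numpy.ones(len(samples))             # uniform weights

logZ, fs, x, samples, weights = _check_args(None, f, x, samples, weights)
print(logZ.shape, len(fs), samples[0].shape, weights[0].shape)   # (1,) 1 (200, 2) (200,)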
def date(self):
"""
Getter/setter for the date member.
The setter can take a string or a :meth:`datetime.datetime` and will do the
appropriate transformation.
"""
if self._date:
return self._date
return datetime.datetime.now().strftime('%Y-%m-%d') | 0.009464 |
def parse_message(self, message):
""" Parse a given message and run the command using the
        connection and the JSON protocols """
service = None
try:
service = get_service(message)
except ValueError:
pass
conn = self.connection
# check if valid request
if service is None or service not in requests.keys():
logging.info('No such request!')
conn.write_message(create_error('1', 'no such request'))
return
request_name = requests[service]
# process requests without data
if request_name == 'ping':
conn.write_message(create_pong())
return
elif request_name == 'roomlist':
conn.write_message(create_roomlist(conn.possible_rooms))
return
data = get_data(message)
logging.debug(data)
# process requests with data
if request_name == 'join':
logging.debug("Name is " + conn.id.name)
logging.debug("Name should be " + data['username'])
if conn.id.name != data['username']:
try:
conn.id.change_name(data['username'])
except UsernameInUseException:
conn.write_message(create_error(2, 'Username was already in use!'))
conn.join_room(data['room'])
conn._send_to_all_rooms(create_connect(conn.id.name))
elif request_name == 'userlist':
room = conn.get_room(data['room'])
if room is None:
conn.write_message('No such room as {room}'.format(room=data['room']))
else:
conn.write_message(create_userlist(room.user_names))
elif request_name == 'send':
room = conn.get_room(data['room'])
if room is not None:
room.send_message(create_message(data['username'], data['message']))
elif request_name == 'get_user_dump':
username = data['username']
for room in conn.possible_rooms:
if room.get_user(username) is not None:
conn.write_message(create_user_dump(room.get_user(username).id))
break
else:
if username == conn.id.name:
conn.write_message(create_user_dump(conn.id))
else:
conn.write_message(create_error(3, 'User not found in any active rooms'))
elif request_name == 'get_users_dump':
room = conn.get_room(data['room'])
if room is not None:
conn.write_message(create_users_dump(room.get_users_connected))
elif request_name == 'send_dump':
conn.id._load_from_json(data)
elif request_name == 'next_slide':
room = conn.get_room(data['room'])
room.send_message(create_next_slide())
elif request_name == 'previous_slide':
room = conn.get_room(data['room'])
room.send_message(create_previous_slide())
elif request_name == 'jump_to_slide':
room = conn.get_room(data['room'])
room.send_message(create_jump_to(data['slideNumber'])) | 0.005003 |
def prepare(cls, model, device='CPU', **kwargs):
"""For running end to end model(used for onnx test backend)
Parameters
----------
model : onnx ModelProto object
loaded onnx graph
device : 'CPU'
specifying device to run test on
kwargs :
other arguments
Returns
-------
MXNetBackendRep : object
Returns object of MXNetBackendRep class which will be in turn
used to run inference on the input model and return the result for comparison.
"""
graph = GraphProto()
sym, params = graph.from_onnx(model.graph)
return MXNetBackendRep(sym, params, device) | 0.004208 |
def AgregarVehiculo(self, dominio_vehiculo=None, dominio_acoplado=None, **kwargs):
"Agrega la información referente al vehiculo usado en el viaje del remito electrónico cárnico"
self.remito['viaje']['vehiculo'] = {'dominioVehiculo': dominio_vehiculo, 'dominioAcoplado': dominio_acoplado}
return True | 0.01548 |
def check_without_your_collusion(text):
"""Check the textself."""
err = "misc.illogic.collusion"
msg = "It's impossible to defraud yourself. Try 'aquiescence'."
regex = "without your collusion"
return existence_check(
text, [regex], err, msg, require_padding=False, offset=-1) | 0.003268 |
def p_ident_parts(self, p):
""" ident_parts : ident_part
| selector
| filter_group
"""
if not isinstance(p[1], list):
p[1] = [p[1]]
p[0] = p[1] | 0.00722 |
def get_endpoint(self, endpoint_id):
'''use a transfer client to get a specific endpoint based on an endpoint id.
Parameters
==========
endpoint_id: the endpoint_id to retrieve
'''
endpoint = None
if not hasattr(self, 'transfer_client'):
self._init_transfer_client()
try:
endpoint = self.transfer_client.get_endpoint(endpoint_id).data
except TransferAPIError:
bot.info('%s does not exist.' %endpoint_id)
return endpoint | 0.013645 |
def sudo_run(c, command):
"""
Run some command under Travis-oriented sudo subshell/virtualenv.
:param str command:
Command string to run, e.g. ``inv coverage``, ``inv integration``, etc.
(Does not necessarily need to be an Invoke task, but...)
"""
# NOTE: explicit shell wrapper because sourcing the venv works best here;
# test tasks currently use their own subshell to call e.g. 'pytest --blah',
# so the tactic of '$VIRTUAL_ENV/bin/inv coverage' doesn't help - only that
# intermediate process knows about the venv!
cmd = "source $VIRTUAL_ENV/bin/activate && {}".format(command)
c.sudo('bash -c "{0}"'.format(cmd), user=c.travis.sudo.user) | 0.001431 |
def _ttv_compute(self, v, dims, vidx, remdims):
"""
Tensor times vector product
Parameter
---------
"""
if not isinstance(v, tuple):
raise ValueError('v must be a tuple of vectors')
ndim = self.ndim
order = list(remdims) + list(dims)
if ndim > 1:
T = self.transpose(order)
sz = array(self.shape)[order]
for i in np.arange(len(dims), 0, -1):
T = T.reshape((sz[:ndim - 1].prod(), sz[ndim - 1]))
T = T.dot(v[vidx[i - 1]])
ndim -= 1
if ndim > 0:
T = T.reshape(sz[:ndim])
return T | 0.003067 |
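The contraction performed above can be reproduced with `numpy.tensordot`; this is a standalone sketch of the same tensor-times-vector operation on made-up shapes, not the library's own API.

import numpy as np

T = np.random.rand(3, 4, 5)    # a 3-way tensor
v = np.random.rand(4)          # vector to contract with mode 1

# Contract mode 1 of T with v; the remaining modes keep their order.
out = np.tensordot(T, v, axes=([1], [0]))
print(out.shape)               # (3, 5)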
def find(self, sub, in_current_line=False, include_current_position=False,
ignore_case=False, count=1):
"""
        Find `sub` after the cursor, return position relative to the cursor
        position. Return `None` if nothing was found.
        :param count: Find the n-th occurrence.
"""
assert isinstance(ignore_case, bool)
if in_current_line:
text = self.current_line_after_cursor
else:
text = self.text_after_cursor
if not include_current_position:
if len(text) == 0:
return # (Otherwise, we always get a match for the empty string.)
else:
text = text[1:]
flags = re.IGNORECASE if ignore_case else 0
iterator = re.finditer(re.escape(sub), text, flags)
try:
for i, match in enumerate(iterator):
if i + 1 == count:
if include_current_position:
return match.start(0)
else:
return match.start(0) + 1
except StopIteration:
pass | 0.003527 |
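The n-th-occurrence counting above boils down to enumerating `re.finditer` matches of the escaped literal; here is a hedged standalone sketch of that idea (the helper name and return convention are illustrative, not the original class API).

import re

def find_nth(text, sub, count=1, ignore_case=False):
    """Return the start index of the count-th literal occurrence of sub, or None."""
    flags = re.IGNORECASE if ignore_case else 0
    for i, match in enumerate(re.finditer(re.escape(sub), text, flags)):
        if i + 1 == count:
            return match.start()
    return None

print(find_nth("abcabcabc", "abc", count=2))   # 3
print(find_nth("abcabcabc", "xyz"))            # None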
def interaction(
self,
frame,
tb=None,
exception='Wdb',
exception_description='Stepping',
init=None,
shell=False,
shell_vars=None,
source=None,
iframe_mode=False,
timeout=None,
post_mortem=False
):
"""User interaction handling blocking on socket receive"""
log.info(
'Interaction %r %r %r %r' %
(frame, tb, exception, exception_description)
)
self.reconnect_if_needed()
self.stepping = not shell
if not iframe_mode:
opts = {}
if shell:
opts['type_'] = 'shell'
if post_mortem:
opts['type_'] = 'pm'
self.open_browser(**opts)
lvl = len(self.interaction_stack)
if lvl:
exception_description += ' [recursive%s]' % (
'^%d' % lvl if lvl > 1 else ''
)
interaction = Interaction(
self,
frame,
tb,
exception,
exception_description,
init=init,
shell=shell,
shell_vars=shell_vars,
source=source,
timeout=timeout
)
self.interaction_stack.append(interaction)
# For meta debugging purpose
self._ui = interaction
if self.begun:
# Each new state sends the trace and selects a frame
interaction.init()
else:
self.begun = True
interaction.loop()
self.interaction_stack.pop()
if lvl:
self.interaction_stack[-1].init() | 0.001756 |
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, msg, conn_info_map = InitConnect.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_init_connect(ret_code, msg, conn_info_map)
return ret_code, msg | 0.009934 |
def verify(ctx):
"""Upgrade locked dependency versions"""
oks = run_configurations(
skipper(verify_environments),
read_sections,
)
ctx.exit(0
if False not in oks
else 1) | 0.004405 |
def commit_history(filename):
"""Retrieve the commit history for a given filename.
Keyword Arguments:
:filename: (str) -- full name of the file
Returns:
list of dicts -- list of commit
if the file is not found, returns an empty list
"""
result = []
repo = Repo()
for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename):
result.append({'date':
datetime.fromtimestamp(commit.committed_date +
commit.committer_tz_offset),
'hexsha': commit.hexsha})
return result | 0.001555 |
def to_underscore(s):
"""Transform camel or pascal case to underscore separated string
"""
return re.sub(
r'(?!^)([A-Z]+)',
lambda m: "_{0}".format(m.group(1).lower()),
re.sub(r'(?!^)([A-Z]{1}[a-z]{1})', lambda m: "_{0}".format(m.group(1).lower()), s)
).lower() | 0.006289 |
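A quick demonstration of the two-pass substitution above (copied standalone so the example runs on its own): the inner pattern splits before an uppercase letter followed by a lowercase one, the outer pattern splits remaining runs of capitals.

import re

def to_underscore(s):
    # Standalone copy of the transformation above, for illustration only.
    return re.sub(
        r'(?!^)([A-Z]+)',
        lambda m: "_{0}".format(m.group(1).lower()),
        re.sub(r'(?!^)([A-Z]{1}[a-z]{1})', lambda m: "_{0}".format(m.group(1).lower()), s)
    ).lower()

print(to_underscore("CamelCase"))        # camel_case
print(to_underscore("getHTTPResponse"))  # get_http_response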
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
This methods sets the ``PyOS_InputHook`` for wxPython, which allows
the wxPython to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from IPython.lib.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
import wx
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
app._in_event_loop = True
self._apps[GUI_WX] = app
return app | 0.00437 |
async def connect(self):
requester = AiohttpRequester()
factory = UpnpFactory(requester)
device = await factory.async_create_device(self.url)
self.service = device.service('urn:schemas-sony-com:service:Group:1')
if not self.service:
_LOGGER.error("Unable to find group service!")
return False
for act in self.service.actions.values():
_LOGGER.debug("Action: %s (%s)", act, [arg.name for arg in act.in_arguments()])
return True
"""
Available actions
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_GetDeviceInfo)> ([])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_GetState)> ([])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_GetStateM)> ([])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_SetGroupName)> (['GroupName'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_ChangeGroupVolume)> (['GroupVolume'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_GetAllGroupMemory)> ([])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_DeleteGroupMemory)> (['MemoryID'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_UpdateGroupMemory)> (['MemoryID', 'GroupMode', 'GroupName', 'SlaveList', 'CodecType', 'CodecBitrate'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_Start)> (['GroupMode', 'GroupName', 'SlaveList', 'CodecType', 'CodecBitrate'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_Entry)> (['MasterSessionID', 'SlaveList'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_EntryM)> (['MasterSessionID', 'SlaveList'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_Leave)> (['MasterSessionID', 'SlaveList'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_LeaveM)> (['MasterSessionID', 'SlaveList'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_Abort)> (['MasterSessionID'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_SetGroupMute)> (['GroupMute'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_SetCodec)> (['CodecType', 'CodecBitrate'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_GetCodec)> ([])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_Invite)> (['GroupMode', 'GroupName', 'MasterUUID', 'MasterSessionID'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_Exit)> (['SlaveSessionID'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_Play)> (['MasterSessionID'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_Stop)> (['MasterSessionID'])
INFO:songpal.upnpctl:Action: <UpnpService.Action(X_Delegate)> (['GroupMode', 'SlaveList', 'DelegateURI', 'DelegateURIMetaData'])
""" | 0.007954 |
def vote_total(self):
"""
Calculates vote total as total_upvotes - total_downvotes. We are adding a method here instead of relying on django-secretballot's addition since that doesn't work for subclasses.
"""
modelbase_obj = self.modelbase_obj
return modelbase_obj.votes.filter(vote=+1).count() - modelbase_obj.votes.filter(vote=-1).count() | 0.010526 |
def _at_warn(self, calculator, rule, scope, block):
"""
Implements @warn
"""
value = calculator.calculate(block.argument)
log.warn(repr(value)) | 0.010929 |
def get_bundle_imported_services(self, bundle):
"""
Returns this bundle's ServiceReference list for all services it is
using or returns None if this bundle is not using any services.
A bundle is considered to be using a service if its use count for that
service is greater than zero.
The list is valid at the time of the call to this method, however, as
the Framework is a very dynamic environment, services can be modified
or unregistered at any time.
:param bundle: The bundle to look into
:return: The references of the services used by this bundle
"""
with self.__svc_lock:
return sorted(self.__bundle_imports.get(bundle, [])) | 0.002703 |
def configure_client(
cls, address: Union[str, Tuple[str, int], Path] = 'localhost', port: int = 6379,
db: int = 0, password: str = None, ssl: Union[bool, str, SSLContext] = False,
**client_args) -> Dict[str, Any]:
"""
Configure a Redis client.
:param address: IP address, host name or path to a UNIX socket
:param port: port number to connect to (ignored for UNIX sockets)
:param db: database number to connect to
:param password: password used if the server requires authentication
:param ssl: one of the following:
* ``False`` to disable SSL
* ``True`` to enable SSL using the default context
* an :class:`~ssl.SSLContext` instance
* a ``module:varname`` reference to an :class:`~ssl.SSLContext` instance
* name of an :class:`~ssl.SSLContext` resource
:param client_args: extra keyword arguments passed to :func:`~aioredis.create_redis_pool`
"""
assert check_argument_types()
if isinstance(address, str) and not address.startswith('/'):
address = (address, port)
elif isinstance(address, Path):
address = str(address)
client_args.update({
'address': address,
'db': db,
'password': password,
'ssl': resolve_reference(ssl)
})
return client_args | 0.004175 |
def __send_run(self):
"""Send request thread
"""
while not self.__end.is_set():
try:
with Connection(userid=self.__prefix + self.__epid,
password=self.__passwd,
virtual_host=self.__vhost,
heartbeat=self.__heartbeat,
connect_timeout=self.__socket_timeout,
operation_timeout=self.__socket_timeout,
ssl=self.__get_ssl_context(self.__sslca),
host=self.__host) as conn,\
conn.channel(auto_encode_decode=False) as channel:
self.__send_channel = channel
self.__send_exc_clear(log_if_exc_set='reconnected')
self.__send_ready.set()
try:
self.__send_ready_callback(self.__send_exc_time)
while not self.__end.is_set():
with self.__send_lock:
try:
# deal with any incoming messages (AMQP protocol only, not QAPI)
conn.drain_events(0)
except (BlockingIOError, SocketTimeout):
pass
conn.heartbeat_tick()
# idle
self.__end.wait(.25)
finally:
# locked so can make sure another call to send() is not made whilst shutting down
with self.__send_lock:
self.__send_ready.clear()
except exceptions.AccessRefused:
self.__send_log_set_exc_and_wait('Access Refused (Credentials already in use?)')
except exceptions.ConnectionForced:
self.__send_log_set_exc_and_wait('Disconnected by broker (ConnectionForced)')
except SocketTimeout:
self.__send_log_set_exc_and_wait('SocketTimeout exception. wrong credentials, vhost or prefix?')
except SSLError:
self.__send_log_set_exc_and_wait('ssl.SSLError Bad Certificate?')
except (exceptions.AMQPError, SocketError):
self.__send_log_set_exc_and_wait('amqp/transport failure, sleeping before retry')
except:
self.__send_log_set_exc_and_wait('unexpected failure, exiting', wait_seconds=0)
break
logger.debug('finished') | 0.004167 |
def postprocess_authorKeywords(self, entry):
"""
Parse author keywords.
Author keywords are usually semicolon-delimited.
"""
if type(entry.authorKeywords) not in [str, unicode]:
aK = u' '.join([unicode(k) for k in entry.authorKeywords])
else:
aK = entry.authorKeywords
entry.authorKeywords = [k.strip().upper() for k in aK.split(';')] | 0.004808 |
def compile(pattern, flags=0, auto_compile=None): # noqa A001
"""Compile both the search or search and replace into one object."""
if isinstance(pattern, Bre):
if auto_compile is not None:
raise ValueError("Cannot compile Bre with a different auto_compile!")
elif flags != 0:
raise ValueError("Cannot process flags argument with a compiled pattern")
return pattern
else:
if auto_compile is None:
auto_compile = True
return Bre(compile_search(pattern, flags), auto_compile) | 0.005329 |
def stopped(name=None,
containers=None,
shutdown_timeout=None,
unpause=False,
error_on_absent=True,
**kwargs):
'''
Ensure that a container (or containers) is stopped
name
Name or ID of the container
containers
Run this state on more than one container at a time. The following two
examples accomplish the same thing:
.. code-block:: yaml
stopped_containers:
docker_container.stopped:
- names:
- foo
- bar
- baz
.. code-block:: yaml
stopped_containers:
docker_container.stopped:
- containers:
- foo
- bar
- baz
However, the second example will be a bit quicker since Salt will stop
all specified containers in a single run, rather than executing the
state separately on each image (as it would in the first example).
shutdown_timeout
Timeout for graceful shutdown of the container. If this timeout is
exceeded, the container will be killed. If this value is not passed,
then the container's configured ``stop_timeout`` will be observed. If
``stop_timeout`` was also unset on the container, then a timeout of 10
seconds will be used.
unpause : False
Set to ``True`` to unpause any paused containers before stopping. If
unset, then an error will be raised for any container that was paused.
error_on_absent : True
By default, this state will return an error if any of the specified
containers are absent. Set this to ``False`` to suppress that error.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not name and not containers:
ret['comment'] = 'One of \'name\' and \'containers\' must be provided'
return ret
if containers is not None:
if not isinstance(containers, list):
ret['comment'] = 'containers must be a list'
return ret
targets = []
for target in containers:
if not isinstance(target, six.string_types):
target = six.text_type(target)
targets.append(target)
elif name:
if not isinstance(name, six.string_types):
targets = [six.text_type(name)]
else:
targets = [name]
containers = {}
for target in targets:
try:
c_state = __salt__['docker.state'](target)
except CommandExecutionError:
containers.setdefault('absent', []).append(target)
else:
containers.setdefault(c_state, []).append(target)
errors = []
if error_on_absent and 'absent' in containers:
errors.append(
'The following container(s) are absent: {0}'.format(
', '.join(containers['absent'])
)
)
if not unpause and 'paused' in containers:
ret['result'] = False
errors.append(
'The following container(s) are paused: {0}'.format(
', '.join(containers['paused'])
)
)
if errors:
ret['result'] = False
ret['comment'] = '. '.join(errors)
return ret
to_stop = containers.get('running', []) + containers.get('paused', [])
if not to_stop:
ret['result'] = True
if len(targets) == 1:
ret['comment'] = 'Container \'{0}\' is '.format(targets[0])
else:
ret['comment'] = 'All specified containers are '
if 'absent' in containers:
ret['comment'] += 'absent or '
ret['comment'] += 'not running'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = (
'The following container(s) will be stopped: {0}'
.format(', '.join(to_stop))
)
return ret
stop_errors = []
for target in to_stop:
stop_kwargs = {'unpause': unpause}
if shutdown_timeout:
stop_kwargs['timeout'] = shutdown_timeout
changes = __salt__['docker.stop'](target, **stop_kwargs)
if changes['result'] is True:
ret['changes'][target] = changes
else:
if 'comment' in changes:
stop_errors.append(changes['comment'])
else:
stop_errors.append(
'Failed to stop container \'{0}\''.format(target)
)
if stop_errors:
ret['comment'] = '; '.join(stop_errors)
return ret
ret['result'] = True
ret['comment'] = (
'The following container(s) were stopped: {0}'
.format(', '.join(to_stop))
)
return ret | 0.000206 |
def resolve_dep(self, depname):
""" Locate dep in the search path; if found, return its path.
If not found in the search path, and the dep is not a system-provided
dep, raise an error """
for d in self._search_path:
name = os.path.join(d, depname)
if self._mock:
return name
if os.path.exists(name):
return name
if self.resolve_dep_from_path(depname):
# It's a system dep, so skip it
return None
message = "unable to find %s in %r" % (depname, self._search_path + self._path)
print(message)
# The conditional is to ease future debugging
if True:
raise RuntimeError(message)
return None | 0.003896 |
def natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y):
r"""Generate a natural neighbor interpolation of the given points to a regular grid.
    This assigns values to the given grid using the Liang and Hale [Liang2010]_
    approach.
Parameters
----------
xp: (N, ) ndarray
x-coordinates of observations
yp: (N, ) ndarray
y-coordinates of observations
variable: (N, ) ndarray
observation values associated with (xp, yp) pairs.
        i.e., variable[i] is a unique observation at (xp[i], yp[i])
grid_x: (M, 2) ndarray
Meshgrid associated with x dimension
grid_y: (M, 2) ndarray
Meshgrid associated with y dimension
Returns
-------
img: (M, N) ndarray
Interpolated values on a 2-dimensional grid
See Also
--------
natural_neighbor_to_points
"""
# Handle grid-to-points conversion, and use function from `interpolation`
points_obs = list(zip(xp, yp))
points_grid = generate_grid_coords(grid_x, grid_y)
img = natural_neighbor_to_points(points_obs, variable, points_grid)
return img.reshape(grid_x.shape) | 0.002629 |
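A hedged usage sketch of the function above, assuming `natural_neighbor_to_grid` (and the `natural_neighbor_to_points` it delegates to) is importable; the observations and grid bounds are made up.

import numpy as np

# Scattered observations (illustrative values).
xp = np.random.uniform(0, 10, 50)
yp = np.random.uniform(0, 10, 50)
variable = np.sin(xp) + np.cos(yp)

# Target grid expressed as meshgrids, matching the grid_x/grid_y arguments above.
grid_x, grid_y = np.meshgrid(np.linspace(0, 10, 25), np.linspace(0, 10, 25))

img = natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y)
print(img.shape)   # same shape as grid_x, here (25, 25)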
def alloca(self, typ, size=None, name=''):
"""
Stack-allocate a slot for *size* elements of the given type.
(default one element)
"""
if size is None:
pass
elif isinstance(size, (values.Value, values.Constant)):
assert isinstance(size.type, types.IntType)
else:
# If it is not a Value instance,
# assume to be a Python integer.
size = values.Constant(types.IntType(32), size)
al = instructions.AllocaInstr(self.block, typ, size, name)
self._insert(al)
return al | 0.003322 |
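The snippet above appears to be builder-internal code; assuming the library in question is llvmlite, the same `alloca` signature is reachable through its public `ir` API. A minimal hedged sketch with illustrative names:

from llvmlite import ir

module = ir.Module(name="demo")
func = ir.Function(module, ir.FunctionType(ir.VoidType(), []), name="f")
builder = ir.IRBuilder(func.append_basic_block(name="entry"))

# One i32 slot, and a 4-element i32 buffer (size given as a plain Python int).
slot = builder.alloca(ir.IntType(32), name="slot")
buf = builder.alloca(ir.IntType(32), size=4, name="buf")
builder.ret_void()

print(module)   # textual LLVM IR containing the two alloca instructions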
def svmachine(self, data: ['SASdata', str] = None,
autotune: str = None,
code: str = None,
id: str = None,
input: [str, list, dict] = None,
kernel: str = None,
output: [str, bool, 'SASdata'] = None,
partition: str = None,
savestate: str = None,
solver: str = None,
target: [str, list, dict] = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the SVMACHINE procedure
Documentation link:
https://go.documentation.sas.com/?docsetId=casml&docsetTarget=casml_svmachine_toc.htm&docsetVersion=8.3&locale=en
:param data: SASdata object or string. This parameter is required.
        :param autotune: The autotune variable can only be a string type.
        :param code: The code variable can only be a string type.
        :param id: The id variable can only be a string type.
        :param input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
        :param kernel: The kernel variable can only be a string type.
        :param output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
        :param partition: The partition variable can only be a string type.
        :param savestate: The savestate variable can only be a string type.
        :param solver: The solver variable can only be a string type.
        :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
        :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
        :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
""" | 0.010062 |
def repeat(element, count):
'''Generate a sequence with one repeated value.
Note: This method uses deferred execution.
Args:
element: The value to be repeated.
count: The number of times to repeat the value.
Raises:
ValueError: If the count is negative.
'''
if count < 0:
raise ValueError("repeat() count cannot be negative")
return query(itertools.repeat(element, count)) | 0.002227 |
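The deferred behaviour above is `itertools.repeat` guarded by the negative-count check; this standalone sketch mirrors that logic without the library's `query(...)` wrapper, which the real method returns.

import itertools

def repeat(element, count):
    # Same guard as above; the library version wraps the iterator in query(...).
    if count < 0:
        raise ValueError("repeat() count cannot be negative")
    return itertools.repeat(element, count)

print(list(repeat('x', 3)))   # ['x', 'x', 'x']
print(list(repeat('x', 0)))   # []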
def is_valid_mark(comps, mark_trans):
"""
Check whether the mark given by mark_trans is valid to add to the components
"""
if mark_trans == "*_":
return True
components = list(comps)
if mark_trans[0] == 'd' and components[0] \
and components[0][-1].lower() in ("d", "đ"):
return True
elif components[1] != "" and \
strip(components[1]).lower().find(mark_trans[0]) != -1:
return True
else:
return False | 0.004082 |
def last_versions_with_age(self, col_name='age'):
'''
Leaves only the latest version for each object.
Adds a new column which represents age.
The age is computed by subtracting _start of the oldest version
from one of these possibilities::
            # pseudo-code
if self._rbound is None:
if latest_version._end is pd.NaT:
current_time is used
else:
min(current_time, latest_version._end) is used
else:
if latest_version._end is pd.NaT:
self._rbound is used
else:
min(self._rbound, latest_version._end) is used
        :param col_name: name of the new column.
'''
min_start_map = {}
max_start_map = {}
max_start_ser_map = {}
cols = self.columns.tolist()
i_oid = cols.index('_oid')
i_start = cols.index('_start')
i_end = cols.index('_end')
for row in self.values:
s = row[i_start]
oid = row[i_oid]
mins = min_start_map.get(oid, s)
if s <= mins:
min_start_map[oid] = s
maxs = max_start_map.get(oid, s)
if s >= maxs:
max_start_map[oid] = s
max_start_ser_map[oid] = row
vals = max_start_ser_map.values()
cut_ts = datetime.utcnow()
ages = []
for row in vals:
end = row[i_end]
end = cut_ts if end is pd.NaT else min(cut_ts, end)
age = end - min_start_map[row[i_oid]]
age = age - timedelta(microseconds=age.microseconds)
ages.append(age)
res = pd.DataFrame(max_start_ser_map.values(), columns=cols)
res[col_name] = pd.Series(ages, index=res.index)
return res | 0.001069 |
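The same "latest version per object, plus age" idea can be written with pandas groupby operations; this is a simplified, hedged sketch on a made-up frame (it ignores the `_rbound` branch described in the docstring).

import pandas as pd
from datetime import datetime

df = pd.DataFrame({
    '_oid':   [1, 1, 2],
    '_start': pd.to_datetime(['2020-01-01', '2020-02-01', '2020-01-15']),
    '_end':   pd.to_datetime(['2020-02-01', None, None]),
})

# Latest version per _oid, by maximum _start.
latest = df.loc[df.groupby('_oid')['_start'].idxmax()].copy()

# Age = min(_end or now, now) - earliest _start of the same _oid.
cut = pd.Timestamp(datetime.utcnow())
first_start = df.groupby('_oid')['_start'].min()
end = latest['_end'].fillna(cut)
end = end.where(end < cut, cut)
latest['age'] = end.values - first_start.loc[latest['_oid']].values
print(latest[['_oid', '_start', 'age']])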
def deallocate_network_ipv4(self, id_network_ipv4):
"""
Deallocate all relationships between NetworkIPv4.
:param id_network_ipv4: ID for NetworkIPv4
:return: Nothing
:raise InvalidParameterError: Invalid ID for NetworkIPv4.
:raise NetworkIPv4NotFoundError: NetworkIPv4 not found.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_network_ipv4):
raise InvalidParameterError(
u'The identifier of NetworkIPv4 is invalid or was not informed.')
url = 'network/ipv4/' + str(id_network_ipv4) + '/deallocate/'
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml) | 0.003619 |
def set_resolving(self, **kw):
"""
Certain log fields can be individually resolved. Use this
method to set these fields. Valid keyword arguments:
:param str timezone: string value to set timezone for audits
:param bool time_show_zone: show the time zone in the audit.
:param bool time_show_millis: show timezone in milliseconds
:param bool keys: resolve log field keys
:param bool ip_elements: resolve IP's to SMC elements
:param bool ip_dns: resolve IP addresses using DNS
:param bool ip_locations: resolve locations
"""
if 'timezone' in kw and 'time_show_zone' not in kw:
kw.update(time_show_zone=True)
self.data['resolving'].update(**kw) | 0.005222 |
def generate_prediction_data(self):
"""
Create data that caches intermediate results used for predicting
the label of new/unseen points. This data is only useful if
you are intending to use functions from ``hdbscan.prediction``.
"""
if self.metric in FAST_METRICS:
min_samples = self.min_samples or self.min_cluster_size
if self.metric in KDTree.valid_metrics:
tree_type = 'kdtree'
elif self.metric in BallTree.valid_metrics:
tree_type = 'balltree'
else:
warn('Metric {} not supported for prediction data!'.format(self.metric))
return
self._prediction_data = PredictionData(
self._raw_data, self.condensed_tree_, min_samples,
tree_type=tree_type, metric=self.metric,
**self._metric_kwargs
)
else:
            warn('Cannot generate prediction data for non-vector '
                 'space inputs -- access to the source data rather '
                 'than mere distances is required!') | 0.002667 |
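In normal use this method is triggered by the `prediction_data=True` constructor flag and its output is consumed by `hdbscan.approximate_predict`; a short hedged sketch on synthetic data with the default (euclidean) metric:

import numpy as np
import hdbscan

X = np.random.rand(200, 2)                                        # synthetic 2-D points
clusterer = hdbscan.HDBSCAN(min_cluster_size=10, prediction_data=True).fit(X)

# approximate_predict relies on the cached PredictionData built above.
new_points = np.random.rand(5, 2)
labels, strengths = hdbscan.approximate_predict(clusterer, new_points)
print(labels, strengths)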
def _set_stats_data(self, test_id, metrics):
"""
Get summary stats data from each metric and set it in the _Analysis object specified by test_id to make it available
for retrieval
:return: currently always returns CONSTANTS.OK. Maybe enhanced in future to return additional status
"""
for metric in metrics:
self._analyses[test_id].stats_data[metric.label] = metric.summary_stats
return CONSTANTS.OK | 0.009238 |
def ifind_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first object of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case insensitive.
| If USER_IFIND_MODE is 'nocase_collation' this method maps to find_first_object().
| If USER_IFIND_MODE is 'ifind' this method performs a case insensitive find.
"""
# Call regular find() if USER_IFIND_MODE is nocase_collation
if self.user_manager.USER_IFIND_MODE=='nocase_collation':
return self.find_first_object(ObjectClass, **kwargs)
raise NotImplementedError | 0.008104 |
def select_features(X, y, test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
fdr_level=defaults.FDR_LEVEL, hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,
n_jobs=defaults.N_PROCESSES, chunksize=defaults.CHUNKSIZE,
ml_task='auto'):
"""
Check the significance of all features (columns) of feature matrix X and return a possibly reduced feature matrix
only containing relevant features.
The feature matrix must be a pandas.DataFrame in the format:
+-------+-----------+-----------+-----+-----------+
| index | feature_1 | feature_2 | ... | feature_N |
+=======+===========+===========+=====+===========+
| A | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
| B | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
| ... | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
| ... | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
| ... | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
Each column will be handled as a feature and tested for its significance to the target.
The target vector must be a pandas.Series or numpy.array in the form
+-------+--------+
| index | target |
+=======+========+
| A | ... |
+-------+--------+
| B | ... |
+-------+--------+
| . | ... |
+-------+--------+
| . | ... |
+-------+--------+
and must contain all id's that are in the feature matrix. If y is a numpy.array without index, it is assumed
that y has the same order and length than X and the rows correspond to each other.
Examples
========
>>> from tsfresh.examples import load_robot_execution_failures
>>> from tsfresh import extract_features, select_features
>>> df, y = load_robot_execution_failures()
>>> X_extracted = extract_features(df, column_id='id', column_sort='time')
>>> X_selected = select_features(X_extracted, y)
:param X: Feature matrix in the format mentioned before which will be reduced to only the relevant features.
It can contain both binary or real-valued features at the same time.
:type X: pandas.DataFrame
:param y: Target vector which is needed to test which features are relevant. Can be binary or real-valued.
:type y: pandas.Series or numpy.ndarray
:param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature (currently unused)
:type test_for_binary_target_binary_feature: str
:param test_for_binary_target_real_feature: Which test to be used for binary target, real feature
:type test_for_binary_target_real_feature: str
:param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused)
:type test_for_real_target_binary_feature: str
:param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)
:type test_for_real_target_real_feature: str
:param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant
features among all created features.
:type fdr_level: float
:param hypotheses_independent: Can the significance of the features be assumed to be independent?
Normally, this should be set to False as the features are never
independent (e.g. mean and median)
:type hypotheses_independent: bool
:param n_jobs: Number of processes to use during the p-value calculation
:type n_jobs: int
:param chunksize: The size of one chunk that is submitted to the worker
process for the parallelisation. Where one chunk is defined as a
singular time series for one id and one kind. If you set the chunksize
to 10, then it means that one task is to calculate all features for 10
time series. If it is set it to None, depending on distributor,
heuristics are used to find the optimal chunksize. If you get out of
memory exceptions, you can try it with the dask distributor and a
smaller chunksize.
:type chunksize: None or int
:param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.
Defaults to `'auto'`, meaning the intended task is inferred from `y`.
                    If `y` has a boolean, integer or object dtype, the task is assumed to be classification,
else regression.
:type ml_task: str
:return: The same DataFrame as X, but possibly with reduced number of columns ( = features).
:rtype: pandas.DataFrame
:raises: ``ValueError`` when the target vector does not fit to the feature matrix
or `ml_task` is not one of `'auto'`, `'classification'` or `'regression'`.
"""
assert isinstance(X, pd.DataFrame), "Please pass features in X as pandas.DataFrame."
check_for_nans_in_columns(X)
assert isinstance(y, (pd.Series, np.ndarray)), "The type of target vector y must be one of: " \
"pandas.Series, numpy.ndarray"
assert len(y) > 1, "y must contain at least two samples."
assert len(X) == len(y), "X and y must contain the same number of samples."
assert len(set(y)) > 1, "Feature selection is only possible if more than 1 label/class is provided"
if isinstance(y, pd.Series) and set(X.index) != set(y.index):
raise ValueError("Index of X and y must be identical if provided")
if isinstance(y, np.ndarray):
y = pd.Series(y, index=X.index)
relevance_table = calculate_relevance_table(
X, y, ml_task=ml_task, n_jobs=n_jobs, chunksize=chunksize,
test_for_binary_target_real_feature=test_for_binary_target_real_feature,
fdr_level=fdr_level, hypotheses_independent=hypotheses_independent,
)
relevant_features = relevance_table[relevance_table.relevant].feature
return X.loc[:, relevant_features] | 0.004584 |
def is_business_day(self, holiday_obj=None):
"""
:param list holiday_obj : datetime.date list defining business holidays
:return: bool
method to check if a date falls neither on weekend nor is holiday
"""
y, m, d = BusinessDate.to_ymd(self)
if weekday(y, m, d) > FRIDAY:
return False
holiday_list = holiday_obj if holiday_obj is not None else DEFAULT_HOLIDAYS
if self in holiday_list:
return False
elif date(y, m, d) in holiday_list:
return False
return True | 0.005146 |
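The weekday-plus-holiday test above can be sketched with the standard library alone; this mirrors the logic, not the BusinessDate API, and the dates are illustrative.

import calendar
from datetime import date

def is_business_day(d, holidays=()):
    # calendar.weekday: Monday=0 ... Sunday=6; anything past FRIDAY is a weekend.
    if calendar.weekday(d.year, d.month, d.day) > calendar.FRIDAY:
        return False
    return d not in holidays

print(is_business_day(date(2024, 1, 2)))                               # True  (a Tuesday)
print(is_business_day(date(2024, 1, 1), holidays=[date(2024, 1, 1)]))  # False (holiday)
print(is_business_day(date(2024, 1, 6)))                               # False (Saturday)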
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite | 0.000387 |
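The standard-library `doctest` module exposes the same `DocFileSuite` interface; a minimal usage sketch (the file path and globals are illustrative):

import doctest
import unittest

# Collect doctests from a text file, resolved relative to this module's package.
suite = doctest.DocFileSuite('docs/usage.txt',
                             optionflags=doctest.ELLIPSIS,
                             globs={'answer': 42})

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)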
def meta(self, file_list, **kwargs):
"""获得文件(s)的metainfo
:param file_list: 文件路径列表,如 ['/aaa.txt']
:type file_list: list
:return: requests.Response
.. note ::
            Examples
            * The file does not exist
            {"errno":12,"info":[{"errno":-9}],"request_id":3294861771}
            * The file exists
{
"errno": 0,
"info": [
{
"fs_id": 文件id,
"path": "\/\u5c0f\u7c73\/mi2s\u5237recovery.rar",
"server_filename": "mi2s\u5237recovery.rar",
"size": 8292134,
"server_mtime": 1391274570,
"server_ctime": 1391274570,
"local_mtime": 1391274570,
"local_ctime": 1391274570,
"isdir": 0,
"category": 6,
"path_md5": 279827390796736883,
"delete_fs_id": 0,
"object_key": "84221121-2193956150-1391274570512754",
"block_list": [
"76b469302a02b42fd0a548f1a50dd8ac"
],
"md5": "76b469302a02b42fd0a548f1a50dd8ac",
"errno": 0
}
],
"request_id": 2964868977
}
"""
if not isinstance(file_list, list):
file_list = [file_list]
data = {'target': json.dumps(file_list)}
return self._request('filemetas?blocks=0&dlink=1', 'filemetas', data=data, **kwargs) | 0.002964 |
def _evalAndDer(self,x):
'''
Returns the level and first derivative of the function at each value in
x. Only called internally by HARKinterpolator1D.eval_and_der (etc).
'''
if _isscalar(x):
pos = np.searchsorted(self.x_list,x)
if pos == 0:
y = self.coeffs[0,0] + self.coeffs[0,1]*(x - self.x_list[0])
dydx = self.coeffs[0,1]
elif (pos < self.n):
alpha = (x - self.x_list[pos-1])/(self.x_list[pos] - self.x_list[pos-1])
y = self.coeffs[pos,0] + alpha*(self.coeffs[pos,1] + alpha*(self.coeffs[pos,2] + alpha*self.coeffs[pos,3]))
dydx = (self.coeffs[pos,1] + alpha*(2*self.coeffs[pos,2] + alpha*3*self.coeffs[pos,3]))/(self.x_list[pos] - self.x_list[pos-1])
else:
alpha = x - self.x_list[self.n-1]
y = self.coeffs[pos,0] + x*self.coeffs[pos,1] - self.coeffs[pos,2]*np.exp(alpha*self.coeffs[pos,3])
dydx = self.coeffs[pos,1] - self.coeffs[pos,2]*self.coeffs[pos,3]*np.exp(alpha*self.coeffs[pos,3])
else:
m = len(x)
pos = np.searchsorted(self.x_list,x)
y = np.zeros(m)
dydx = np.zeros(m)
if y.size > 0:
out_bot = pos == 0
out_top = pos == self.n
in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
# Do the "in bounds" evaluation points
i = pos[in_bnds]
coeffs_in = self.coeffs[i,:]
alpha = (x[in_bnds] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])
y[in_bnds] = coeffs_in[:,0] + alpha*(coeffs_in[:,1] + alpha*(coeffs_in[:,2] + alpha*coeffs_in[:,3]))
dydx[in_bnds] = (coeffs_in[:,1] + alpha*(2*coeffs_in[:,2] + alpha*3*coeffs_in[:,3]))/(self.x_list[i] - self.x_list[i-1])
# Do the "out of bounds" evaluation points
y[out_bot] = self.coeffs[0,0] + self.coeffs[0,1]*(x[out_bot] - self.x_list[0])
dydx[out_bot] = self.coeffs[0,1]
alpha = x[out_top] - self.x_list[self.n-1]
y[out_top] = self.coeffs[self.n,0] + x[out_top]*self.coeffs[self.n,1] - self.coeffs[self.n,2]*np.exp(alpha*self.coeffs[self.n,3])
dydx[out_top] = self.coeffs[self.n,1] - self.coeffs[self.n,2]*self.coeffs[self.n,3]*np.exp(alpha*self.coeffs[self.n,3])
return y, dydx | 0.022535 |
def print(self, *args, **kwargs):
'''
Utility function that behaves identically to 'print' except it only
prints if verbose
'''
if self._last_args and self._last_args.verbose:
print(*args, **kwargs) | 0.008 |
def from_dict(data, ctx):
"""
Instantiate a new OpenTradeFinancing from a dict (generally from
loading a JSON response). The data used to instantiate the
OpenTradeFinancing is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('financing') is not None:
data['financing'] = ctx.convert_decimal_number(
data.get('financing')
)
return OpenTradeFinancing(**data) | 0.003623 |
def on_server_shutdown(self):
"""Stop the container before shutting down."""
if not self._container:
return
self._container.stop()
self._container.remove(v=True, force=True) | 0.009217 |
def create(self, product_type, attribute_set_id, sku, data):
"""
Create Product and return ID
:param product_type: String type of product
:param attribute_set_id: ID of attribute set
:param sku: SKU of the product
:param data: Dictionary of data
:return: INT id of product created
"""
return int(self.call(
'catalog_product.create',
[product_type, attribute_set_id, sku, data]
)
) | 0.004024 |
def agp(args):
"""
%prog agp <fastafile|sizesfile>
Convert the sizes file to a trivial AGP file.
"""
from jcvi.formats.agp import OO
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
sizesfile, = args
sizes = Sizes(sizesfile)
agpfile = sizes.filename.rsplit(".", 1)[0] + ".agp"
fw = open(agpfile, "w")
o = OO() # Without a filename
for ctg, size in sizes.iter_sizes():
o.add(ctg, ctg, size)
o.write_AGP(fw)
fw.close()
logging.debug("AGP file written to `{0}`.".format(agpfile))
return agpfile | 0.001546 |
def set_root(self, index):
"""Set the given index as root index of the combobox
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
if not index.isValid():
self.setCurrentIndex(-1)
return
if self.model() != index.model():
self.setModel(index.model())
self.setRootModelIndex(index)
if self.model().rowCount(index):
self.setCurrentIndex(0)
else:
self.setCurrentIndex(-1) | 0.003425 |
def build_agg_vec(agg_vec, **source):
""" Builds an combined aggregation vector based on various classifications
This function build an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
>>> build_agg_vec(['supreg1', 'other'], path = 'test',
>>> other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sector or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
['OECD', 'EU'] would first aggregate all countries into OECD and than
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used) If
the same entry is given in source and as text file in
path than the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
                    If an entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector)
"""
# build a dict with aggregation vectors in source and folder
if type(agg_vec) is str:
agg_vec = [agg_vec]
agg_dict = dict()
for entry in agg_vec:
try:
agg_dict[entry] = source[entry]
except KeyError:
folder = source.get('path', './')
folder = os.path.join(PYMRIO_PATH[folder], 'concordance')
for file in os.listdir(folder):
if entry == os.path.splitext(file)[0]:
_tmp = np.genfromtxt(os.path.join(folder, file), dtype=str)
if _tmp.ndim == 1:
agg_dict[entry] = [None if ee == 'None'
else ee for ee in _tmp.tolist()]
else:
agg_dict[entry] = [None if ee == 'None'
else ee
for ee in _tmp[:, -1].tolist()]
break
else:
logging.error(
'Aggregation vector -- {} -- not found'
.format(str(entry)))
# build the summary aggregation vector
def _rep(ll, ii, vv): ll[ii] = vv
miss_val = source.get('miss', 'REST')
vec_list = [agg_dict[ee] for ee in agg_vec]
out = [None, ] * len(vec_list[0])
for currvec in vec_list:
if len(currvec) != len(out):
logging.warn('Inconsistent vector length')
[_rep(out, ind, val) for ind, val in
enumerate(currvec) if not out[ind]]
[_rep(out, ind, miss_val) for ind, val in enumerate(out) if not val]
return out | 0.000219 |
def _pattern(*names, **kwargs):
"""Returns globbing pattern for name1/name2/../lastname + '--*' or
    name1/name2/../lastname + extension if parameter `extension` is set.
Parameters
----------
names : strings
Which path to join. Example: _pattern('path', 'to', 'experiment') will
return `path/to/experiment--*`.
extension : string
If other extension then --* is wanted.
Example: _pattern('path', 'to', 'image', extension='*.png') will return
`path/to/image*.png`.
Returns
-------
string
Joined glob pattern string.
"""
if 'extension' not in kwargs:
kwargs['extension'] = '--*'
return os.path.join(*names) + kwargs['extension'] | 0.001372 |
def show(keyword=''):
"""
Displays a list of all environment key/value pairs for the current role.
"""
keyword = keyword.strip().lower()
max_len = max(len(k) for k in env.iterkeys())
keyword_found = False
for k in sorted(env.keys()):
if keyword and keyword not in k.lower():
continue
keyword_found = True
#print '%s: %s' % (k, env[k])
print('%s: ' % (k.ljust(max_len),))
pprint(env[k], indent=4)
if keyword:
if not keyword_found:
print('Keyword "%s" not found.' % keyword) | 0.003472 |
def _FetchMostRecentGraphSeriesFromTheLegacyDB(
label,
report_type,
token = None
):
"""Fetches the latest graph-series for a client label from the legacy DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the DB.
Raises:
AFF4AttributeTypeError: If an unexpected report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist.
"""
try:
stats_for_label = aff4.FACTORY.Open(
GetAFF4ClientReportsURN().Add(label),
aff4_type=aff4_stats.ClientFleetStats,
mode="r",
token=token)
except aff4.InstantiationError:
# Nothing to return for the given label and report-type.
return None
aff4_attr = _GetAFF4AttributeForReportType(report_type)
graph_series = rdf_stats.ClientGraphSeries(report_type=report_type)
if aff4_attr.attribute_type == rdf_stats.GraphSeries:
graphs = stats_for_label.Get(aff4_attr)
if graphs is None:
return None
for graph in graphs:
graph_series.graphs.Append(graph)
elif aff4_attr.attribute_type == rdf_stats.Graph:
graph = stats_for_label.Get(aff4_attr)
if graph is None:
return None
graph_series.graphs.Append(graph)
else:
raise AFF4AttributeTypeError(aff4_attr.attribute_type)
return graph_series | 0.009987 |
def stats(self):
"""Basic group statistics.
Returned dict has the following keys:
'online' - users online count
'ingame' - users currently in game count
'chatting' - users chatting count
:return: dict
"""
stats_online = CRef.cint()
stats_ingame = CRef.cint()
stats_chatting = CRef.cint()
self._iface.get_clan_stats(
self.group_id,
stats_online,
stats_ingame,
stats_chatting,
)
return {
'online': int(stats_online),
'ingame': int(stats_ingame),
'chatting': int(stats_chatting),
} | 0.002894 |
def prepare_command(self):
"""
Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given
and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
"""
try:
cmdline_args = self.loader.load_file('args', string_types)
self.command = shlex.split(cmdline_args.decode('utf-8'))
self.execution_mode = ExecutionMode.RAW
except ConfigurationError:
self.command = self.generate_ansible_command() | 0.007394 |
def zero_cluster(name):
'''
Reset performance statistics to zero across the cluster.
.. code-block:: yaml
zero_ats_cluster:
trafficserver.zero_cluster
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if __opts__['test']:
ret['comment'] = 'Zeroing cluster statistics'
return ret
__salt__['trafficserver.zero_cluster']()
ret['result'] = True
ret['comment'] = 'Zeroed cluster statistics'
return ret | 0.00189 |
def estimate(init_values,
estimator,
method,
loss_tol,
gradient_tol,
maxiter,
print_results,
use_hessian=True,
just_point=False,
**kwargs):
"""
Estimate the given choice model that is defined by `estimator`.
Parameters
----------
    init_values : 1D ndarray.
Should contain the initial values to start the optimization process
with.
estimator : an instance of the EstimationObj class.
method : str, optional.
Should be a valid string for scipy.optimize.minimize. Determines
the optimization algorithm that is used for this problem.
Default `== 'bfgs'`.
loss_tol : float, optional.
Determines the tolerance on the difference in objective function
values from one iteration to the next that is needed to determine
convergence. Default `== 1e-06`.
gradient_tol : float, optional.
Determines the tolerance on the difference in gradient values from
one iteration to the next which is needed to determine convergence.
Default `== 1e-06`.
maxiter : int, optional.
Determines the maximum number of iterations used by the optimizer.
Default `== 1000`.
    print_results : bool, optional.
        Determines whether the timing and initial and final log likelihood
        results will be printed as they are determined.
Default `== True`.
use_hessian : bool, optional.
Determines whether the `calc_neg_hessian` method of the `estimator`
object will be used as the hessian function during the estimation. This
kwarg is used since some models (such as the Mixed Logit and Nested
Logit) use a rather crude (i.e. the BHHH) approximation to the Fisher
Information Matrix, and users may prefer to not use this approximation
for the hessian during estimation.
just_point : bool, optional.
Determines whether or not calculations that are non-critical for
obtaining the maximum likelihood point estimate will be performed.
Default == False.
Return
------
results : dict.
The dictionary of estimation results that is returned by
scipy.optimize.minimize. It will also have (at minimum) the following
keys:
- "log-likelihood_null"
- "final_log_likelihood"
- "utility_coefs"
- "intercept_params"
- "shape_params"
- "nest_params"
- "chosen_probs"
- "long_probs"
- "residuals"
- "ind_chi_squareds"
- "rho_squared"
- "rho_bar_squared"
- "final_gradient"
- "final_hessian"
- "fisher_info"
"""
if not just_point:
# Perform preliminary calculations
log_likelihood_at_zero =\
estimator.convenience_calc_log_likelihood(estimator.zero_vector)
initial_log_likelihood =\
estimator.convenience_calc_log_likelihood(init_values)
if print_results:
# Print the log-likelihood at zero
null_msg = "Log-likelihood at zero: {:,.4f}"
print(null_msg.format(log_likelihood_at_zero))
# Print the log-likelihood at the starting values
init_msg = "Initial Log-likelihood: {:,.4f}"
print(init_msg.format(initial_log_likelihood))
sys.stdout.flush()
    # Get the hessian function for this estimation process
hess_func = estimator.calc_neg_hessian if use_hessian else None
# Estimate the actual parameters of the model
start_time = time.time()
results = minimize(estimator.calc_neg_log_likelihood_and_neg_gradient,
init_values,
method=method,
jac=True,
hess=hess_func,
tol=loss_tol,
options={'gtol': gradient_tol,
"maxiter": maxiter},
**kwargs)
if not just_point:
if print_results:
# Stop timing the estimation process and report the timing results
end_time = time.time()
elapsed_sec = (end_time - start_time)
elapsed_min = elapsed_sec / 60.0
if elapsed_min > 1.0:
msg = "Estimation Time for Point Estimation: {:.2f} minutes."
print(msg.format(elapsed_min))
else:
msg = "Estimation Time for Point Estimation: {:.2f} seconds."
print(msg.format(elapsed_sec))
print("Final log-likelihood: {:,.4f}".format(-1 * results["fun"]))
sys.stdout.flush()
# Store the log-likelihood at zero
results["log_likelihood_null"] = log_likelihood_at_zero
# Calculate and store the post-estimation results
results = calc_and_store_post_estimation_results(results, estimator)
return results | 0.000199 |
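The optimizer call at the heart of the function above is `scipy.optimize.minimize` with `jac=True`, meaning the objective returns the value and gradient together; here is a self-contained sketch with a toy quadratic standing in for the negative log-likelihood.

import numpy as np
from scipy.optimize import minimize

def neg_loglike_and_grad(beta):
    # Toy objective 0.5 * ||beta - 1||^2; its gradient is (beta - 1).
    resid = beta - 1.0
    return 0.5 * np.dot(resid, resid), resid

results = minimize(neg_loglike_and_grad,
                   np.zeros(3),
                   method='BFGS',
                   jac=True,                       # fun returns (value, gradient)
                   tol=1e-6,
                   options={'gtol': 1e-6, 'maxiter': 1000})
print(results.x)     # close to [1, 1, 1]
print(results.fun)   # close to 0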