def bounds(self):
"""
Return the axis aligned bounding box of the current path.
Returns
----------
bounds: (2, dimension) float, (min, max) coordinates
"""
# get the exact bounds of each entity
# some entities (aka 3- point Arc) have bounds that can't
# be generated from just bound box of vertices
points = np.array([e.bounds(self.vertices)
for e in self.entities],
dtype=np.float64)
# flatten bound extrema into (n, dimension) array
points = points.reshape((-1, self.vertices.shape[1]))
# get the max and min of all bounds
bounds = np.array([points.min(axis=0),
points.max(axis=0)],
dtype=np.float64)
return bounds
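
# A standalone numpy sketch of the reduction used in bounds() above (not the
# trimesh API itself): stack per-entity extrema into an (n, dimension) array
# and take the column-wise min and max. The entity_bounds values here are
# made up for illustration.
import numpy as np

entity_bounds = np.array([[[0.0, 0.0], [2.0, 1.0]],
                          [[1.5, -1.0], [3.0, 0.5]]])
points = entity_bounds.reshape((-1, 2))
aabb = np.array([points.min(axis=0), points.max(axis=0)])
print(aabb)  # min corner [0., -1.], max corner [3., 1.]
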
def weighted_minkowski(x, y, w=_mock_identity, p=2):
"""A weighted version of Minkowski distance.
.. math::
D(x, y) = \left(\sum_i w_i |x_i - y_i|^p\right)^{\frac{1}{p}}
If weights w_i are inverse standard deviations of data in each dimension
then this represents a standardised Minkowski distance (and is
equivalent to standardised Euclidean distance for p=1).
"""
result = 0.0
for i in range(x.shape[0]):
result += (w[i] * np.abs(x[i] - y[i])) ** p
return result ** (1.0 / p)
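
# A minimal usage sketch for weighted_minkowski above, assuming numpy is
# imported as np and the function is in scope; weights are passed explicitly
# to avoid relying on the module-level _mock_identity default. With unit
# weights and p=2 the result is the ordinary Euclidean distance.
x = np.array([0.0, 3.0])
y = np.array([4.0, 0.0])
w = np.ones_like(x)
print(weighted_minkowski(x, y, w=w, p=2))  # 5.0
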
def dict_strict_update(base_dict, update_dict):
"""
This function updates base_dict with update_dict if and only if update_dict does not contain
keys that are not already in base_dict. It is essentially a more strict interpretation of the
term "updating" the dict.
If update_dict contains keys that are not in base_dict, a RuntimeError is raised.
:param base_dict: The dict that is to be updated. This dict is modified.
:param update_dict: The dict containing the new values.
"""
additional_keys = set(update_dict.keys()) - set(base_dict.keys())
if len(additional_keys) > 0:
raise RuntimeError(
'The update dictionary contains keys that are not part of '
'the base dictionary: {}'.format(str(additional_keys)),
additional_keys)
base_dict.update(update_dict)
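
# A usage sketch for dict_strict_update above: updating a known key succeeds
# in place, while an unknown key raises RuntimeError and leaves the base
# dict untouched.
config = {"host": "localhost", "port": 8080}
dict_strict_update(config, {"port": 9090})
print(config)  # {'host': 'localhost', 'port': 9090}
try:
    dict_strict_update(config, {"timeout": 5})
except RuntimeError as error:
    print(error)
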
def _lookup_enum_in_ns(namespace, value):
"""Return the attribute of namespace corresponding to value."""
for attribute in dir(namespace):
if getattr(namespace, attribute) == value:
return attribute
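
# A usage sketch for _lookup_enum_in_ns above, using a plain class as a
# stand-in namespace with enum-like integer attributes.
class Color:
    RED = 1
    GREEN = 2

print(_lookup_enum_in_ns(Color, 2))  # 'GREEN'
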
def iter_predict(self, X, include_init=False):
"""Returns the predictions for ``X`` at every stage of the boosting procedure.
Args:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples,) containing the predicted values at each stage
"""
utils.validation.check_is_fitted(self, 'init_estimator_')
X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)
y_pred = self.init_estimator_.predict(X)
# The user decides if the initial prediction should be included or not
if include_init:
yield y_pred
for estimators, line_searchers, cols in itertools.zip_longest(self.estimators_,
self.line_searchers_,
self.columns_):
for i, (estimator, line_searcher) in enumerate(itertools.zip_longest(estimators,
line_searchers or [])):
# If we used column sampling then we have to make sure the columns of X are arranged
# in the correct order
if cols is None:
direction = estimator.predict(X)
else:
direction = estimator.predict(X[:, cols])
if line_searcher:
direction = line_searcher.update(direction)
y_pred[:, i] += self.learning_rate * direction
yield y_pred
async def deserialize(self, data: dict, silent=True):
'''
Deserializes a Python ``dict`` into the model by assigning values to their respective fields.
Ignores data attributes that do not match one of the Model's fields.
Ignores data attributes whose matching fields are declared with the ``readonly`` attribute.
Validates the data after import.
Override in sub classes to modify or add to deserialization behavior
:param data:
Python dictionary with data
:type data:
``dict``
:param silent:
Determines if an exception is thrown if illegal fields are passed. Such fields can be non existent or readonly. Default is True
:type silent:
``bool``
'''
self.import_data(self. _deserialize(data))
self.validate()
def _parse_json(self, json, exactly_one=True):
'''Returns location, (latitude, longitude) from json feed.'''
features = json['features']
if features == []:
return None
def parse_feature(feature):
location = feature['place_name']
place = feature['text']
longitude = feature['geometry']['coordinates'][0]
latitude = feature['geometry']['coordinates'][1]
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_feature(features[0])
else:
return [parse_feature(feature) for feature in features]
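
# A standalone sketch of the GeoJSON-like feed shape _parse_json above
# expects; the feed values are invented for illustration. It shows how the
# place name and (latitude, longitude) pair are pulled out of one feature.
feed = {"features": [{"place_name": "Dili, Timor-Leste",
                      "text": "Dili",
                      "geometry": {"coordinates": [125.567, -8.556]}}]}
feature = feed["features"][0]
longitude, latitude = feature["geometry"]["coordinates"]
print(feature["place_name"], (latitude, longitude))
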
def _ssl_agent(self):
"""
Get a Twisted Agent that performs Client SSL authentication for Koji.
"""
# Load "cert" into a PrivateCertificate.
certfile = self.lookup(self.profile, 'cert')
certfile = os.path.expanduser(certfile)
with open(certfile) as certfp:
pemdata = certfp.read()
client_cert = PrivateCertificate.loadPEM(pemdata)
trustRoot = None # Use Twisted's platformTrust().
# Optionally load "serverca" into a Certificate.
servercafile = self.lookup(self.profile, 'serverca')
if servercafile:
servercafile = os.path.expanduser(servercafile)
trustRoot = RootCATrustRoot(servercafile)
policy = ClientCertPolicy(trustRoot=trustRoot, client_cert=client_cert)
return Agent(reactor, policy)
def get_fuzzed(self, indent=False, utf8=False):
"""
Return the fuzzed object
"""
try:
if "array" in self.json:
return self.fuzz_elements(dict(self.json))["array"]
else:
return self.fuzz_elements(dict(self.json))
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
def encode_list(cls, value):
"""
Encodes a list *value* into a string via base64 encoding.
"""
encoded = base64.b64encode(six.b(" ".join(str(v) for v in value) or "-"))
return encoded.decode("utf-8") if six.PY3 else encoded
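
# A standalone sketch of the encoding performed by encode_list above: values
# are space-joined (or "-" when the list is empty) and base64-encoded to a
# UTF-8 string; six is only needed for Python 2/3 compatibility in the
# original, so plain base64 is used here.
import base64

values = [1, "a", 2.5]
encoded = base64.b64encode(" ".join(str(v) for v in values).encode()).decode("utf-8")
print(encoded)                             # 'MSBhIDIuNQ=='
print(base64.b64decode(encoded).decode())  # '1 a 2.5'
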
def _get_parameter_values(template_dict, parameter_overrides):
"""
Construct a final list of values for CloudFormation template parameters based on user-supplied values,
default values provided in template, and sane defaults for pseudo-parameters.
Parameters
----------
template_dict : dict
SAM template dictionary
parameter_overrides : dict
User-supplied values for CloudFormation template parameters
Returns
-------
dict
Values for template parameters to substitute in template with
"""
default_values = SamBaseProvider._get_default_parameter_values(template_dict)
# NOTE: Ordering of following statements is important. It makes sure that any user-supplied values
# override the defaults
parameter_values = {}
parameter_values.update(SamBaseProvider._DEFAULT_PSEUDO_PARAM_VALUES)
parameter_values.update(default_values)
parameter_values.update(parameter_overrides or {})
return parameter_values
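
# A standalone sketch of the precedence implemented by _get_parameter_values
# above: later dict.update() calls win, so user-supplied overrides beat
# template defaults, which beat pseudo-parameter defaults. All values here
# are hypothetical.
pseudo_defaults = {"AWS::Region": "us-east-1"}
template_defaults = {"Stage": "dev"}
overrides = {"Stage": "prod"}

parameter_values = {}
parameter_values.update(pseudo_defaults)
parameter_values.update(template_defaults)
parameter_values.update(overrides)
print(parameter_values)  # {'AWS::Region': 'us-east-1', 'Stage': 'prod'}
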
def word_break(el, max_width=40,
avoid_elements=_avoid_word_break_elements,
avoid_classes=_avoid_word_break_classes,
break_character=unichr(0x200b)):
"""
Breaks any long words found in the body of the text (not attributes).
Doesn't affect any of the tags in avoid_elements, by default
``<textarea>`` and ``<pre>``
Breaks words by inserting ​, which is a unicode character
for Zero Width Space character. This generally takes up no space
in rendering, but does copy as a space, and in monospace contexts
usually takes up space.
See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion
"""
# Character suggestion of ​ comes from:
# http://www.cs.tut.fi/~jkorpela/html/nobr.html
if el.tag in avoid_elements:
return
class_name = el.get('class')
if class_name:
dont_break = False
class_name = class_name.split()
for avoid in avoid_classes:
if avoid in class_name:
dont_break = True
break
if dont_break:
return
if el.text:
el.text = _break_text(el.text, max_width, break_character)
for child in el:
word_break(child, max_width=max_width,
avoid_elements=avoid_elements,
avoid_classes=avoid_classes,
break_character=break_character)
if child.tail:
child.tail = _break_text(child.tail, max_width, break_character)
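
# A simplified standalone sketch of the word-breaking idea in word_break
# above (it does not reproduce the module's _break_text helper): chunk an
# over-long token and join the pieces with U+200B, the zero-width space.
def break_long_word(word, max_width=10, break_character="\u200b"):
    pieces = [word[i:i + max_width] for i in range(0, len(word), max_width)]
    return break_character.join(pieces)

print(break_long_word("a" * 25, max_width=10))  # three chunks of 10, 10 and 5
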
def _build_command_chain(self, command):
"""
Builds execution chain including all intercepters and the specified command.
:param command: the command to build a chain.
"""
next = command
for intercepter in reversed(self._intercepters):
next = InterceptedCommand(intercepter, next)
self._commands_by_name[next.get_name()] = next
def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# For legacy reasons, the arguments here can be implicitly converted into floats,
# if they are not columns or strings.
if isinstance(col1, Column):
arg1 = col1._jc
elif isinstance(col1, basestring):
arg1 = _create_column_from_name(col1)
else:
arg1 = float(col1)
if isinstance(col2, Column):
arg2 = col2._jc
elif isinstance(col2, basestring):
arg2 = _create_column_from_name(col2)
else:
arg2 = float(col2)
jc = getattr(sc._jvm.functions, name)(arg1, arg2)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def disk_check_size(ctx, param, value):
""" Validation callback for disk size parameter."""
if value:
# if we've got a prefix
if isinstance(value, tuple):
val = value[1]
else:
val = value
if val % 1024:
raise click.ClickException('Size must be a multiple of 1024.')
return value
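
# A usage sketch for disk_check_size above, assuming click is installed and
# imported; ctx and param are unused by the check itself, so None
# placeholders are enough outside a real click command.
print(disk_check_size(None, None, 2048))  # 2048
try:
    disk_check_size(None, None, 1000)
except click.ClickException as error:
    print(error)  # Size must be a multiple of 1024.
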
def vspec(data):
"""
Takes the vector mean of replicate measurements at a given step
"""
vdata, Dirdata, step_meth = [], [], []
tr0 = data[0][0] # set beginning treatment
data.append("Stop")
k, R = 1, 0
for i in range(k, len(data)):
Dirdata = []
if data[i][0] != tr0:
if i == k: # sample is unique
vdata.append(data[i - 1])
step_meth.append(" ")
else: # sample is not unique
for l in range(k - 1, i):
Dirdata.append([data[l][1], data[l][2], data[l][3]])
dir, R = vector_mean(Dirdata)
vdata.append([data[i - 1][0], dir[0], dir[1],
old_div(R, (i - k + 1)), '1', 'g'])
step_meth.append("DE-VM")
tr0 = data[i][0]
k = i + 1
if tr0 == "stop":
break
del data[-1]
return step_meth, vdata
def _start_connect(self, connect_type):
"""Starts the connection process, as called (internally)
from the user context, either from auto_connect() or connect().
Never call this from the _comm() process context.
"""
if self._connect_state.value != self.CS_NOT_CONNECTED:
# already done or in process, assume success
return
self._connected.value = 0
self._connect_state.value = self.CS_ATTEMPTING_CONNECT
# tell comm process to attempt connection
self._attempting_connect.value = connect_type
# EXTREMELY IMPORTANT - for this to work at all in Windows,
# where the above processes are spawned (vs forked in Unix),
# the thread objects (as sattributes of this object) must be
# assigned to this object AFTER we have spawned the processes.
# That way, multiprocessing can pickle the freshroastsr700
# successfully. (It can't pickle thread-related stuff.)
if self.update_data_func is not None:
# Need to launch the thread that will listen to the event
self._create_update_data_system(
None, setFunc=False, createThread=True)
self.update_data_thread.start()
if self.state_transition_func is not None:
# Need to launch the thread that will listen to the event
self._create_state_transition_system(
None, setFunc=False, createThread=True)
self.state_transition_thread.start()
def validate_split_runs_file(split_runs_file):
"""Check if structure of file is as expected and return dictionary linking names to run_IDs."""
try:
content = [l.strip() for l in split_runs_file.readlines()]
if content[0].upper().split('\t') == ['NAME', 'RUN_ID']:
return {c.split('\t')[1]: c.split('\t')[0] for c in content[1:] if c}
else:
sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
logging.error("Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
except IndexError:
sys.exit("ERROR: Format of --split_runs tab separated file not as expected")
logging.error("ERROR: Format of --split_runs tab separated file not as expected")
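
# A usage sketch for validate_split_runs_file above, feeding it an in-memory
# tab-separated file instead of a real file handle.
import io

tsv = io.StringIO("NAME\tRUN_ID\nsampleA\trun001\nsampleB\trun002\n")
print(validate_split_runs_file(tsv))  # {'run001': 'sampleA', 'run002': 'sampleB'}
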
def umi_transform(data):
"""
transform each read by identifying the barcode and UMI for each read
and putting the information in the read name
"""
fqfiles = data["files"]
fqfiles.extend(list(repeat("", 4-len(fqfiles))))
fq1, fq2, fq3, fq4 = fqfiles
umi_dir = os.path.join(dd.get_work_dir(data), "umis")
safe_makedir(umi_dir)
transform = dd.get_umi_type(data)
if not transform:
logger.info("No UMI transform specified, assuming pre-transformed data.")
if is_transformed(fq1):
logger.info("%s detected as pre-transformed, passing it on unchanged." % fq1)
data["files"] = [fq1]
return [[data]]
else:
logger.error("No UMI transform was specified, but %s does not look "
"pre-transformed." % fq1)
sys.exit(1)
if file_exists(transform):
transform_file = transform
else:
transform_file = get_transform_file(transform)
if not file_exists(transform_file):
logger.error(
"The UMI transform can be specified as either a file or a "
"bcbio-supported transform. Either the file %s does not exist "
"or the transform is not supported by bcbio. Supported "
"transforms are %s."
%(dd.get_umi_type(data), ", ".join(SUPPORTED_TRANSFORMS)))
sys.exit(1)
out_base = dd.get_sample_name(data) + ".umitransformed.fq.gz"
out_file = os.path.join(umi_dir, out_base)
if file_exists(out_file):
data["files"] = [out_file]
return [[data]]
cellular_barcodes = get_cellular_barcodes(data)
if len(cellular_barcodes) > 1:
split_option = "--separate_cb"
else:
split_option = ""
if dd.get_demultiplexed(data):
demuxed_option = "--demuxed_cb %s" % dd.get_sample_name(data)
split_option = ""
else:
demuxed_option = ""
cores = dd.get_num_cores(data)
# skip transformation if the file already looks transformed
with open_fastq(fq1) as in_handle:
read = next(in_handle)
if "UMI_" in read:
data["files"] = [out_file]
return [[data]]
locale_export = utils.locale_export()
umis = _umis_cmd(data)
cmd = ("{umis} fastqtransform {split_option} {transform_file} "
"--cores {cores} {demuxed_option} "
"{fq1} {fq2} {fq3} {fq4}"
"| seqtk seq -L 20 - | gzip > {tx_out_file}")
message = ("Inserting UMI and barcode information into the read name of %s"
% fq1)
with file_transaction(out_file) as tx_out_file:
do.run(cmd.format(**locals()), message)
data["files"] = [out_file]
return [[data]]
def execute(self, *args, **kwargs):
"""
See :py:func:`silverberg.client.CQLClient.execute`
"""
num_clients = len(self._seed_clients)
start_client = (self._client_idx + 1) % num_clients
def _client_error(failure, client_i):
failure.trap(ConnectError)
client_i = (client_i + 1) % num_clients
if client_i == start_client:
return failure
else:
return _try_execute(client_i)
def _try_execute(client_i):
self._client_idx = client_i
d = self._seed_clients[client_i].execute(*args, **kwargs)
return d.addErrback(_client_error, client_i)
return _try_execute(start_client)
def __start_experiment(self, parameters):
"""
Start an experiment by capturing the state of the code
:param parameters: a dictionary containing the parameters of the experiment
:type parameters: dict
:return: the tag representing this experiment
:rtype: TagReference
"""
repository = Repo(self.__repository_directory, search_parent_directories=True)
if len(repository.untracked_files) > 0:
logging.warning("Untracked files will not be recorded: %s", repository.untracked_files)
current_commit = repository.head.commit
started_state_is_dirty = repository.is_dirty()
if started_state_is_dirty:
repository.index.add([p for p in self.__get_files_to_be_added(repository)])
commit_obj = repository.index.commit("Temporary commit for experiment " + self.__experiment_name)
sha = commit_obj.hexsha
else:
sha = repository.head.object.hexsha
data = {"parameters": parameters, "started": time.time(), "description": self.__description,
"commit_sha": sha}
tag_object = self.__tag_repo(data, repository)
if started_state_is_dirty:
repository.head.reset(current_commit, working_tree=False, index=True)
return tag_object
def save(state, filename=None, desc='', extra=None):
"""
Save the current state with extra information (for example samples and LL
from the optimization procedure).
Parameters
----------
state : peri.states.ImageState
the state object which to save
filename : string
if provided, will override the default that is constructed based on
the state's raw image file. If there is no filename and the state has
a RawImage, the it is saved to RawImage.filename + "-peri-save.pkl"
desc : string
if provided, will augment the default filename to be
RawImage.filename + '-peri-' + desc + '.pkl'
extra : list of pickleable objects
if provided, will be saved with the state
"""
if isinstance(state.image, util.RawImage):
desc = desc or 'save'
filename = filename or state.image.filename + '-peri-' + desc + '.pkl'
else:
if not filename:
raise AttributeError("Must provide filename since RawImage is not used")
if extra is None:
save = state
else:
save = [state] + extra
if os.path.exists(filename):
ff = "{}-tmp-for-copy".format(filename)
if os.path.exists(ff):
os.remove(ff)
os.rename(filename, ff)
pickle.dump(save, open(filename, 'wb'), protocol=2)
def clone(self, population):
"""
Copy the holder just enough to be able to run a new simulation without modifying the original simulation.
"""
new = empty_clone(self)
new_dict = new.__dict__
for key, value in self.__dict__.items():
if key not in ('population', 'formula', 'simulation'):
new_dict[key] = value
new_dict['population'] = population
new_dict['simulation'] = population.simulation
return new
def align_rna(job, fastqs, univ_options, star_options):
"""
A wrapper for the entire rna alignment subgraph.
:param list fastqs: The input fastqs for alignment
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing input bam and the generated index (.bam.bai)
:rtype: dict
"""
star = job.wrapJobFn(run_star, fastqs, univ_options, star_options,
cores=star_options['n'],
memory=PromisedRequirement(lambda x: int(1.85 * x.size),
star_options['index']),
disk=PromisedRequirement(star_disk, fastqs, star_options['index']))
s_and_i = job.wrapJobFn(sort_and_index_star, star.rv(), univ_options,
star_options).encapsulate()
job.addChild(star)
star.addChild(s_and_i)
return s_and_i.rv()
def get_requirements():
"""Parse a requirements.txt file and return as a list."""
with open(os.path.join(topdir, 'requirements.txt')) as fin:
lines = fin.readlines()
lines = [line.strip() for line in lines]
return lines
def createTable(dbconn, pd):
"""Creates a database table for the given PacketDefinition."""
cols = ('%s %s' % (defn.name, getTypename(defn)) for defn in pd.fields)
sql = 'CREATE TABLE IF NOT EXISTS %s (%s)' % (pd.name, ', '.join(cols))
dbconn.execute(sql)
dbconn.commit()
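
# A standalone sqlite3 sketch of the dynamic CREATE TABLE pattern used by
# createTable above, with a hypothetical field list standing in for a real
# PacketDefinition and getTypename().
import sqlite3

fields = [("time", "REAL"), ("voltage", "REAL"), ("status", "INTEGER")]
cols = ", ".join("%s %s" % (name, typename) for name, typename in fields)
sql = "CREATE TABLE IF NOT EXISTS telemetry (%s)" % cols
with sqlite3.connect(":memory:") as dbconn:
    dbconn.execute(sql)
    dbconn.commit()
print(sql)  # CREATE TABLE IF NOT EXISTS telemetry (time REAL, voltage REAL, status INTEGER)
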
def _set_base_path_env(): # type: () -> None
"""Sets the environment variable SAGEMAKER_BASE_DIR as
~/sagemaker_local/{timestamp}/opt/ml
Returns:
(bool): indicating whe
"""
local_config_dir = os.path.join(os.path.expanduser('~'), 'sagemaker_local', 'jobs',
str(time.time()), 'opt', 'ml')
logger.info('Setting environment variable SAGEMAKER_BASE_DIR as %s .' % local_config_dir)
os.environ[BASE_PATH_ENV] = local_config_dir
def logging_set_filter(name, filter_def, ttl, **kwargs):
"""
Set local filter.
"""
ctx = Context(**kwargs)
ctx.execute_action('logging:set_filter', **{
'logging_service': ctx.repo.create_secure_service('logging'),
'logger_name': name,
'filter_def': filter_def,
'ttl': ttl,
})
def to_struct(model):
"""Cast instance of model to python structure.
:param model: Model to be casted.
:rtype: ``dict``
"""
model.validate()
resp = {}
for _, name, field in model.iterate_with_name():
value = field.__get__(model)
if value is None:
continue
value = field.to_struct(value)
resp[name] = value
return resp
def get_review_average(obj):
"""Returns the review average for an object."""
total = 0
reviews = get_reviews(obj)
if not reviews:
return False
for review in reviews:
average = review.get_average_rating()
if average:
total += review.get_average_rating()
if total > 0:
return total / reviews.count()
return False
def results(self, Snwp):
r"""
Returns the phase configuration at the specified non-wetting phase
(invading phase) saturation.
Parameters
----------
Snwp : scalar, between 0 and 1
The network saturation for which the phase configuration is
desired.
Returns
-------
Two dictionaries containing arrays that describe the pore and throat
distribution at the given saturation. Specifically, these are:
**'pore.occupancy'** : 1 indicates the pores is invaded and 0
otherwise.
**'throat.occupancy'** : Same as described above but for throats.
"""
net = self.project.network
P12 = net['throat.conns']
# Fetch void volume for pores and throats
Vp = net[self.settings['pore_volume']]
Vt = net[self.settings['throat_volume']]
# Fetch the order of filling
Np = self['pore.invasion_sequence']
Nt = self['throat.invasion_sequence']
# Create Nt-long mask of which pores were filled when throat was filled
Pinv = (Np[P12].T == Nt).T
# If a pore and throat filled together, find combined volume
Vinv = sp.vstack(((Pinv*Vp[P12]).T, Vt)).T
Vinv = sp.sum(Vinv, axis=1)
# Convert to cumulative volume filled as each throat is invaded
x = sp.argsort(Nt) # Find order throats were invaded
Vinv_cum = np.cumsum(Vinv[x])
# Normalized cumulative volume filled into saturation
S = Vinv_cum/(Vp.sum() + Vt.sum())
# Find throat invasion step where Snwp was reached
try:
N = sp.where(S < Snwp)[0][-1]
except:
N = -np.inf
data = {'pore.occupancy': Np <= N, 'throat.occupancy': Nt <= N}
return data
def post(action, params=None, version=6):
"""
For the documentation, see https://foosoft.net/projects/anki-connect/
:param str action:
:param dict params:
:param int version:
:return:
"""
if params is None:
params = dict()
to_send = {
'action': action,
'version': version,
'params': params
}
r = requests.post(AnkiConnect.URL, json=to_send)
return r.json()
def RecreateInstanceDisks(r, instance, disks=None, nodes=None):
"""Recreate an instance's disks.
@type instance: string
@param instance: Instance name
@type disks: list of int
@param disks: List of disk indexes
@type nodes: list of string
@param nodes: New instance nodes, if relocation is desired
@rtype: string
@return: job id
"""
body = {}
if disks is not None:
body["disks"] = disks
if nodes is not None:
body["nodes"] = nodes
return r.request("post", "/2/instances/%s/recreate-disks" % instance,
content=body)
def heap_item(clock, record, shard):
"""Create a tuple of (ordering, (record, shard)) for use in a RecordBuffer."""
# Primary ordering is by event creation time.
# However, creation time is *approximate* and has whole-second resolution.
# This means two events in the same shard within one second can't be ordered.
ordering = record["meta"]["created_at"]
# From testing, SequenceNumber isn't a guaranteed ordering either. However,
# it is guaranteed to be unique within a shard. This will be tie-breaker
# for multiple records within the same shard, within the same second.
second_ordering = int(record["meta"]["sequence_number"])
# It's possible though unlikely, that sequence numbers will collide across
# multiple shards, within the same second. The final tie-breaker is
# a monotonically increasing integer from the buffer.
total_ordering = (ordering, second_ordering, clock())
return total_ordering, record, shard
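
# A usage sketch for heap_item above, with a monotonic fake clock and a
# minimal hand-written record; the returned ordering tuple sorts by
# (created_at, sequence_number, clock tick).
import itertools

clock = itertools.count().__next__
record = {"meta": {"created_at": 1700000000, "sequence_number": "42"}}
ordering, rec, shard = heap_item(clock, record, shard="shard-0001")
print(ordering)  # (1700000000, 42, 0)
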
def json(self):
""" Return a JSON-serializeable object containing station metadata."""
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
}
def _wrap_key(function, args, kws):
'''
get the key from the function input.
'''
return hashlib.md5(pickle.dumps((_from_file(function) + function.__name__, args, kws))).hexdigest()
def notify(
self,
method_name: str,
*args: Any,
trim_log_values: Optional[bool] = None,
validate_against_schema: Optional[bool] = None,
**kwargs: Any
) -> Response:
"""
Send a JSON-RPC request, without expecting a response.
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
"""
return self.send(
Notification(method_name, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
def enable_one_shot_process_breakpoints(self, dwProcessId):
"""
Enables for one shot all disabled breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
"""
# enable code breakpoints for one shot
for bp in self.get_process_code_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_one_shot_code_breakpoint(dwProcessId, bp.get_address())
# enable page breakpoints for one shot
for bp in self.get_process_page_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_one_shot_page_breakpoint(dwProcessId, bp.get_address())
# enable hardware breakpoints for one shot
if self.system.has_process(dwProcessId):
aProcess = self.system.get_process(dwProcessId)
else:
aProcess = Process(dwProcessId)
aProcess.scan_threads()
for aThread in aProcess.iter_threads():
dwThreadId = aThread.get_tid()
for bp in self.get_thread_hardware_breakpoints(dwThreadId):
if bp.is_disabled():
self.enable_one_shot_hardware_breakpoint(dwThreadId, bp.get_address())
def trim_wavs(org_wav_dir=ORG_WAV_DIR,
tgt_wav_dir=TGT_WAV_DIR,
org_xml_dir=ORG_XML_DIR):
""" Extracts sentence-level transcriptions, translations and wavs from the
Na Pangloss XML and WAV files. But otherwise doesn't preprocess them."""
logging.info("Trimming wavs...")
if not os.path.exists(os.path.join(tgt_wav_dir, "TEXT")):
os.makedirs(os.path.join(tgt_wav_dir, "TEXT"))
if not os.path.exists(os.path.join(tgt_wav_dir, "WORDLIST")):
os.makedirs(os.path.join(tgt_wav_dir, "WORDLIST"))
for fn in os.listdir(org_xml_dir):
path = os.path.join(org_xml_dir, fn)
prefix, _ = os.path.splitext(fn)
if os.path.isdir(path):
continue
if not path.endswith(".xml"):
continue
logging.info("Trimming wavs from {}".format(fn))
rec_type, _, times, _ = pangloss.get_sents_times_and_translations(path)
# Extract the wavs given the times.
for i, (start_time, end_time) in enumerate(times):
if prefix.endswith("PLUSEGG"):
in_wav_path = os.path.join(org_wav_dir, prefix.upper()[:-len("PLUSEGG")]) + ".wav"
else:
in_wav_path = os.path.join(org_wav_dir, prefix.upper()) + ".wav"
headmic_path = os.path.join(org_wav_dir, prefix.upper()) + "_HEADMIC.wav"
if os.path.isfile(headmic_path):
in_wav_path = headmic_path
out_wav_path = os.path.join(tgt_wav_dir, rec_type, "%s.%d.wav" % (prefix, i))
if not os.path.isfile(in_wav_path):
raise PersephoneException("{} not a file.".format(in_wav_path))
start_time = start_time * ureg.seconds
end_time = end_time * ureg.seconds
wav.trim_wav_ms(Path(in_wav_path), Path(out_wav_path),
start_time.to(ureg.milliseconds).magnitude,
end_time.to(ureg.milliseconds).magnitude)
def concatenate(self, other):
"""
Concatenate this line string with another one.
This will add a line segment between the end point of this line string
and the start point of `other`.
Parameters
----------
other : imgaug.augmentables.lines.LineString or ndarray \
or iterable of tuple of number
The points to add to this line string.
Returns
-------
imgaug.augmentables.lines.LineString
New line string with concatenated points.
The `label` of this line string will be kept.
"""
if not isinstance(other, LineString):
other = LineString(other)
return self.deepcopy(
coords=np.concatenate([self.coords, other.coords], axis=0))
def duplicate(self):
"""Return a copy of the current Data Collection."""
collection = self.__class__(self.header.duplicate(), self.values, self.datetimes)
collection._validated_a_period = self._validated_a_period
return collection
def sg_get_context():
r"""Get current context information
Returns:
tf.sg_opt class object which contains all context information
"""
global _context
# merge current context
res = tf.sg_opt()
for c in _context:
res += c
return res
def parse_interface(iface):
"""
Returns a docco section for the given interface.
:Parameters:
iface
Parsed IDL interface dict. Keys: 'comment', 'name', 'returns', 'params'
"""
sections = [ ]
docs = iface['comment']
code = '<span class="k">interface</span> <span class="gs">%s</span> {\n' % iface['name']
for v in iface["functions"]:
func_code = ' <span class="nf">%s</span>(' % v['name']
i = 0
for p in v["params"]:
if i == 0: i = 1
else: func_code += ", "
func_code += '<span class="na">%s</span> <span class="kt">%s</span>' % (p['name'], format_type(p))
func_code += ') <span class="kt">%s</span>\n' % format_type(v['returns'])
if v.has_key('comment') and v['comment']:
if code:
sections.append(to_section(docs, code))
docs = v['comment']
code = func_code
else:
code += func_code
code += "}"
sections.append(to_section(docs, code))
return sections
def contains(self, time: datetime.datetime,
inclusive: bool = True) -> bool:
"""
Does the interval contain a momentary time?
Args:
time: the ``datetime.datetime`` to check
inclusive: use inclusive rather than exclusive range checks?
"""
if inclusive:
return self.start <= time <= self.end
else:
return self.start < time < self.end
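
# A standalone sketch of the inclusive vs. exclusive containment test used
# by contains() above, on plain datetimes rather than an interval object.
import datetime

start = datetime.datetime(2024, 1, 1)
end = datetime.datetime(2024, 1, 31)
time = datetime.datetime(2024, 1, 31)
print(start <= time <= end)  # True  (inclusive: boundary counts)
print(start < time < end)    # False (exclusive: boundary excluded)
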
def write_msr(address, value):
"""
Set the contents of the specified MSR (Machine Specific Register).
@type address: int
@param address: MSR to write.
@type value: int
@param value: Contents to write on the MSR.
@raise WindowsError:
Raises an exception on error.
@raise NotImplementedError:
Current architecture is not C{i386} or C{amd64}.
@warning:
It could potentially brick your machine.
It works on my machine, but your mileage may vary.
"""
if win32.arch not in (win32.ARCH_I386, win32.ARCH_AMD64):
raise NotImplementedError(
"MSR writing is only supported on i386 or amd64 processors.")
msr = win32.SYSDBG_MSR()
msr.Address = address
msr.Data = value
win32.NtSystemDebugControl(win32.SysDbgWriteMsr, InputBuffer = msr)
def users_with_birthday(self, month, day):
"""Return a list of user objects who have a birthday on a given date."""
users = User.objects.filter(properties___birthday__month=month, properties___birthday__day=day)
results = []
for user in users:
# TODO: permissions system
results.append(user)
return results
def _parse_posts(self, raw_posts):
"""Parse posts and returns in order."""
parsed_posts = self.parse_json(raw_posts)
# Posts are not sorted. The order is provided by
# 'order' key.
for post_id in parsed_posts['order']:
yield parsed_posts['posts'][post_id]
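
# A standalone sketch of the ordering scheme handled by _parse_posts above:
# the 'order' list gives the sequence, while 'posts' maps post id to payload.
# The sample data is invented for illustration.
parsed = {"order": ["b", "a"],
          "posts": {"a": {"title": "second"}, "b": {"title": "first"}}}
for post_id in parsed["order"]:
    print(parsed["posts"][post_id]["title"])  # prints 'first', then 'second'
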
def dashed(requestContext, seriesList, dashLength=5):
"""
Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a dotted line with segments of length F
If omitted, the default length of the segments is 5.0
Example::
&target=dashed(server01.instance01.memory.free,2.5)
"""
for series in seriesList:
series.name = 'dashed(%s, %g)' % (series.name, dashLength)
series.options['dashed'] = dashLength
return seriesList
def add_perfdata(self, *args, **kwargs):
"""
add a perfdata to the internal perfdata list
arguments:
the same arguments as for Perfdata()
"""
self._perfdata.append(Perfdata(*args, **kwargs))
def _url_to_epub(
self):
"""*generate the epub book from a URL*
"""
self.log.debug('starting the ``_url_to_epub`` method')
from polyglot import htmlCleaner
cleaner = htmlCleaner(
log=self.log,
settings=self.settings,
url=self.urlOrPath,
outputDirectory=self.outputDirectory,
title=self.title, # SET TO FALSE TO USE WEBPAGE TITLE,
style=False, # add simpdf's styling to the HTML document
metadata=True, # include metadata in generated HTML (e.g. title),
h1=False # include title as H1 at the top of the doc
)
html = cleaner.clean()
if not html:
return None
if self.footer:
footer = self._tmp_html_file(self.footer)
footer = '"%(footer)s"' % locals()
else:
footer = ""
if self.header:
header = self._tmp_html_file(self.header)
header = '"%(header)s"' % locals()
else:
header = ""
# HTML SOURCE FILE
epub = html.replace(".html", ".epub")
pandoc = self.settings["executables"]["pandoc"]
cmd = """%(pandoc)s -S -s -f html -t epub3 %(header)s '%(html)s' %(footer)s -o '%(epub)s' """ % locals(
)
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
self.log.debug('output: %(stdout)s' % locals())
try:
with open(epub):
pass
fileExists = True
except IOError:
fileExists = False
raise IOError(
"the epub %s does not exist on this machine, here is the failure message: %s" % (epub, stderr))
os.remove(html)
self.log.debug('completed the ``_url_to_epub`` method')
return epub
def predict(self, predict_set ):
"""
This method accepts a list of Instances
Eg: list_of_inputs = [ Instance([0.12, 0.54, 0.84]), Instance([0.15, 0.29, 0.49]) ]
"""
predict_data = np.array( [instance.features for instance in predict_set ] )
return self.update( predict_data )
def is_ancestor_of_family(self, id_, family_id):
"""Tests if an ``Id`` is an ancestor of a family.
arg: id (osid.id.Id): an ``Id``
arg: family_id (osid.id.Id): the ``Id`` of a family
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``family_id,`` ``false`` otherwise
raise: NotFound - ``family_id`` is not found
raise: NullArgument - ``id`` or ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_ancestor_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=family_id)
return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=family_id)
def convert_iris(directory, output_directory, output_filename='iris.hdf5'):
"""Convert the Iris dataset to HDF5.
Converts the Iris dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.Iris`. The converted dataset is
saved as 'iris.hdf5'.
This method assumes the existence of the file `iris.data`.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to `iris.hdf5`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
classes = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
data = numpy.loadtxt(
os.path.join(directory, 'iris.data'),
converters={4: lambda x: classes[x]},
delimiter=',')
features = data[:, :-1].astype('float32')
targets = data[:, -1].astype('uint8').reshape((-1, 1))
data = (('all', 'features', features),
('all', 'targets', targets))
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'feature'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
def partial_ratio(s1, s2):
"""Return the ratio of the most similar substring
as a number between 0 and 100."""
s1, s2 = utils.make_type_consistent(s1, s2)
if len(s1) <= len(s2):
shorter = s1
longer = s2
else:
shorter = s2
longer = s1
m = SequenceMatcher(None, shorter, longer)
blocks = m.get_matching_blocks()
# each block represents a sequence of matching characters in a string
# of the form (idx_1, idx_2, len)
# the best partial match will block align with at least one of those blocks
# e.g. shorter = "abcd", longer = XXXbcdeEEE
# block = (1,3,3)
# best score === ratio("abcd", "Xbcd")
scores = []
for block in blocks:
long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0
long_end = long_start + len(shorter)
long_substr = longer[long_start:long_end]
m2 = SequenceMatcher(None, shorter, long_substr)
r = m2.ratio()
if r > .995:
return 100
else:
scores.append(r)
return utils.intr(100 * max(scores))
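
# A standalone sketch of the block-alignment idea behind partial_ratio above,
# using difflib directly instead of the module's utils helpers.
from difflib import SequenceMatcher

shorter, longer = "abcd", "XXXbcdeEEE"
best = 0.0
for a, b, _size in SequenceMatcher(None, shorter, longer).get_matching_blocks():
    long_start = max(b - a, 0)
    window = longer[long_start:long_start + len(shorter)]
    best = max(best, SequenceMatcher(None, shorter, window).ratio())
print(int(round(100 * best)))  # 75, from ratio("abcd", "Xbcd")
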
def set_power_supplies(self, power_supplies):
"""
Sets the 2 power supplies with 0 = off, 1 = on.
:param power_supplies: list of 2 power supplies.
Example: [1, 0] = first power supply is on, second is off.
"""
power_supply_id = 0
for power_supply in power_supplies:
yield from self._hypervisor.send('c7200 set_power_supply "{name}" {power_supply_id} {powered_on}'.format(name=self._name,
power_supply_id=power_supply_id,
powered_on=power_supply))
log.info('Router "{name}" [{id}]: power supply {power_supply_id} state updated to {powered_on}'.format(name=self._name,
id=self._id,
power_supply_id=power_supply_id,
powered_on=power_supply))
power_supply_id += 1
self._power_supplies = power_supplies
def list_namespaces(self):
'''
List the service bus namespaces defined on the account.
'''
response = self._perform_get(
self._get_path('services/serviceBus/Namespaces/', None),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
_ServiceBusManagementXmlSerializer.xml_to_namespace)
def on_backward_end(self, **kwargs):
"Clip the gradient before the optimizer step."
if self.clip: nn.utils.clip_grad_norm_(self.learn.model.parameters(), self.clip)
def getport(self, default=None):
"""Return the port subcomponent of the URI authority as an
:class:`int`, or `default` if the original URI reference did
not contain a port or if the port was empty.
"""
port = self.port
if port:
return int(port)
else:
return default
def entity_data(self, entity_type, entity_id, history_index):
"""Return the data dict for an entity at a specific index of its
history.
"""
return self.entity_history(entity_type, entity_id)[history_index]
def add_petabencana_layer(self):
"""Add petabencana layer to the map.
This uses the PetaBencana API to fetch the latest floods in JK. See
https://data.petabencana.id/floods
"""
from safe.gui.tools.peta_bencana_dialog import PetaBencanaDialog
dialog = PetaBencanaDialog(self.iface.mainWindow(), self.iface)
dialog.show()
def get_for_site(cls, site):
"""Return the 'main menu' instance for the provided site"""
instance, created = cls.objects.get_or_create(site=site)
return instance
def auto_forward(auto=True):
"""
Context for dynamic graph execution mode.
Args:
auto (bool): Whether forward computation is executed during a
computation graph construction.
Returns: bool
"""
global __auto_forward_state
prev = __auto_forward_state
__auto_forward_state = auto
yield
__auto_forward_state = prev
def next(self):
"""
Returns the next result. If no result is available within the specified
(during construction) "timeout" then a ``PiperError`` which wraps a
``TimeoutError`` is **returned**.
If the result is a ``WorkerError`` it is also wrapped in a
``PiperError`` and is returned or raised if "debug" mode was specified
at initialization. If the result is a ``PiperError`` it is propagated.
"""
try:
next = self.outbox.next()
except StopIteration, excp:
self.log.debug('Piper %s has processed all jobs (finished)' % self)
self.finished = True
# We re-raise StopIteration as part of the iterator protocol.
# And the outbox should do the same.
raise excp
except (AttributeError, RuntimeError), excp:
# probably self.outbox.next() is self.None.next()
self.log.error('Piper %s has not yet been started.' % self)
raise PiperError('Piper %s has not yet been started.' % self, excp)
except IndexError, excp:
# probably started before connected
self.log.error('Piper %s has been started before connect.' % self)
raise PiperError('Piper %s has been started before connect.' % self, excp)
except TimeoutError, excp:
self.log.error('Piper %s timed out waited %ss.' % \
(self, self.timeout))
next = PiperError(excp)
# we do not raise TimeoutErrors so they can be skipped.
if isinstance(next, WorkerError):
# return the WorkerError instance returned (not raised) by the
# worker Process.
self.log.error('Piper %s generated %s"%s" in func. %s on argument %s' % \
(self, type(next[0]), next[0], next[1], next[2]))
if self.debug:
# This makes only sense if you are debugging a piper as it will
# most probably crash papy and python NuMap worker processes
# threads will hang.
raise PiperError('Piper %s generated %s"%s" in func %s on argument %s' % \
(self, type(next[0]), next[0], next[1], next[2]))
next = PiperError(next)
elif isinstance(next, PiperError):
# Worker/PiperErrors are wrapped by workers
if self.debug:
raise next
self.log.debug('Piper %s propagates %s' % (self, next[0]))
return next
def create(dataset, target, feature=None, model = 'resnet-50',
l2_penalty=0.01,
l1_penalty=0.0,
solver='auto', feature_rescaling=True,
convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
step_size = _DEFAULT_SOLVER_OPTIONS['step_size'],
lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
class_weights = None,
validation_set = 'auto',
verbose=True,
seed=None,
batch_size=64):
"""
Create a :class:`ImageClassifier` model.
Parameters
----------
dataset : SFrame
Input data. The column named by the 'feature' parameter will be
extracted for modeling.
target : string, or int
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in the order in which they are provided.
For example, a target variable with 'cat' and 'dog' as possible
values is mapped to 0 and 1 respectively with 0 being the base class
and 1 being the reference class. Use `model.classes` to retrieve
the order in which the classes are mapped.
feature : string, optional
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
l2_penalty : float, optional
Weight on l2 regularization of the model. The larger this weight, the
more the model coefficients shrink toward 0. This introduces bias into
the model but decreases variance, potentially leading to better
predictions. The default value is 0.01; setting this parameter to 0
corresponds to unregularized logistic regression. See the ridge
regression reference for more detail.
l1_penalty : float, optional
Weight on l1 regularization of the model. Like the l2 penalty, the
higher the l1 penalty, the more the estimated coefficients shrink toward
0. The l1 penalty, however, completely zeros out sufficiently small
coefficients, automatically indicating features that are not useful
for the model. The default weight of 0 prevents any features from
being discarded. See the LASSO regression reference for more detail.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. Available solvers are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *newton*: Newton-Raphson
- *lbfgs*: limited memory BFGS
- *fista*: accelerated gradient descent
For this model, the Newton-Raphson method is equivalent to the
iteratively re-weighted least squares algorithm. If the l1_penalty is
greater than 0, use the 'fista' solver.
The model is trained using a carefully engineered collection of methods
that are automatically picked based on the input data. The ``newton``
method works best for datasets with plenty of examples and few features
(long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for
wide datasets (i.e datasets with many coefficients). ``fista`` is the
default solver for l1-regularized linear regression. The solvers are all
automatically tuned and the default options should function well. See
the solver options guide for setting additional parameters for each of
the solvers.
See the user guide for additional details on how the solver is chosen.
(see `here
<https://apple.github.io/turicreate/docs/userguide/supervised-learning/linear-regression.html>`_)
feature_rescaling : boolean, optional
Feature rescaling is an important pre-processing step that ensures that
all features are on the same scale. An l2-norm rescaling is performed
to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that are
used to represent them. The coefficients are returned in original scale
of the problem. This process is particularly useful when features
vary widely in their ranges.
convergence_threshold : float, optional
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
lbfgs_memory_level : float, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
model : string, optional
Uses a pretrained model to bootstrap an image classifier:
- "resnet-50" : Uses a pretrained resnet model.
Exported Core ML model will be ~90M.
- "squeezenet_v1.1" : Uses a pretrained squeezenet model.
Exported Core ML model will be ~4.7M.
- "VisionFeaturePrint_Scene": Uses an OS internal feature extractor.
Only available on iOS 12.0+,
macOS 10.14+ and tvOS 12.0+.
Exported Core ML model will be ~41K.
Models are downloaded from the internet if not available locally. Once
downloaded, the models are cached for future use.
step_size : float, optional
The starting step size to use for the ``fista`` solver. The default is
set to 1.0, this is an aggressive setting. If the first iteration takes
a considerable amount of time, reducing this parameter may speed up
model training.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If set to `None`, all classes are given a weight of one. The
`auto` mode sets the class weights to be inversely proportional to the
number of examples in the training data with the given class.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
The format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
verbose : bool, optional
If True, prints progress updates and model details.
seed : int, optional
Seed for random number generation. Set this value to ensure that the
same model is created every time.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : ImageClassifier
A trained :class:`ImageClassifier` model.
Examples
--------
.. sourcecode:: python
>>> model = turicreate.image_classifier.create(data, target='is_expensive')
# Make predictions (in various forms)
>>> predictions = model.predict(data) # predictions
>>> predictions = model.classify(data) # predictions with confidence
>>> predictions = model.predict_topk(data) # Top-5 predictions (multiclass)
# Evaluate the model with ground truth data
>>> results = model.evaluate(data)
See Also
--------
ImageClassifier
"""
start_time = _time.time()
# Check model parameter
allowed_models = list(_pre_trained_models.MODELS.keys())
if _mac_ver() >= (10,14):
allowed_models.append('VisionFeaturePrint_Scene')
# Also, to make sure existing code doesn't break, replace incorrect name
# with the correct name version
if model == "VisionFeaturePrint_Screen":
print("WARNING: Correct spelling of model name is VisionFeaturePrint_Scene; VisionFeaturePrint_Screen will be removed in subsequent versions.")
model = "VisionFeaturePrint_Scene"
_tkutl._check_categorical_option_type('model', model, allowed_models)
# Check dataset parameter
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset')
if (feature is not None) and (feature not in dataset.column_names()):
raise _ToolkitError("Image feature column '%s' does not exist" % feature)
if target not in dataset.column_names():
raise _ToolkitError("Target column '%s' does not exist" % target)
if(batch_size < 1):
raise ValueError("'batch_size' must be greater than or equal to 1")
if not (isinstance(validation_set, _tc.SFrame) or validation_set == 'auto' or validation_set is None):
raise TypeError("Unrecognized value for 'validation_set'.")
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
feature_extractor = _image_feature_extractor._create_feature_extractor(model)
# Extract features
extracted_features = _tc.SFrame({
target: dataset[target],
'__image_features__': feature_extractor.extract_features(dataset, feature, verbose=verbose, batch_size=batch_size),
})
if isinstance(validation_set, _tc.SFrame):
extracted_features_validation = _tc.SFrame({
target: validation_set[target],
'__image_features__': feature_extractor.extract_features(validation_set, feature, verbose=verbose, batch_size=batch_size),
})
else:
extracted_features_validation = validation_set
# Train a classifier using the extracted features
extracted_features[target] = dataset[target]
lr_model = _tc.logistic_classifier.create(extracted_features,
features=['__image_features__'],
target=target,
max_iterations=max_iterations,
validation_set=extracted_features_validation,
seed=seed,
verbose=verbose, l2_penalty=l2_penalty, l1_penalty=l1_penalty,
solver=solver, feature_rescaling=feature_rescaling,
convergence_threshold=convergence_threshold,
step_size=step_size,
lbfgs_memory_level=lbfgs_memory_level,
class_weights=class_weights)
# set input image shape
if model in _pre_trained_models.MODELS:
input_image_shape = _pre_trained_models.MODELS[model].input_image_shape
else: # model == VisionFeaturePrint_Scene
input_image_shape = (3, 299, 299)
# Save the model
state = {
'classifier': lr_model,
'model': model,
'max_iterations': max_iterations,
'feature_extractor': feature_extractor,
'input_image_shape': input_image_shape,
'target': target,
'feature': feature,
'num_features': 1,
'num_classes': lr_model.num_classes,
'classes': lr_model.classes,
'num_examples': lr_model.num_examples,
'training_time': _time.time() - start_time,
'training_loss': lr_model.training_loss,
}
return ImageClassifier(state) | Create a :class:`ImageClassifier` model.
Parameters
----------
dataset : SFrame
Input data. The column named by the 'feature' parameter will be
extracted for modeling.
target : string, or int
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in the order in which they are provided.
For example, a target variable with 'cat' and 'dog' as possible
values is mapped to 0 and 1 respectively with 0 being the base class
and 1 being the reference class. Use `model.classes` to retrieve
the order in which the classes are mapped.
feature : string, optional
Name of the column containing the input images. 'None' (the default)
indicates that the only column of Image type in `dataset` should be used
as the feature.
l2_penalty : float, optional
Weight on l2 regularization of the model. The larger this weight, the
more the model coefficients shrink toward 0. This introduces bias into
the model but decreases variance, potentially leading to better
predictions. The default value is 0.01; setting this parameter to 0
corresponds to unregularized logistic regression. See the ridge
regression reference for more detail.
l1_penalty : float, optional
Weight on l1 regularization of the model. Like the l2 penalty, the
higher the l1 penalty, the more the estimated coefficients shrink toward
0. The l1 penalty, however, completely zeros out sufficiently small
coefficients, automatically indicating features that are not useful
for the model. The default weight of 0 prevents any features from
being discarded. See the LASSO regression reference for more detail.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. Available solvers are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *newton*: Newton-Raphson
- *lbfgs*: limited memory BFGS
- *fista*: accelerated gradient descent
For this model, the Newton-Raphson method is equivalent to the
iteratively re-weighted least squares algorithm. If the l1_penalty is
greater than 0, use the 'fista' solver.
The model is trained using a carefully engineered collection of methods
that are automatically picked based on the input data. The ``newton``
method works best for datasets with plenty of examples and few features
(long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for
wide datasets (i.e. datasets with many coefficients). ``fista`` is the
default solver for l1-regularized linear regression. The solvers are all
automatically tuned and the default options should function well. See
the solver options guide for setting additional parameters for each of
the solvers.
See the user guide for additional details on how the solver is chosen.
(see `here
<https://apple.github.io/turicreate/docs/userguide/supervised-learning/linear-regression.html>`_)
feature_rescaling : boolean, optional
Feature rescaling is an important pre-processing step that ensures that
all features are on the same scale. An l2-norm rescaling is performed
to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that are
used to represent them. The coefficients are returned in original scale
of the problem. This process is particularly useful when features
vary widely in their ranges.
convergence_threshold : float, optional
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e. a model that works well only on the training
data) if this parameter is set to a very low value.
lbfgs_memory_level : float, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
model : string, optional
Uses a pretrained model to bootstrap an image classifier:
- "resnet-50" : Uses a pretrained resnet model.
Exported Core ML model will be ~90M.
- "squeezenet_v1.1" : Uses a pretrained squeezenet model.
Exported Core ML model will be ~4.7M.
- "VisionFeaturePrint_Scene": Uses an OS internal feature extractor.
Only available on iOS 12.0+,
macOS 10.14+ and tvOS 12.0+.
Exported Core ML model will be ~41K.
Models are downloaded from the internet if not available locally. Once
downloaded, the models are cached for future use.
step_size : float, optional
The starting step size to use for the ``fista`` solver. The default is
set to 1.0, this is an aggressive setting. If the first iteration takes
a considerable amount of time, reducing this parameter may speed up
model training.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If set to `None`, all classes are given a weight of one. The
`auto` mode sets the class weights to be inversely proportional to the
number of examples in the training data with the given class.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
The format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
verbose : bool, optional
If True, prints progress updates and model details.
seed : int, optional
Seed for random number generation. Set this value to ensure that the
same model is created every time.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : ImageClassifier
A trained :class:`ImageClassifier` model.
Examples
--------
.. sourcecode:: python
>>> model = turicreate.image_classifier.create(data, target='is_expensive')
# Make predictions (in various forms)
>>> predictions = model.predict(data) # predictions
>>> predictions = model.classify(data) # predictions with confidence
>>> predictions = model.predict_topk(data) # Top-5 predictions (multiclass)
# Evaluate the model with ground truth data
>>> results = model.evaluate(data)
See Also
--------
ImageClassifier |
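Beyond the inline docstring examples, a minimal end-to-end sketch of how this create function is typically used is shown below; the image directory, labelling rule, and column names are illustrative assumptions rather than anything taken from the code above.

import turicreate as tc

# Illustrative data loading; the path and the labelling rule are assumptions.
data = tc.image_analysis.load_images('./photos', with_path=True)
data['label'] = data['path'].apply(lambda p: 'cat' if 'cat' in p else 'dog')

model = tc.image_classifier.create(data, target='label', model='resnet-50',
                                    max_iterations=25)
predictions = model.predict(data)
model.export_coreml('MyImageClassifier.mlmodel')  # export for use with Core ML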
def force_leave(self, node):
"""Force a failed gossip member into the left state.
https://www.nomadproject.io/docs/http/agent-force-leave.html
returns: 200 status code
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"node": node}
return self.request("force-leave", params=params, method="post").status_code | Force a failed gossip member into the left state.
https://www.nomadproject.io/docs/http/agent-force-leave.html
returns: 200 status code
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException |
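A hedged usage sketch, assuming this method is exposed on the `agent` attribute of python-nomad's `Nomad` client; the host and node name are placeholders.

import nomad

n = nomad.Nomad(host="127.0.0.1", timeout=5)          # connection details are illustrative
status_code = n.agent.force_leave("worker-node-1")    # 200 on success
print(status_code)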
def feature_path(self, gff_path):
"""Load a GFF file with information on a single sequence and store features in the ``features`` attribute
Args:
gff_path: Path to GFF file.
"""
if not gff_path:
self.feature_dir = None
self.feature_file = None
else:
if not op.exists(gff_path):
raise OSError('{}: file does not exist!'.format(gff_path))
if not op.dirname(gff_path):
self.feature_dir = '.'
else:
self.feature_dir = op.dirname(gff_path)
self.feature_file = op.basename(gff_path) | Load a GFF file with information on a single sequence and store features in the ``features`` attribute
Args:
gff_path: Path to GFF file. |
def post(self, route: str(), callback: object()):
"""
Binds a POST route with the given callback
:rtype: object
"""
self.__set_route('post', {route: callback})
return RouteMapping | Binds a POST route with the given callback
:rtype: object |
def getWorksheet(self):
"""Returns the Worksheet to which this analysis belongs to, or None
"""
worksheet = self.getBackReferences('WorksheetAnalysis')
if not worksheet:
return None
if len(worksheet) > 1:
logger.error(
"Analysis %s is assigned to more than one worksheet."
% self.getId())
return worksheet[0] | Returns the Worksheet to which this analysis belongs, or None
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
"""
Get a word frequency list as a dictionary, mapping tokens to
frequencies as floating-point probabilities.
"""
freqs = {}
pack = get_frequency_list(lang, wordlist, match_cutoff)
for index, bucket in enumerate(pack):
freq = cB_to_freq(-index)
for word in bucket:
freqs[word] = freq
return freqs | Get a word frequency list as a dictionary, mapping tokens to
frequencies as floating-point probabilities. |
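This helper appears to come from the wordfreq package; assuming it is importable from the top-level module, a quick look at the returned mapping might be:

from wordfreq import get_frequency_dict   # assumes the public wordfreq package layout

freqs = get_frequency_dict('en', wordlist='best')
print(freqs['the'])   # roughly 0.05, i.e. about one token in twenty
print(len(freqs))     # number of words covered by the frequency list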
def create(self, instance, parameters, existing=True):
"""Create an instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
Keyword Arguments:
existing (bool): True (use an existing cluster), False (create a new cluster)
Returns:
ProvisionedServiceSpec: Status
"""
return self.service_instance.create(instance, parameters, existing) | Create an instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
Keyword Arguments:
existing (bool): True (use an existing cluster), False (create a new cluster)
Returns:
ProvisionedServiceSpec: Status |
def select_by_visible_text(self, text):
"""
Selects the option(s) in the Web List whose visible text matches the given text
@params text - string visible text
"""
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
return
matched = True
# in case the target option isn't found by xpath
# attempt to find it by direct comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
sub_string_without_space = self._get_longest_token(text)
if sub_string_without_space == "":
candidates = self.get_options()
else:
xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
candidates = self.find_elements_by_xpath(xpath)
for candidate in candidates:
if text == candidate.text:
self._set_selected(candidate)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: " + str(text)) | Selects the option(s) in the Web List whose visible text matches the given text
@params text - string visible text |
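This method mirrors Selenium's Select.select_by_visible_text with an extra token-based fallback when the exact xpath match fails. For comparison, a plain-Selenium sketch of the same operation; the URL and element id are assumptions.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select

driver = webdriver.Chrome()
driver.get("https://example.com/form")                 # illustrative URL
dropdown = Select(driver.find_element(By.ID, "country"))
dropdown.select_by_visible_text("United Kingdom")      # selects the matching <option>
driver.quit()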
def get_under_hollow(self):
""" Return HCP if an atom is present below the adsorbate in the
subsurface layer and FCC if not"""
C0 = self.B[-1:] * (3, 3, 1)
ads_pos = C0.positions[4]
C = self.get_subsurface_layer() * (3, 3, 1)
ret = 'FCC'
if np.any([np.linalg.norm(ads_pos[:2] - ele.position[:2]) < 0.5 *
cradii[ele.number] for ele in C]):
ret = 'HCP'
return ret | Return HCP if an atom is present below the adsorbate in the
subsurface layer and FCC if not |
def cross_signal(s1, s2, continuous=0):
""" return a signal with the following
1 : when all values of s1 cross all values of s2
-1 : when all values of s1 cross below all values of s2
0 : if s1 < max(s2) and s1 > min(s2)
np.nan : if s1 or s2 contains np.nan at position
s1: Series, DataFrame, float, int, or tuple(float|int)
s2: Series, DataFrame, float, int, or tuple(float|int)
continuous: bool, if true then once the signal starts it is always 1 or -1
"""
def _convert(src, other):
if isinstance(src, pd.DataFrame):
return src.min(axis=1, skipna=0), src.max(axis=1, skipna=0)
elif isinstance(src, pd.Series):
return src, src
elif isinstance(src, (int, float)):
s = pd.Series(src, index=other.index)
return s, s
elif isinstance(src, (tuple, list)):
l, u = min(src), max(src)
assert l <= u, 'lower bound must be less than upper bound'
lower, upper = pd.Series(l, index=other.index), pd.Series(u, index=other.index)
return lower, upper
else:
raise Exception('unable to handle type %s' % type(src))
lower1, upper1 = _convert(s1, s2)
lower2, upper2 = _convert(s2, s1)
df = pd.DataFrame({'upper1': upper1, 'lower1': lower1, 'upper2': upper2, 'lower2': lower2})
df.ffill(inplace=True)
signal = pd.Series(np.nan, index=df.index)
signal[df.upper1 > df.upper2] = 1
signal[df.lower1 < df.lower2] = -1
if continuous:
# Just roll with 1, -1
signal = signal.fillna(method='ffill')
m1, m2 = df.upper1.first_valid_index(), df.upper2.first_valid_index()
if m1 is not None or m2 is not None:
m1 = m2 if m1 is None else m1
m2 = m1 if m2 is None else m2
fv = max(m1, m2)
if np.isnan(signal[fv]):
signal[fv] = 0
signal.ffill(inplace=1)
else:
signal[(df.upper1 < df.upper2) & (df.lower1 > df.lower2)] = 0
# special handling when equal, determine where it previously was
eq = (df.upper1 == df.upper2)
if eq.any(): # Set to prior value
tmp = signal[eq]
for i in tmp.index:
loc = signal.index.get_loc(i)
if loc != 0:
u, l = df.upper2.iloc[loc], df.lower2.iloc[loc]
ps = signal.iloc[loc - 1]
if u == l or ps == 1.: # Line coming from above upper bound if ps == 1
signal[i] = ps
else:
signal[i] = 0
eq = (df.lower1 == df.lower2)
if eq.any(): # Set to prior value
tmp = signal[eq]
for i in tmp.index:
loc = signal.index.get_loc(i)
if loc != 0:
u, l = df.upper2.iloc[loc], df.lower2.iloc[loc]
ps = signal.iloc[loc - 1]
if u == l or ps == -1.: # Line coming from below lower bound if ps == -1
signal[i] = ps
else:
signal[i] = 0
return signal | return a signal with the following
1 : when all values of s1 cross all values of s2
-1 : when all values of s1 cross below all values of s2
0 : if s1 < max(s2) and s1 > min(s2)
np.nan : if s1 or s2 contains np.nan at position
s1: Series, DataFrame, float, int, or tuple(float|int)
s2: Series, DataFrame, float, int, or tuple(float|int)
continuous: bool, if true then once the signal starts it is always 1 or -1
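A small worked example, assuming `cross_signal` as defined above is in scope together with its module-level imports (numpy as np, pandas as pd); a price series is compared against a fixed band.

import pandas as pd

px = pd.Series([100., 102., 106., 104., 96., 94., 98.])
sig = cross_signal(px, (95, 105))     # band: lower bound 95, upper bound 105
print(sig.tolist())                   # expected roughly [0, 0, 1, 0, 0, -1, 0]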
def _plot_simple_fault(self, source, border='k-', border_width=1.0):
"""
Plots the simple fault source as a composite of the fault trace
and the surface projection of the fault.
:param source:
Fault source as instance of :class: mtkSimpleFaultSource
:param str border:
Line properties of border (see matplotlib documentation for detail)
:param float border_width:
Line width of border (see matplotlib documentation for detail)
"""
# Get the trace
trace_lons = np.array([pnt.longitude
for pnt in source.fault_trace.points])
trace_lats = np.array([pnt.latitude
for pnt in source.fault_trace.points])
surface_projection = _fault_polygon_from_mesh(source)
# Plot surface projection first
x, y = self.m(surface_projection[:, 0], surface_projection[:, 1])
self.m.plot(x, y, border, linewidth=border_width)
# Plot fault trace
x, y = self.m(trace_lons, trace_lats)
self.m.plot(x, y, border, linewidth=1.3 * border_width) | Plots the simple fault source as a composite of the fault trace
and the surface projection of the fault.
:param source:
Fault source as instance of :class: mtkSimpleFaultSource
:param str border:
Line properties of border (see matplotlib documentation for detail)
:param float border_width:
Line width of border (see matplotlib documentation for detail) |
def textFromHTML(html):
"""
Cleans and parses text from the given HTML.
"""
cleaner = lxml.html.clean.Cleaner(scripts=True)
cleaned = cleaner.clean_html(html)
return lxml.html.fromstring(cleaned).text_content() | Cleans and parses text from the given HTML. |
def get(self, key, value=None):
"x.get(k[,d]) -> x[k] if k in x, else d. d defaults to None."
_key = self._prepare_key(key)
prefix, node = self._get_node_by_key(_key)
if prefix==_key and node.value is not None:
return self._unpickle_value(node.value)
else:
return value | x.get(k[,d]) -> x[k] if k in x, else d. d defaults to None. |
def _gser(a,x):
"Series representation of Gamma. NumRec sect 6.1."
ITMAX=100
EPS=3.e-7
gln=lgamma(a)
assert(x>=0),'x < 0 in gser'
if x == 0 : return 0,gln
ap = a
delt = sum = 1./a
for i in range(ITMAX):
ap=ap+1.
delt=delt*x/ap
sum=sum+delt
if abs(delt) < abs(sum)*EPS: break
else:
print('a too large, ITMAX too small in gser')
gamser=sum*np.exp(-x+a*np.log(x)-gln)
return gamser,gln | Series representation of Gamma. NumRec sect 6.1. |
def initialize(self, argv=None):
"""initialize the app"""
super(BaseParallelApplication, self).initialize(argv)
self.to_work_dir()
self.reinit_logging() | initialize the app |
def attach(self, lun_or_snap, skip_hlu_0=False):
""" Attaches lun, snap or member snap of cg snap to host.
Don't pass cg snapshot in as `lun_or_snap`.
:param lun_or_snap: the lun, snap, or a member snap of cg snap
:param skip_hlu_0: whether to skip hlu 0
:return: the hlu number
"""
# `UnityResourceAlreadyAttachedError` check was removed due to there
# is a host cache existing in Cinder driver. If the lun was attached to
# the host and the info was stored in the cache, wrong hlu would be
# returned.
# And attaching a lun to a host twice would success, if Cinder retry
# triggers another attachment of same lun to the host, the cost would
# be one more rest request of `modifyLun` and one for host instance
# query.
try:
return self._attach_with_retry(lun_or_snap, skip_hlu_0)
except ex.SystemAPINotSupported:
# Attaching snap to host not support before 4.1.
raise
except ex.UnityAttachExceedLimitError:
# The number of luns exceeds system limit
raise
except: # noqa
# other attach error, remove this lun if already attached
self.detach(lun_or_snap)
raise | Attaches lun, snap or member snap of cg snap to host.
Don't pass cg snapshot in as `lun_or_snap`.
:param lun_or_snap: the lun, snap, or a member snap of cg snap
:param skip_hlu_0: whether to skip hlu 0
:return: the hlu number |
def image_props(event):
"""
Get information for a pick event on an ``AxesImage`` artist. Returns a dict
of "i" & "j" index values of the image for the point clicked, and "z": the
(uninterpolated) value of the image at i,j.
Parameters
-----------
event : PickEvent
The pick event to process
Returns
--------
props : dict
A dict with keys: z, i, j
"""
x, y = event.mouseevent.xdata, event.mouseevent.ydata
i, j = _coords2index(event.artist, x, y)
z = event.artist.get_array()[i,j]
if z.size > 1:
# Override default numpy formatting for this specific case. Bad idea?
z = ', '.join('{:0.3g}'.format(item) for item in z)
return dict(z=z, i=i, j=j) | Get information for a pick event on an ``AxesImage`` artist. Returns a dict
of "i" & "j" index values of the image for the point clicked, and "z": the
(uninterpolated) value of the image at i,j.
Parameters
-----------
event : PickEvent
The pick event to process
Returns
--------
props : dict
A dict with keys: z, i, j |
async def find(
self,
*,
types=None,
data=None,
countries=None,
post=False,
strict=False,
dnsbl=None,
limit=0,
**kwargs
):
"""Gather and check proxies from providers or from a passed data.
:ref:`Example of usage <proxybroker-examples-find>`.
:param list types:
Types (protocols) that need to be check on support by proxy.
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25
And levels of anonymity (HTTP only): Transparent, Anonymous, High
:param data:
(optional) String or list with proxies. Can also be a file-like
object that supports the `read()` method. Used instead of providers
:param list countries:
(optional) List of ISO country codes where should be located
proxies
:param bool post:
(optional) Flag indicating use POST instead of GET for requests
when checking proxies
:param bool strict:
(optional) Flag indicating that anonymity levels of types
(protocols) supported by a proxy must be equal to the requested
types and levels of anonymity. By default, strict mode is off and
for a successful check is enough to satisfy any one of the
requested types
:param list dnsbl:
(optional) Spam databases for proxy checking.
`Wiki <https://en.wikipedia.org/wiki/DNSBL>`_
:param int limit: (optional) The maximum number of proxies
:raises ValueError:
If :attr:`types` not given.
.. versionchanged:: 0.2.0
Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`.
Changed: :attr:`types` is required.
"""
ip = await self._resolver.get_real_ext_ip()
types = _update_types(types)
if not types:
raise ValueError('`types` is required')
self._checker = Checker(
judges=self._judges,
timeout=self._timeout,
verify_ssl=self._verify_ssl,
max_tries=self._max_tries,
real_ext_ip=ip,
types=types,
post=post,
strict=strict,
dnsbl=dnsbl,
loop=self._loop,
)
self._countries = countries
self._limit = limit
tasks = [asyncio.ensure_future(self._checker.check_judges())]
if data:
task = asyncio.ensure_future(self._load(data, check=True))
else:
task = asyncio.ensure_future(self._grab(types, check=True))
tasks.append(task)
self._all_tasks.extend(tasks) | Gather and check proxies from providers or from a passed data.
:ref:`Example of usage <proxybroker-examples-find>`.
:param list types:
Types (protocols) that need to be checked for support by the proxy.
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25
And levels of anonymity (HTTP only): Transparent, Anonymous, High
:param data:
(optional) String or list with proxies. Can also be a file-like
object that supports the `read()` method. Used instead of providers
:param list countries:
(optional) List of ISO country codes where should be located
proxies
:param bool post:
(optional) Flag indicating use POST instead of GET for requests
when checking proxies
:param bool strict:
(optional) Flag indicating that anonymity levels of types
(protocols) supported by a proxy must be equal to the requested
types and levels of anonymity. By default, strict mode is off and
for a successful check is enough to satisfy any one of the
requested types
:param list dnsbl:
(optional) Spam databases for proxy checking.
`Wiki <https://en.wikipedia.org/wiki/DNSBL>`_
:param int limit: (optional) The maximum number of proxies
:raises ValueError:
If :attr:`types` not given.
.. versionchanged:: 0.2.0
Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`.
Changed: :attr:`types` is required. |
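A hedged end-to-end sketch based on ProxyBroker's documented pattern; the Broker/Queue wiring below is taken from the project's examples rather than from the snippet itself.

import asyncio
from proxybroker import Broker

async def show(proxies):
    while True:
        proxy = await proxies.get()
        if proxy is None:
            break
        print(proxy)

proxies = asyncio.Queue()
broker = Broker(proxies)
tasks = asyncio.gather(broker.find(types=['HTTP', 'HTTPS'], limit=10),
                       show(proxies))
asyncio.get_event_loop().run_until_complete(tasks)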
def whoami(ctx, opts):
"""Retrieve your current authentication status."""
click.echo("Retrieving your authentication status from the API ... ", nl=False)
context_msg = "Failed to retrieve your authentication status!"
with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
with maybe_spinner(opts):
is_auth, username, email, name = get_user_brief()
click.secho("OK", fg="green")
click.echo("You are authenticated as:")
if not is_auth:
click.secho("Nobody (i.e. anonymous user)", fg="yellow")
else:
click.secho(
"%(name)s (slug: %(username)s, email: %(email)s)"
% {
"name": click.style(name, fg="cyan"),
"username": click.style(username, fg="magenta"),
"email": click.style(email, fg="green"),
}
) | Retrieve your current authentication status. |
def apply_parallel(func: Callable,
data: List[Any],
cpu_cores: int = None) -> List[Any]:
"""
Apply function to list of elements.
Automatically determines the chunk size.
"""
if not cpu_cores:
cpu_cores = cpu_count()
try:
chunk_size = ceil(len(data) / cpu_cores)
pool = Pool(cpu_cores)
transformed_data = pool.map(func, chunked(data, chunk_size), chunksize=1)
finally:
pool.close()
pool.join()
return transformed_data | Apply function to list of elements.
Automatically determines the chunk size. |
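Note that `func` receives a whole chunk (a list of elements), not a single element, because the data is pre-chunked before `pool.map`. A minimal usage sketch, assuming the snippet's own `Pool`, `cpu_count`, `ceil`, and `chunked` imports are available at module level; the main-guard matters for multiprocessing on some platforms.

def square_chunk(chunk):
    return [x * x for x in chunk]

if __name__ == '__main__':
    out = apply_parallel(square_chunk, list(range(10)), cpu_cores=2)
    print(out)   # two lists of five squares each, one per chunk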
def check_classes(self, scope=-1):
""" Check if pending identifiers are defined or not. If not,
returns a syntax error. If no scope is given, the current
one is checked.
"""
for entry in self[scope].values():
if entry.class_ is None:
syntax_error(entry.lineno, "Unknown identifier '%s'" % entry.name) | Check if pending identifiers are defined or not. If not,
returns a syntax error. If no scope is given, the current
one is checked. |
def make_default_options_response(self):
"""This method is called to create the default `OPTIONS` response.
This can be changed through subclassing to change the default
behavior of `OPTIONS` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e:
methods = e.valid_methods
except HTTPException as e:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv | This method is called to create the default `OPTIONS` response.
This can be changed through subclassing to change the default
behavior of `OPTIONS` responses.
.. versionadded:: 0.7 |
def reindex_repo_dev_panel(self, project, repository):
"""
Reindex all of the Jira issues related to this repository, including branches and pull requests.
This automatically happens as part of an upgrade, and calling this manually should only be required
if something unforeseen happens and the index becomes out of sync.
The authenticated user must have REPO_ADMIN permission for the specified repository to call this resource.
:param project:
:param repository:
:return:
"""
url = 'rest/jira-dev/1.0/projects/{projectKey}/repos/{repositorySlug}/reindex'.format(projectKey=project,
repositorySlug=repository)
return self.post(url) | Reindex all of the Jira issues related to this repository, including branches and pull requests.
This automatically happens as part of an upgrade, and calling this manually should only be required
if something unforeseen happens and the index becomes out of sync.
The authenticated user must have REPO_ADMIN permission for the specified repository to call this resource.
:param project:
:param repository:
:return: |
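A hedged usage sketch, assuming this method lives on the Bitbucket client of atlassian-python-api; the URL and credentials are placeholders.

from atlassian import Bitbucket

bitbucket = Bitbucket(url="https://bitbucket.example.com",
                      username="admin", password="secret")
bitbucket.reindex_repo_dev_panel("PROJ", "my-repo")   # kicks off a Jira dev-panel reindex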
def get(self, name, **kwargs):
"""Retrieves a :py:class:`Parameter` with name ``self.prefix+name``. If not found,
:py:func:`get` will first try to retrieve it from "shared" dict. If still not
found, :py:func:`get` will create a new :py:class:`Parameter` with key-word arguments and
insert it to self.
Parameters
----------
name : str
Name of the desired Parameter. It will be prepended with this dictionary's
prefix.
**kwargs : dict
The rest of key-word arguments for the created :py:class:`Parameter`.
Returns
-------
Parameter
The created or retrieved :py:class:`Parameter`.
"""
name = self.prefix + name
param = self._get_impl(name)
if param is None: # pylint: disable=too-many-nested-blocks
param = Parameter(name, **kwargs)
self._params[name] = param
else:
for k, v in kwargs.items():
if hasattr(param, k) and getattr(param, k) is not None:
existing = getattr(param, k)
if k == 'shape' and len(v) == len(existing):
inferred_shape = []
matched = True
for dim1, dim2 in zip(v, existing):
if dim1 != dim2 and dim1 * dim2 != 0:
matched = False
break
elif dim1 == dim2:
inferred_shape.append(dim1)
elif dim1 == 0:
inferred_shape.append(dim2)
else:
inferred_shape.append(dim1)
if matched:
param._shape = tuple(inferred_shape)
continue
elif k == 'dtype' and np.dtype(v) == np.dtype(existing):
continue
assert v is None or v == existing, \
"Cannot retrieve Parameter '%s' because desired attribute " \
"does not match with stored for attribute '%s': " \
"desired '%s' vs stored '%s'."%(
name, k, str(v), str(getattr(param, k)))
else:
setattr(param, k, v)
return param | Retrieves a :py:class:`Parameter` with name ``self.prefix+name``. If not found,
:py:func:`get` will first try to retrieve it from "shared" dict. If still not
found, :py:func:`get` will create a new :py:class:`Parameter` with key-word arguments and
insert it to self.
Parameters
----------
name : str
Name of the desired Parameter. It will be prepended with this dictionary's
prefix.
**kwargs : dict
The rest of key-word arguments for the created :py:class:`Parameter`.
Returns
-------
Parameter
The created or retrieved :py:class:`Parameter`. |
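A sketch of the shape-inference behaviour described above, assuming the surrounding class is MXNet Gluon's ParameterDict (1.x API).

from mxnet.gluon import ParameterDict

params = ParameterDict(prefix='dense0_')
w = params.get('weight', shape=(128, 0))      # 0 marks a dimension to be inferred later
same = params.get('weight', shape=(128, 64))  # compatible request fills in the unknown dim
print(w is same, w.shape)                     # True (128, 64)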
def preprocess(S, coloring_method=None):
"""Preprocess splitting functions.
Parameters
----------
S : csr_matrix
Strength of connection matrix
method : string
Algorithm used to compute the vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
weights: ndarray
Weights from a graph coloring of G
S : csr_matrix
Strength matrix with ones
T : csr_matrix
transpose of S
G : csr_matrix
union of S and T
Notes
-----
Performs the following operations:
- Checks input strength of connection matrix S
- Replaces S.data with ones
- Creates T = S.T in CSR format
- Creates G = S union T in CSR format
- Creates random weights
- Augments weights with graph coloring (if use_color == True)
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
if S.shape[0] != S.shape[1]:
raise ValueError('expected square matrix, shape=%s' % (S.shape,))
N = S.shape[0]
S = csr_matrix((np.ones(S.nnz, dtype='int8'), S.indices, S.indptr),
shape=(N, N))
T = S.T.tocsr() # transpose S for efficient column access
G = S + T # form graph (must be symmetric)
G.data[:] = 1
weights = np.ravel(T.sum(axis=1)) # initial weights
# weights -= T.diagonal() # discount self loops
if coloring_method is None:
weights = weights + sp.rand(len(weights))
else:
coloring = vertex_coloring(G, coloring_method)
num_colors = coloring.max() + 1
weights = weights + (sp.rand(len(weights)) + coloring)/num_colors
return (weights, G, S, T) | Preprocess splitting functions.
Parameters
----------
S : csr_matrix
Strength of connection matrix
method : string
Algorithm used to compute the vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
weights: ndarray
Weights from a graph coloring of G
S : csr_matrix
Strength matrix with ones
T : csr_matrix
transpose of S
G : csr_matrix
union of S and T
Notes
-----
Performs the following operations:
- Checks input strength of connection matrix S
- Replaces S.data with ones
- Creates T = S.T in CSR format
- Creates G = S union T in CSR format
- Creates random weights
- Augments weights with graph coloring (if use_color == True) |
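A small sketch with a 1-D Laplacian standing in for a strength-of-connection matrix, assuming `preprocess` above is in scope with its own module-level imports (numpy, scipy, vertex_coloring).

import numpy as np
from scipy.sparse import csr_matrix

S = csr_matrix(np.array([[ 2., -1.,  0.],
                         [-1.,  2., -1.],
                         [ 0., -1.,  2.]]))
weights, G, S1, T = preprocess(S)       # default: random weights, no coloring
print(weights.shape, G.nnz)             # (3,) and 7 stored entries for this example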
def get_thread(self, thread_id, update_if_cached=True, raise_404=False):
"""Get a thread from 4chan via 4chan API.
Args:
thread_id (int): Thread ID
update_if_cached (bool): Whether the thread should be updated if it's already in our cache
raise_404 (bool): Raise an Exception if thread has 404'd
Returns:
:class:`basc_py4chan.Thread`: Thread object
"""
# see if already cached
cached_thread = self._thread_cache.get(thread_id)
if cached_thread:
if update_if_cached:
cached_thread.update()
return cached_thread
res = self._requests_session.get(
self._url.thread_api_url(
thread_id = thread_id
)
)
# check if thread exists
if raise_404:
res.raise_for_status()
elif not res.ok:
return None
thread = Thread._from_request(self, res, thread_id)
self._thread_cache[thread_id] = thread
return thread | Get a thread from 4chan via 4chan API.
Args:
thread_id (int): Thread ID
update_if_cached (bool): Whether the thread should be updated if it's already in our cache
raise_404 (bool): Raise an Exception if thread has 404'd
Returns:
:class:`basc_py4chan.Thread`: Thread object |
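A hedged usage sketch with basc_py4chan's Board object, which this method belongs to; the board name and thread id are placeholders.

import basc_py4chan

board = basc_py4chan.Board('tv')
thread = board.get_thread(123456789)   # placeholder id; returns None if the thread 404s
print(thread)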
def smooth(polylines):
"""
smooth every polyline using spline interpolation
"""
for c in polylines:
if len(c) < 9:
# smoothing wouldn't make sense here
continue
x = c[:, 0]
y = c[:, 1]
t = np.arange(x.shape[0], dtype=float)
t /= t[-1]
x = UnivariateSpline(t, x)(t)
y = UnivariateSpline(t, y)(t)
c[:, 0] = x
c[:, 1] = y | smooth every polyline using spline interpolation |
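A quick sketch: a noisy circle smoothed in place, assuming `smooth` above is in scope with numpy and scipy's UnivariateSpline imported at module level.

import numpy as np

t = np.linspace(0, 2 * np.pi, 50)
circle = np.column_stack([np.cos(t), np.sin(t)])
noisy = circle + np.random.normal(scale=0.05, size=circle.shape)
polylines = [noisy]
smooth(polylines)            # modifies the arrays in place
print(polylines[0].shape)    # still (50, 2), now with spline-smoothed coordinates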
def map_concepts_to_indicators(
self, n: int = 1, min_temporal_res: Optional[str] = None
):
""" Map each concept node in the AnalysisGraph instance to one or more
tangible quantities, known as 'indicators'.
Args:
n: Number of matches to keep
min_temporal_res: Minimum temporal resolution that the indicators
must have data for.
"""
for node in self.nodes(data=True):
query_parts = [
"select Indicator from concept_to_indicator_mapping",
f"where `Concept` like '{node[0]}'",
]
# TODO May need to delve into SQL/database stuff a bit more deeply
# for this. Foreign keys perhaps?
query = " ".join(query_parts)
results = engine.execute(query)
if min_temporal_res is not None:
if min_temporal_res not in ["month"]:
raise ValueError("min_temporal_res must be 'month'")
vars_with_required_temporal_resolution = [
r[0]
for r in engine.execute(
"select distinct `Variable` from indicator where "
f"`{min_temporal_res.capitalize()}` is not null"
)
]
results = [
r
for r in results
if r[0] in vars_with_required_temporal_resolution
]
node[1]["indicators"] = {
x: Indicator(x, "MITRE12")
for x in [r[0] for r in take(n, results)]
} | Map each concept node in the AnalysisGraph instance to one or more
tangible quantities, known as 'indicators'.
Args:
n: Number of matches to keep
min_temporal_res: Minimum temporal resolution that the indicators
must have data for. |
def send_stream_tail(self):
"""
Send stream tail via the transport.
"""
with self.lock:
if not self._socket or self._hup:
logger.debug(u"Cannot send stream closing tag: already closed")
return
data = self._serializer.emit_tail()
try:
self._write(data.encode("utf-8"))
except (IOError, SystemError, socket.error), err:
logger.debug(u"Sending stream closing tag failed: {0}"
.format(err))
self._serializer = None
self._hup = True
if self._tls_state is None:
try:
self._socket.shutdown(socket.SHUT_WR)
except socket.error:
pass
self._set_state("closing")
self._write_queue.clear()
self._write_queue_cond.notify() | Send stream tail via the transport. |
def pack(self, value=None):
"""Pack the message into a binary data.
One of the basic operations on a Message is the pack operation. During
the packing process, we convert all message attributes to binary
format.
Since that this is usually used before sending the message to a switch,
here we also call :meth:`update_header_length`.
.. seealso:: This method calls its parent's :meth:`GenericStruct.pack`
after :meth:`update_header_length`.
Returns:
bytes: The binary data that represents the Message.
Raises:
Exception: If there are validation errors.
"""
if value is None:
self.update_header_length()
return super().pack()
elif isinstance(value, type(self)):
return value.pack()
else:
msg = "{} is not an instance of {}".format(value,
type(self).__name__)
raise PackException(msg) | Pack the message into a binary data.
One of the basic operations on a Message is the pack operation. During
the packing process, we convert all message attributes to binary
format.
Since that this is usually used before sending the message to a switch,
here we also call :meth:`update_header_length`.
.. seealso:: This method calls its parent's :meth:`GenericStruct.pack`
after :meth:`update_header_length`.
Returns:
bytes: The binary data that represents the Message.
Raises:
Exception: If there are validation errors. |
def register_metrics(self, metrics_collector, interval):
"""Registers its metrics to a given metrics collector with a given interval"""
for field, metrics in self.metrics.items():
metrics_collector.register_metric(field, metrics, interval) | Registers its metrics to a given metrics collector with a given interval |
def transform(self, Y):
r"""Compute all pairwise distances between `self.X_fit_` and `Y`.
Parameters
----------
y : array-like, shape = (n_samples_y, n_features)
Returns
-------
kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_)
Kernel matrix. Values are normalized to lie within [0, 1].
"""
check_is_fitted(self, 'X_fit_')
n_samples_x, n_features = self.X_fit_.shape
Y = numpy.asarray(Y)
if Y.shape[1] != n_features:
raise ValueError('expected array with %d features, but got %d' % (n_features, Y.shape[1]))
n_samples_y = Y.shape[0]
mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float)
continuous_ordinal_kernel_with_ranges(Y[:, self._numeric_columns].astype(numpy.float64),
self.X_fit_[:, self._numeric_columns].astype(numpy.float64),
self._numeric_ranges, mat)
if len(self._nominal_columns) > 0:
_nominal_kernel(Y[:, self._nominal_columns],
self.X_fit_[:, self._nominal_columns],
mat)
mat /= n_features
return mat | r"""Compute all pairwise distances between `self.X_fit_` and `Y`.
Parameters
----------
y : array-like, shape = (n_samples_y, n_features)
Returns
-------
kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_)
Kernel matrix. Values are normalized to lie within [0, 1]. |
def run_recipe_timed(task, recipe, rinput):
"""Run the recipe and count the time it takes."""
_logger.info('running recipe')
now1 = datetime.datetime.now()
task.state = 1
task.time_start = now1
#
result = recipe(rinput)
_logger.info('result: %r', result)
task.result = result
#
now2 = datetime.datetime.now()
task.state = 2
task.time_end = now2
return task | Run the recipe and count the time it takes. |
def __fade_in(self):
"""
Starts the Widget fade in.
"""
self.__timer.stop()
self.__vector = self.__fade_speed
self.__timer.start() | Starts the Widget fade in. |
def __replace_capall(sentence):
"""here we replace all instances of #CAPALL and cap the entire sentence.
Don't believe that CAPALL is buggy anymore as it forces all uppercase OK?
:param _sentence:
"""
# print "\nReplacing CAPITALISE: "
if sentence is not None:
while sentence.find('#CAPALL') != -1:
# _cap_index = _sentence.find('#CAPALL')
sentence = sentence.upper()
sentence = sentence.replace('#CAPALL ', '', 1)
if sentence.find('#CAPALL') == -1:
return sentence
else:
return sentence | Replace all instances of #CAPALL and capitalise the entire sentence.
CAPALL no longer appears to be buggy, since it forces the whole sentence to uppercase.
:param _sentence: |
def decorate_set_on_listener(prototype):
""" Private decorator for use in the editor.
Allows the Editor to create listener methods.
Args:
params (str): The list of parameters for the listener
method (e.g. "(self, new_value)")
"""
# noinspection PyDictCreation,PyProtectedMember
def add_annotation(method):
method._event_info = {}
method._event_info['name'] = method.__name__
method._event_info['prototype'] = prototype
return method
return add_annotation | Private decorator for use in the editor.
Allows the Editor to create listener methods.
Args:
params (str): The list of parameters for the listener
method (e.g. "(self, new_value)")
def list_members(self, list_id=None, slug=None, owner_screen_name=None, owner_id=None):
"""
Returns the members of a list.
List id or (slug and (owner_screen_name or owner_id)) are required
"""
assert list_id or (slug and (owner_screen_name or owner_id))
url = 'https://api.twitter.com/1.1/lists/members.json'
params = {'cursor': -1}
if list_id:
params['list_id'] = list_id
else:
params['slug'] = slug
if owner_screen_name:
params['owner_screen_name'] = owner_screen_name
else:
params['owner_id'] = owner_id
while params['cursor'] != 0:
try:
resp = self.get(url, params=params, allow_404=True)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.error("no matching list")
raise e
users = resp.json()
for user in users['users']:
yield user
params['cursor'] = users['next_cursor'] | Returns the members of a list.
List id or (slug and (owner_screen_name or owner_id)) are required |
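A hedged usage sketch, assuming this generator belongs to twarc's Twitter client; the credentials and list details are placeholders.

from twarc import Twarc

t = Twarc("CONSUMER_KEY", "CONSUMER_SECRET", "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
for member in t.list_members(slug="nyt-journalists", owner_screen_name="nytimes"):
    print(member["screen_name"])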