def genPrime():
"""
Generate 2 large primes `p_prime` and `q_prime` and use them
to generate another 2 primes `p` and `q` of 1024 bits
"""
prime = cmod.randomPrime(LARGE_PRIME)
i = 0
while not cmod.isPrime(2 * prime + 1):
prime = cmod.randomPrime(LARGE_PRIME)
i += 1
return prime | 0.003058 |
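A minimal sketch of the same safe-prime search using sympy in place of the original `cmod` helpers (`cmod.randomPrime`/`cmod.isPrime` belong to the surrounding crypto library; the bit size here is illustrative, not the original 1024):

from sympy import isprime, randprime

def gen_safe_prime_candidate(bits=256):
    # Redraw random primes until 2*p + 1 is also prime, mirroring the loop in genPrime above.
    p = randprime(2 ** (bits - 1), 2 ** bits)
    while not isprime(2 * p + 1):
        p = randprime(2 ** (bits - 1), 2 ** bits)
    return p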
def calculate_width_widget_int(width, border = False, margin = None, margin_left = None, margin_right = None):
"""
Calculate actual widget content width based on given margins and paddings.
"""
if margin_left is None:
margin_left = margin
if margin_right is None:
margin_right = margin
if margin_left is not None:
width -= int(margin_left)
if margin_right is not None:
width -= int(margin_right)
if border:
width -= 2
return width if width > 0 else None | 0.020513 |
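A quick sanity check of the width arithmetic, using hypothetical values with the function above:

# 80 columns, minus a 2-column margin on each side, minus 2 for the border
assert calculate_width_widget_int(80, border=True, margin=2) == 74
# widths that collapse to zero or below are reported as None
assert calculate_width_widget_int(3, border=True, margin=1) is None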
def run_cell(self, cell, store_history=True):
"""Run a complete IPython cell.
Parameters
----------
cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
"""
if (not cell) or cell.isspace():
return
if cell.strip() == 'exit':
# explicitly handle 'exit' command
return self.ask_exit()
self._executing = True
        # flush stale replies, which could have been ignored due to missed heartbeats
while self.km.shell_channel.msg_ready():
self.km.shell_channel.get_msg()
# shell_channel.execute takes 'hidden', which is the inverse of store_hist
msg_id = self.km.shell_channel.execute(cell, not store_history)
while not self.km.shell_channel.msg_ready() and self.km.is_alive:
try:
self.handle_stdin_request(timeout=0.05)
except Empty:
# display intermediate print statements, etc.
                self.handle_iopub()
if self.km.shell_channel.msg_ready():
self.handle_execute_reply(msg_id)
self._executing = False | 0.003531 |
def add_done_callback(self, fn):
"""Adds a callback to be completed once future is done
:parm fn: A callable that takes no arguments. Note that is different
than concurrent.futures.Future.add_done_callback that requires
a single argument for the future.
"""
        # The done callback for concurrent.futures.Future will always pass
# the future in as the only argument. So we need to create the
# proper signature wrapper that will invoke the callback provided.
def done_callback(future_passed_to_callback):
return fn()
self._future.add_done_callback(done_callback) | 0.003026 |
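The same signature-adapting wrapper in isolation, applied to a plain concurrent.futures future (a hedged sketch, not the original class):

from concurrent.futures import ThreadPoolExecutor

def adapt(fn):
    # concurrent.futures always passes the future; drop it so a
    # zero-argument callback can be used.
    def done_callback(_future):
        return fn()
    return done_callback

with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(sum, [1, 2, 3])
    future.add_done_callback(adapt(lambda: print("done")))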
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes.
Just the uppercase variables in that object are stored in the config.
Example usage::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
:param obj: an import name or object
"""
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key) | 0.001718 |
def probably_geojson(input):
'''A quick check to see if this input looks like GeoJSON. If not a dict
JSON-like object, attempt to parse input as JSON. If the resulting object
    has a type property that looks like GeoJSON, return that object, otherwise None'''
valid = False
if not isinstance(input, dict):
try:
input = json.loads(input)
except ValueError:
return None
typename = input.get('type', None)
supported_types = set([
'Point', 'LineString', 'Polygon', 'MultiPoint', 'MultiLineString',
'MultiPolygon', 'GeometryCollection', 'Feature', 'FeatureCollection'
])
valid = typename in supported_types
return input if valid else None | 0.001391 |
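Example inputs, assuming the surrounding module imports `json`:

point = '{"type": "Point", "coordinates": [0, 0]}'
assert probably_geojson(point) == {'type': 'Point', 'coordinates': [0, 0]}
assert probably_geojson('not json at all') is None
assert probably_geojson({'type': 'Circle'}) is None  # unsupported type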
def save_token(self, token, request, *args, **kwargs):
"""Persist the token with a token type specific method.
Currently, only save_bearer_token is supported.
:param token: A (Bearer) token dict.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
return self.save_bearer_token(token, request, *args, **kwargs) | 0.005089 |
def migrate_thrift_obj(self, obj):
"""Helper function that can be called when serializing/deserializing thrift objects whose definitions
have changed, we need to make sure we initialize the new attributes to their default value"""
if not hasattr(obj, "thrift_spec"):
return
obj_key_set = set(obj.__dict__.keys())
thrift_field_map = {t[2]: t[4] for t in obj.thrift_spec if t}
obj.__dict__.update({f: copy.copy(thrift_field_map[f]) for f in set(thrift_field_map.keys()) - obj_key_set})
for value in obj.__dict__.values():
self.migrate_thrift_obj(value) | 0.007924 |
def stream_template(template_name, **context):
    '''
    Some templates can be huge; this function returns a streaming response,
    sending the content in chunks to prevent timeouts.
    :param template_name: name of the template to render
    :param **context: parameters for the template.
    :return: a streaming ``Response`` of HTML chunks.
    '''
app.update_template_context(context)
template = app.jinja_env.get_template(template_name)
stream = template.generate(context)
return Response(stream_with_context(stream)) | 0.002004 |
def is_img_id_exists(img_id):
"""
Checks if img_id has real file on filesystem.
"""
main_rel_path = get_relative_path_from_img_id(img_id)
main_path = media_path(main_rel_path)
return os.path.isfile(main_path) | 0.00431 |
def put(self, key):
"""Insert the key
:return: Key name
"""
self.client.put_object(
Body=json.dumps(key),
Bucket=self.db_path,
Key=key['name'])
return key['name'] | 0.008403 |
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters,
self.lags), 'k-')
plt.show() | 0.005 |
def _convert_list2str(self, fields):
"""
:param fields: ('bdate', 'domain')
:return: 'bdate,domain'
"""
        if isinstance(fields, (tuple, list)):
return ','.join(fields)
return fields | 0.007722 |
def refresh_image(self):
"""Get the most recent camera image."""
url = str.replace(CONST.TIMELINE_IMAGES_ID_URL,
'$DEVID$', self.device_id)
response = self._abode.send_request("get", url)
_LOGGER.debug("Get image response: %s", response.text)
return self.update_image_location(json.loads(response.text)) | 0.005391 |
def _parse_property(cls, name, value):
"""Parse a property received from the API into an internal object.
Args:
name (str): Name of the property on the object.
value (mixed): The unparsed API value.
Raises:
HelpScoutValidationException: In the event that the property name
is not found.
Returns:
mixed: A value compatible with the internal models.
"""
prop = cls._props.get(name)
return_value = value
if not prop:
logger.debug(
'"%s" with value "%s" is not a valid property for "%s".' % (
name, value, cls,
),
)
return_value = None
elif isinstance(prop, properties.Instance):
return_value = prop.instance_class.from_api(**value)
elif isinstance(prop, properties.List):
return_value = cls._parse_property_list(prop, value)
elif isinstance(prop, properties.Color):
return_value = cls._parse_property_color(value)
return return_value | 0.001787 |
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags) | 0.000949 |
def get_address(self, address):
"""Retrieve an address from the wallet.
:param str address: address in the wallet to look up
:return: an instance of :class:`Address` class
"""
params = self.build_basic_request()
params['address'] = address
response = util.call_api("merchant/{0}/address_balance".format(self.identifier), params,
base_url=self.service_url)
json_response = json.loads(response)
self.parse_error(json_response)
return Address(json_response['balance'],
json_response['address'],
None,
json_response['total_received']) | 0.00684 |
def resolve_resource_id_refs(self, input_dict, supported_resource_id_refs):
"""
Resolve resource references within a GetAtt dict.
Example:
{ "Fn::GetAtt": ["LogicalId", "Arn"] } => {"Fn::GetAtt": ["ResolvedLogicalId", "Arn"]}
Theoretically, only the first element of the array can contain reference to SAM resources. The second element
is name of an attribute (like Arn) of the resource.
However tools like AWS CLI apply the assumption that first element of the array is a LogicalId and cannot
contain a 'dot'. So they break at the first dot to convert YAML tag to JSON map like this:
`!GetAtt LogicalId.Arn` => {"Fn::GetAtt": [ "LogicalId", "Arn" ] }
Therefore to resolve the reference, we join the array into a string, break it back up to check if it contains
a known reference, and resolve it if we can.
:param input_dict: Dictionary to be resolved
:param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones.
:return: Resolved dictionary
"""
if not self.can_handle(input_dict):
return input_dict
key = self.intrinsic_name
value = input_dict[key]
# Value must be an array with *at least* two elements. If not, this is invalid GetAtt syntax. We just pass along
# the input to CFN for it to do the "official" validation.
if not isinstance(value, list) or len(value) < 2:
return input_dict
value_str = self._resource_ref_separator.join(value)
splits = value_str.split(self._resource_ref_separator)
logical_id = splits[0]
remaining = splits[1:] # if any
resolved_value = supported_resource_id_refs.get(logical_id)
return self._get_resolved_dictionary(input_dict, key, resolved_value, remaining) | 0.005299 |
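A minimal sketch of the join/split resolution step in isolation, assuming a '.' separator and a hypothetical id map (the real separator and map come from the enclosing class):

value = ['LogicalId', 'Arn']
id_map = {'LogicalId': 'ResolvedLogicalId'}
logical_id, *remaining = '.'.join(value).split('.')
resolved = {'Fn::GetAtt': [id_map[logical_id]] + remaining}
assert resolved == {'Fn::GetAtt': ['ResolvedLogicalId', 'Arn']}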
def _ensure_index_cache(self, db_uri, db_name, collection_name):
"""Adds a collections index entries to the cache if not present"""
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
        except Exception:
            print('Warning: unable to connect to ' + db_uri)
else:
internal_map_entry = {'indexes': indexes}
self.get_cache()[db_name][collection_name] = internal_map_entry
return self.get_cache()[db_name][collection_name] | 0.004088 |
def rooms_favorite(self, room_id=None, room_name=None, favorite=True):
"""Favorite or unfavorite room."""
if room_id is not None:
return self.__call_api_post('rooms.favorite', roomId=room_id, favorite=favorite)
elif room_name is not None:
return self.__call_api_post('rooms.favorite', roomName=room_name, favorite=favorite)
else:
raise RocketMissingParamException('roomId or roomName required') | 0.008658 |
def convert_datetime(obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
    >>> convert_datetime('2007-02-25 23:06:20')
    datetime.datetime(2007, 2, 25, 23, 6, 20)
    >>> convert_datetime('2007-02-25T23:06:20')
    datetime.datetime(2007, 2, 25, 23, 6, 20)
    Illegal values are returned as None:
    >>> convert_datetime('2007-02-31T23:06:20') is None
    True
    >>> convert_datetime('0000-00-00 00:00:00') is None
    True
True
"""
if ' ' in obj:
sep = ' '
elif 'T' in obj:
sep = 'T'
else:
return convert_date(obj)
try:
ymd, hms = obj.split(sep, 1)
usecs = '0'
if '.' in hms:
hms, usecs = hms.split('.')
usecs = float('0.' + usecs) * 1e6
return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':')+[usecs] ])
except ValueError:
return convert_date(obj) | 0.004315 |
def line_iterator(readable_file, size=None):
# type: (IO[bytes], Optional[int]) -> Iterator[bytes]
"""Iterate over the lines of a file.
Implementation reads each char individually, which is not very
efficient.
Yields:
str: a single line in the file.
"""
read = readable_file.read
line = []
byte = b"1"
if size is None or size < 0:
while byte:
byte = read(1)
line.append(byte)
if byte in b"\n":
yield b"".join(line)
del line[:]
else:
while byte and size:
byte = read(1)
size -= len(byte)
line.append(byte)
if byte in b"\n" or not size:
yield b"".join(line)
del line[:] | 0.001271 |
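An example run over an in-memory file; note the final line is yielded even without a trailing newline:

import io
lines = list(line_iterator(io.BytesIO(b'one\ntwo\nthree')))
assert lines == [b'one\n', b'two\n', b'three']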
def return_action(self, text, loc, ret):
"""Code executed after recognising a return statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("RETURN:",ret)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
if not self.symtab.same_types(self.shared.function_index, ret.exp[0]):
raise SemanticException("Incompatible type in return")
#set register for function's return value to expression value
reg = self.codegen.take_function_register()
self.codegen.move(ret.exp[0], reg)
#after return statement, register for function's return value is available again
self.codegen.free_register(reg)
#jump to function's exit
self.codegen.unconditional_jump(self.codegen.label(self.shared.function_name+"_exit", True)) | 0.011547 |
def decode_cpu_id(self, cpuid):
"""Decode the CPU id into a string"""
ret = ()
for i in cpuid.split(':'):
ret += (eval('0x' + i),)
return ret | 0.010753 |
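The same hex decoding in isolation, with a hypothetical id string:

cpuid = '41:2083'
assert tuple(int(part, 16) for part in cpuid.split(':')) == (0x41, 0x2083)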
def on_open_output_tool_clicked(self):
"""Autoconnect slot activated when open output tool button is clicked.
"""
output_path = self.output_path.text()
if not output_path:
output_path = os.path.expanduser('~')
# noinspection PyCallByClass,PyTypeChecker
filename, __ = QFileDialog.getSaveFileName(
self, tr('Output file'), output_path, tr('Raster file (*.tif)'))
if filename:
self.output_path.setText(filename) | 0.003992 |
def declalltypes(self):
"""generator on all declaration of type"""
for f in self.body:
if (hasattr(f, '_ctype')
and f._ctype._storage == Storages.TYPEDEF):
yield f | 0.008811 |
def create_request(query):
"""
Creates a GET request to Yarr! server
:param query: Free-text search query
:returns: Requests object
"""
yarr_url = app.config.get('YARR_URL', False)
if not yarr_url:
        raise ValueError('No URL to Yarr! server specified in config.')
api_token = app.config.get('YARR_API_TOKEN', False)
headers = {'X-API-KEY': api_token} if api_token else {}
payload = {'q': query}
url = '%s/search' % yarr_url
return requests.get(url, params=payload, headers=headers) | 0.001894 |
def quit(self):
"""Restore previous stdout/stderr and destroy the window."""
sys.stdout = self._oldstdout
sys.stderr = self._oldstderr
self.destroy() | 0.01105 |
def _evaluate_hodograph(s, nodes):
r"""Evaluate the Hodograph curve at a point :math:`s`.
    The Hodograph (first derivative) of a B |eacute| zier curve has
    degree :math:`d = n - 1` and is given by
.. math::
B'(s) = n \sum_{j = 0}^{d} \binom{d}{j} s^j
(1 - s)^{d - j} \cdot \Delta v_j
where each forward difference is given by
:math:`\Delta v_j = v_{j + 1} - v_j`.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): The nodes of a curve.
s (float): A parameter along the curve at which the Hodograph
is to be evaluated.
Returns:
numpy.ndarray: The point on the Hodograph curve (as a two
dimensional NumPy array with a single row).
"""
_, num_nodes = np.shape(nodes)
first_deriv = nodes[:, 1:] - nodes[:, :-1]
return (num_nodes - 1) * evaluate_multi(
first_deriv, np.asfortranarray([s])
) | 0.000992 |
def googlenet_resize(im, targ, min_area_frac, min_aspect_ratio, max_aspect_ratio, flip_hw_p, interpolation=cv2.INTER_AREA):
""" Randomly crop an image with an aspect ratio and returns a squared resized image of size targ
References:
1. https://arxiv.org/pdf/1409.4842.pdf
2. https://arxiv.org/pdf/1802.07888.pdf
"""
h,w,*_ = im.shape
area = h*w
for _ in range(10):
targetArea = random.uniform(min_area_frac, 1.0) * area
aspectR = random.uniform(min_aspect_ratio, max_aspect_ratio)
ww = int(np.sqrt(targetArea * aspectR) + 0.5)
hh = int(np.sqrt(targetArea / aspectR) + 0.5)
if flip_hw_p:
ww, hh = hh, ww
if hh <= h and ww <= w:
x1 = 0 if w == ww else random.randint(0, w - ww)
y1 = 0 if h == hh else random.randint(0, h - hh)
out = im[y1:y1 + hh, x1:x1 + ww]
out = cv2.resize(out, (targ, targ), interpolation=interpolation)
return out
out = scale_min(im, targ, interpolation=interpolation)
out = center_crop(out)
return out | 0.0055 |
def COOKIES(self):
""" Cookie information parsed into a dictionary.
Secure cookies are NOT decoded automatically. See
Request.get_cookie() for details.
"""
if self._COOKIES is None:
raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
self._COOKIES = {}
            for cookie in raw_dict.values():
self._COOKIES[cookie.key] = cookie.value
return self._COOKIES | 0.00641 |
def gc(self):
'''Find the frequency of G and C in the current sequence.'''
gc = len([base for base in self.seq if base == 'C' or base == 'G'])
return float(gc) / len(self) | 0.010256 |
def cov_from_scales(self, scales):
"""Return a covariance matrix built from a dictionary of scales.
`scales` is a dictionary keyed by stochastic instances, and the
values refer are the variance of the jump distribution for each
stochastic. If a stochastic is a sequence, the variance must
have the same length.
"""
# Get array of scales
ord_sc = []
for stochastic in self.stochastics:
ord_sc.append(np.ravel(scales[stochastic]))
ord_sc = np.concatenate(ord_sc)
if np.squeeze(ord_sc).shape[0] != self.dim:
raise ValueError("Improper initial scales, dimension don't match",
(np.squeeze(ord_sc), self.dim))
# Scale identity matrix
return np.eye(self.dim) * ord_sc | 0.002436 |
def nested_option(default=False):
""" Attaches the option ``nested`` with its *default* value to the
keyword arguments when the option does not exist. All positional
arguments and keyword arguments are forwarded unchanged.
"""
def decorator(method):
@wraps(method)
def wrapper(*args, **kwargs):
option = Option.nested.value
kwargs[option] = kwargs.get(option, bool(default))
return method(*args, **kwargs)
return wrapper
return decorator | 0.001908 |
def getImage(path, dockerfile, tag):
    '''Check if an image with a given tag exists. If not, build an image
    using the given dockerfile in the given path, tagging it with the given tag.
No extra side effects. Handles and reraises BuildError, TypeError, and
APIError exceptions.
'''
image = getImageByTag(tag)
if not image:
# Build an Image using the dockerfile in the path
try:
image = client.images.build(
path=path,
dockerfile=dockerfile,
tag=tag
)
except BuildError as exc:
eprint("Failed to build docker image")
raise exc
except TypeError as exc:
eprint("You must give a path to the build environemnt.")
raise exc
except APIError as exc:
eprint("Unhandled error while building image", tag)
raise exc
return image | 0.001082 |
def sample_logits(embedding, bias, labels, inputs, sampler):
"""
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
logits: [b1, b2, 1 + n_sample]
"""
true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
n_sample = neg_samples.size(0)
b1, b2 = labels.size(0), labels.size(1)
all_ids = torch.cat([labels.view(-1), neg_samples])
all_w = embedding(all_ids)
true_w = all_w[: -n_sample].view(b1, b2, -1)
sample_w = all_w[- n_sample:].view(n_sample, -1)
all_b = bias[all_ids]
true_b = all_b[: -n_sample].view(b1, b2)
sample_b = all_b[- n_sample:]
hit = (labels[:, :, None] == neg_samples).detach()
true_logits = torch.einsum('ijk,ijk->ij',
[true_w, inputs]) + true_b - true_log_probs
sample_logits = torch.einsum('lk,ijk->ijl',
[sample_w, inputs]) + sample_b - samp_log_probs
sample_logits.masked_fill_(hit, -1e30)
logits = torch.cat([true_logits[:, :, None], sample_logits], -1)
return logits | 0.002646 |
def batchcancel_order(self, order_ids: list):
"""
批量撤销订单
:param order_id:
:return:
"""
assert isinstance(order_ids, list)
params = {'order-ids': order_ids}
path = f'/v1/order/orders/batchcancel'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | 0.004525 |
def format_duration(secs):
"""
Format a duration in seconds as minutes and seconds.
"""
    secs = int(secs)
    sign = '-' if secs < 0 else ''
    if abs(secs) > 60:
        mins, secs = divmod(abs(secs), 60)
        return '%s%im %02is' % (sign, mins, secs)
return '%is' % secs | 0.003247 |
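Expected behaviour of the corrected sign handling, as a usage sketch against the function above:

assert format_duration(125) == '2m 05s'
assert format_duration(-125) == '-2m 05s'
assert format_duration(45) == '45s'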
def p_factor(self, tok):
"""factor : IPV4
| IPV6
| DATETIME
| TIMEDELTA
| INTEGER
| FLOAT
| VARIABLE
| CONSTANT
| FUNCTION RPAREN
| FUNCTION expression RPAREN
| LBRACK list RBRACK
| LPAREN expression RPAREN"""
if len(tok) == 2:
tok[0] = self._create_factor_rule(tok[1])
elif len(tok) == 3:
tok[0] = self._create_function_rule(tok[1])
elif tok[1][0] == 'FUNCTION':
tok[0] = self._create_function_rule(tok[1], tok[2])
else:
tok[0] = tok[2] | 0.002766 |
def spare_disk(self, disk_xml=None):
""" Number of spare disk per type.
For example: storage.ontap.filer201.disk.SATA
"""
spare_disk = {}
disk_types = set()
for filer_disk in disk_xml:
disk_types.add(filer_disk.find('effective-disk-type').text)
if not filer_disk.find('raid-state').text == 'spare':
continue
disk_type = filer_disk.find('effective-disk-type').text
if disk_type in spare_disk:
spare_disk[disk_type] += 1
else:
spare_disk[disk_type] = 1
for disk_type in disk_types:
if disk_type in spare_disk:
self.push('spare_' + disk_type, 'disk', spare_disk[disk_type])
else:
self.push('spare_' + disk_type, 'disk', 0) | 0.002356 |
def verify(self):
"""
Running all conditions in the instance variable valid_list
Return:
True: pass all conditions
False: fail at more than one condition
"""
if self not in self._queue:
return False
valid = True
for check in self.valid_list:
valid = valid & check()
return valid | 0.005128 |
def add_to_manifest(self, manifest):
"""
Add useful details to the manifest about this service
so that it can be used in an application.
        :param manifest: A predix.admin.app.Manifest object
instance that manages reading/writing manifest config
for a cloud foundry app.
"""
# Add this service to the list of services
manifest.add_service(self.service.name)
# Add environment variables
url = predix.config.get_env_key(self.use_class, 'url')
manifest.add_env_var(url, self.service.settings.data['url'])
akid = predix.config.get_env_key(self.use_class, 'access_key_id')
manifest.add_env_var(akid, self.service.settings.data['access_key_id'])
bucket = predix.config.get_env_key(self.use_class, 'bucket_name')
manifest.add_env_var(bucket, self.service.settings.data['bucket_name'])
host = predix.config.get_env_key(self.use_class, 'host')
manifest.add_env_var(host, self.service.settings.data['host'])
secret_access_key = predix.config.get_env_key(self.use_class, 'secret_access_key')
manifest.add_env_var(secret_access_key, self.service.settings.data['secret_access_key'])
manifest.write_manifest() | 0.003132 |
def wait_script(name,
source=None,
template=None,
onlyif=None,
unless=None,
cwd=None,
runas=None,
shell=None,
env=None,
stateful=False,
umask=None,
use_vt=False,
output_loglevel='debug',
hide_output=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs):
'''
Download a script from a remote source and execute it only if a watch
statement calls it.
source
The source script being downloaded to the minion, this source script is
hosted on the salt master server. If the file is located on the master
in the directory named spam, and is called eggs, the source string is
salt://spam/eggs
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
name
The command to execute, remember that the command will execute with the
path and permissions of the salt-minion.
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
cwd
The current working directory to execute the command in, defaults to
/root
runas
The user name to run the command as
shell
The shell to use for execution, defaults to the shell grain
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.wait_script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.wait_script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
umask
The umask (in octal) to use when running the command.
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
    success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
    success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon
    success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon
'''
# Ignoring our arguments is intentional.
return {'name': name,
'changes': {},
'result': True,
'comment': ''} | 0.000788 |
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoicesField, self).validate(value)
if value and not self.valid_value(value):
self._on_invalid_value(value) | 0.008163 |
def parse(cls, json):
# type: (dict) -> Any
"""Parse a json dict and return the correct subclass of :class:`ValidatorEffect`.
It uses the 'effect' key to determine which :class:`ValidatorEffect` to instantiate.
Please refer to :class:`enums.ValidatorEffectTypes` for the supported effects.
:param json: dictionary containing the specific keys to parse into a :class:`ValidatorEffect`
:type json: dict
:returns: the instantiated subclass of :class:`ValidatorEffect`
:rtype: :class:`ValidatorEffect` or subclass
"""
effect = json.get('effect')
if effect:
from pykechain.models.validators import effects
effect_implementation_classname = effect[0].upper() + effect[1:]
if hasattr(effects, effect_implementation_classname):
return getattr(effects, effect_implementation_classname)(json=json)
else:
raise Exception('unknown effect in json')
raise Exception("Effect unknown, incorrect json: '{}'".format(json)) | 0.007394 |
def expected_param_keys(self):
"""returns a list of params that this ConfigTemplate expects to receive"""
expected_keys = []
        r = re.compile(r'%\(([^\)]+)\)s')
for block in self.keys():
for key in self[block].keys():
s = self[block][key]
if type(s)!=str: continue
md = re.search(r, s)
while md is not None:
k = md.group(1)
if k not in expected_keys:
expected_keys.append(k)
s = s[md.span()[1]:]
md = re.search(r, s)
return expected_keys | 0.011958 |
def _add_response(self, response, weight=1):
"""
        Add a new response
:param response: The Response object
:type response: Response or Condition
:param weight: The weight of the response
:type weight: int
"""
# If no response with this priority level has been defined yet, create a new list
if response.priority not in self._responses:
self._responses[response.priority] = [(response, weight)]
return
# Otherwise, add this trigger to an existing priority list
self._responses[response.priority].append((response, weight)) | 0.004732 |
def write_meta_info(self, byte1, byte2, data):
"Worker method for writing meta info"
write_varlen(self.data, 0) # tick
write_byte(self.data, byte1)
write_byte(self.data, byte2)
write_varlen(self.data, len(data))
write_chars(self.data, data) | 0.00692 |
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
'''
Compile the master side low state data, and build the hidden state file
'''
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate() | 0.003731 |
def CMOVNS(cpu, dest, src):
"""
Conditional move - Not sign (non-negative).
Tests the status flags in the EFLAGS register and moves the source operand
(second operand) to the destination operand (first operand) if the given
test condition is true.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.SF == False, src.read(), dest.read())) | 0.011976 |
def _run_cnvkit_shared(inputs, backgrounds):
"""Shared functionality to run CNVkit, parallelizing over multiple BAM files.
Handles new style cases where we have pre-normalized inputs and
old cases where we run CNVkit individually.
"""
if tz.get_in(["depth", "bins", "normalized"], inputs[0]):
ckouts = []
for data in inputs:
cnr_file = tz.get_in(["depth", "bins", "normalized"], data)
cns_file = os.path.join(_sv_workdir(data), "%s.cns" % dd.get_sample_name(data))
cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data,
inputs + backgrounds, cns_file)
ckouts.append({"cnr": cnr_file, "cns": cns_file,
"background": tz.get_in(["depth", "bins", "background"], data)})
return ckouts
else:
return _run_cnvkit_shared_orig(inputs, backgrounds) | 0.005388 |
def get_feature_sequence(self, feature_id, organism=None, sequence=None):
"""
[CURRENTLY BROKEN] Get the sequence of a feature
:type feature_id: str
:param feature_id: Feature UUID
:type organism: str
:param organism: Organism Common Name
:type sequence: str
:param sequence: Sequence Name
:rtype: dict
:return: A standard apollo feature dictionary ({"features": [{...}]})
"""
# Choices: peptide, cds, cdna, genomic
# { "track": "Miro.v2", "features": [ { "uniquename": "714dcda6-2358-467d-855e-f495a82aa154" } ], "operation": "get_sequence", "type": "peptide" }:
# { "track": "Miro.v2", "features": [ { "uniquename": "714dcda6-2358-467d-855e-f495a82aa154" } ], "operation": "get_sequence", "flank": 500, "type": "genomic" }:
# This API is not behaving as expected. Wrong documentation?
data = {
'type': 'peptide',
'features': [
{'uniquename': feature_id}
]
}
data = self._update_data(data, organism, sequence)
return self.post('getSequence', data) | 0.003442 |
def interp_value(self, lat, lon, indexed=False):
""" Lookup a pixel value in the raster data, performing linear interpolation
if necessary. Indexed ==> nearest neighbor (*fast*). """
(px, py) = self.grid_coordinates.projection_to_raster_coords(lat, lon)
if indexed:
return self.raster_data[round(py), round(px)]
else:
# from scipy.interpolate import interp2d
# f_interp = interp2d(self.grid_coordinates.x_axis, self.grid_coordinates.y_axis, self.raster_data, bounds_error=True)
# return f_interp(lon, lat)[0]
from scipy.ndimage import map_coordinates
ret = map_coordinates(self.raster_data, [[py], [px]], order=1) # linear interp
return ret[0] | 0.010417 |
def filterData(self, key):
"""
Returns the filter data for the given key.
:param key | <str>
:return <str>
"""
if key == 'text':
default = nativestring(self.text())
else:
default = ''
return self._filterData.get(key, default) | 0.011628 |
async def read_data_frame(self, max_size: int) -> Optional[Frame]:
"""
Read a single data frame from the connection.
Process control frames received before the next data frame.
Return ``None`` if a close frame is encountered before any data frame.
"""
# 6.2. Receiving Data
while True:
frame = await self.read_frame(max_size)
# 5.5. Control Frames
if frame.opcode == OP_CLOSE:
# 7.1.5. The WebSocket Connection Close Code
# 7.1.6. The WebSocket Connection Close Reason
self.close_code, self.close_reason = parse_close(frame.data)
# Echo the original data instead of re-serializing it with
# serialize_close() because that fails when the close frame is
                # empty and parse_close() synthesizes a 1005 close code.
await self.write_close_frame(frame.data)
return None
elif frame.opcode == OP_PING:
# Answer pings.
ping_hex = frame.data.hex() or "[empty]"
logger.debug(
"%s - received ping, sending pong: %s", self.side, ping_hex
)
await self.pong(frame.data)
elif frame.opcode == OP_PONG:
# Acknowledge pings on solicited pongs.
if frame.data in self.pings:
# Acknowledge all pings up to the one matching this pong.
ping_id = None
ping_ids = []
while ping_id != frame.data:
ping_id, pong_waiter = self.pings.popitem(last=False)
ping_ids.append(ping_id)
pong_waiter.set_result(None)
pong_hex = binascii.hexlify(frame.data).decode() or "[empty]"
logger.debug(
"%s - received solicited pong: %s", self.side, pong_hex
)
ping_ids = ping_ids[:-1]
if ping_ids:
pings_hex = ", ".join(
binascii.hexlify(ping_id).decode() or "[empty]"
for ping_id in ping_ids
)
plural = "s" if len(ping_ids) > 1 else ""
logger.debug(
"%s - acknowledged previous ping%s: %s",
self.side,
plural,
pings_hex,
)
else:
pong_hex = binascii.hexlify(frame.data).decode() or "[empty]"
logger.debug(
"%s - received unsolicited pong: %s", self.side, pong_hex
)
# 5.6. Data Frames
else:
return frame | 0.0017 |
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
"""
Blobs are stored in sequential nodes
with increasing index values.
most blobs, like scripts start at index
0, long names start at a specified
offset.
"""
startkey = self.makekey(nodeid, tag, start)
endkey = self.makekey(nodeid, tag, end)
cur = self.btree.find('ge', startkey)
data = b''
while cur.getkey() <= endkey:
data += cur.getval()
cur.next()
return data | 0.00354 |
def fill_dcnm_net_info(self, tenant_id, direc, vlan_id=0,
segmentation_id=0):
"""Fill DCNM network parameters.
Function that fills the network parameters for a tenant required by
DCNM.
"""
serv_obj = self.get_service_obj(tenant_id)
fw_dict = serv_obj.get_fw_dict()
fw_id = fw_dict.get('fw_id')
net_dict = {'status': 'ACTIVE', 'admin_state_up': True,
'tenant_id': tenant_id, 'provider:network_type': 'local',
'vlan_id': vlan_id, 'segmentation_id': segmentation_id}
if vlan_id == 0:
net_dict.update({'mob_domain': False, 'mob_domain_name': None})
else:
net_dict.update({'mob_domain': True})
# TODO(padkrish) NWK ID are not filled.
if direc == 'in':
name = fw_id[0:4] + fw_const.IN_SERVICE_NWK + (
fw_id[len(fw_id) - 4:])
net_dict.update({'name': name, 'part_name': None,
'config_profile': self.serv_host_prof,
'fwd_mode': self.serv_host_mode})
else:
name = fw_id[0:4] + fw_const.OUT_SERVICE_NWK + (
fw_id[len(fw_id) - 4:])
net_dict.update({'name': name,
'part_name': fw_const.SERV_PART_NAME,
'config_profile': self.serv_ext_prof,
'fwd_mode': self.serv_ext_mode})
return net_dict | 0.001993 |
def on_start(self, host, port, channel, nickname, password):
"""
A WebSocket session has started - create a greenlet to host
the IRC client, and start it.
"""
self.client = WebSocketIRCClient(host, port, channel, nickname,
password, self)
self.spawn(self.client.start) | 0.005602 |
def is_rectilinear(self):
"""True if the transform is rectilinear, i.e., whether a shape would
remain axis-aligned, within rounding limits, after applying the
transform.
"""
a, b, c, d, e, f, g, h, i = self
return (abs(a) < EPSILON and abs(e) < EPSILON) or (
abs(d) < EPSILON and abs(b) < EPSILON
) | 0.005464 |
async def async_get_sensor_log(self, index: int) -> Optional[SensorLogResponse]:
"""
Get an entry from the Special sensor log.
:param index: Index for the sensor log entry to be obtained.
:return: Response containing the sensor log entry, or None if not found.
"""
response = await self._protocol.async_execute(
GetSensorLogCommand(index))
if isinstance(response, SensorLogResponse):
return response
return None | 0.007984 |
def make_input_stream():
"""Creates a :py:class:`Queue` object and a co-routine yielding from that
queue. The queue should be populated with 2-tuples of the form `(command,
message)`, where `command` is one of [`msg`, `end`].
    When the `end` command is received, the co-routine returns, ending the
stream.
When a `msg` command is received, the accompanying message is encoded and
yielded as a ``bytes`` object.
:return: tuple of (queue, stream)"""
input_queue = Queue()
def input_stream():
while True:
cmd, msg = input_queue.get()
if cmd == 'end':
input_queue.task_done()
return
elif cmd == 'msg':
yield msg.encode()
input_queue.task_done()
return input_queue, input_stream | 0.001206 |
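Example usage, assuming `Queue` here is `queue.Queue`:

input_queue, input_stream = make_input_stream()
input_queue.put(('msg', 'hello'))
input_queue.put(('end', None))
assert list(input_stream()) == [b'hello']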
def parse_content(self, text):
"""parse section to formal format
raw_content: {title: section(with title)}. For `help` access.
formal_content: {title: section} but the section has been dedented
without title. For parse instance"""
raw_content = self.raw_content
raw_content.clear()
formal_collect = {}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
split = self.visible_empty_line_re.split(text)
except ValueError: # python >= 3.5
split = [text]
option_split_re = self.option_split_re
name = re.compile(re.escape(self.option_name), re.IGNORECASE)
for text in filter(lambda x: x and x.strip(), split):
# logger.warning('get options group:\n%r', text)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
split_options = option_split_re.split(text)
except ValueError: # python >= 3.5
continue
split_options.pop(0)
for title, section in zip(split_options[::2], split_options[1::2]):
prefix, end = name.split(title)
prefix = prefix.strip()
section = section.rstrip()
if end.endswith('\n'):
formal = section
else:
formal = ' ' * len(title) + section
formal_collect.setdefault(prefix, []).append(formal)
# logger.error((title, section))
if prefix in raw_content:
# TODO: better handling way?
if self.namedoptions:
log = logger.warning
else:
log = logger.debug
log('duplicated options section %s', prefix)
raw_content[prefix] += '\n%s%s' % (title, section)
else:
raw_content[prefix] = title + section
if formal_collect:
for each_title, values in formal_collect.items():
value = '\n'.join(map(textwrap.dedent, values))
formal_collect[each_title] = value
self.formal_content = formal_collect | 0.000832 |
def getBestDiscount(sender,**kwargs):
'''
When a customer registers for events, discounts may need to be
automatically applied. A given shopping cart may, in fact,
be eligible for multiple different types of discounts (e.g. hours-based
discounts for increasing numbers of class hours), but typically, only one
discount should be applied. Therefore, this handler loops through all potential
discounts, finds the ones that are applicable to the passed registration or set
of items, and returns the code and discounted price of the best available discount,
in a tuple of the form (code, discounted_price).
'''
if not getConstant('general__discountsEnabled'):
return
logger.debug('Signal fired to request discounts.')
reg = kwargs.pop('registration',None)
if not reg:
logger.warning('No registration passed, discounts not applied.')
return
payAtDoor = reg.payAtDoor
# Check if this is a new customer, who may be eligible for special discounts
newCustomer = True
customer = Customer.objects.filter(email=reg.email,first_name=reg.firstName,last_name=reg.lastName).first()
if (customer and customer.numClassSeries > 0) or sender != RegistrationSummaryView:
newCustomer = False
eligible_filter = (
Q(event__series__pricingTier__isnull=False) |
Q(event__publicevent__pricingTier__isnull=False)
)
ineligible_filter = (
(Q(event__series__isnull=False) & Q(event__series__pricingTier__isnull=True)) |
(Q(event__publicevent__isnull=False) & Q(event__publicevent__pricingTier__isnull=True)) |
Q(dropIn=True)
)
if apps.is_installed('danceschool.private_lessons'):
eligible_filter = eligible_filter | Q(event__privatelessonevent__pricingTier__isnull=False)
ineligible_filter = ineligible_filter | (
Q(event__privatelessonevent__isnull=False) &
Q(event__privatelessonevent__pricingTier__isnull=True)
)
# The items for which the customer registered.
eventregs_list = reg.temporaryeventregistration_set.all()
eligible_list = eventregs_list.filter(dropIn=False).filter(eligible_filter)
ineligible_list = eventregs_list.filter(ineligible_filter)
ineligible_total = sum(
[x.event.getBasePrice(payAtDoor=payAtDoor) for x in ineligible_list.exclude(dropIn=True)] +
[x.price for x in ineligible_list.filter(dropIn=True)]
)
# Get the applicable discounts and sort them in ascending category order
# so that the best discounts are always listed in the order that they will
# be applied.
discountCodesApplicable = getApplicableDiscountCombos(eligible_list, newCustomer, reg.student, customer=customer, addOn=False, cannotCombine=False, dateTime=reg.dateTime)
discountCodesApplicable.sort(key=lambda x: x.code.category.order)
# Once we have a list of codes to try, calculate the discounted price for each possibility,
# and pick the one in each category that has the lowest total price. We also need to keep track
# of the way in which some discounts are allocated across individual events.
best_discounts = OrderedDict()
initial_prices = [x.event.getBasePrice(payAtDoor=payAtDoor) for x in eligible_list]
initial_total = sum(initial_prices)
if discountCodesApplicable:
net_allocated_prices = initial_prices
net_precategory_price = initial_total
last_category = discountCodesApplicable[0].code.category
for discount in discountCodesApplicable:
# If the category has changed, then the new net_allocated_prices and the
# new net_precategory price are whatever was found to be best in the last category.
if (discount.code.category != last_category):
last_category = discount.code.category
if best_discounts:
# Since this is an OrderedDict, we can get the last element of the dict from
# the iterator, which is the last category for which there was a valid discount.
last_discount = best_discounts.get(next(reversed(best_discounts)))
net_allocated_prices = last_discount.net_allocated_prices
net_precategory_price = last_discount.net_price
# The second item in each tuple is now adjusted, so that each item that is wholly or partially
# applied against the discount will be wholly (value goes to 0) or partially subtracted from the
# remaining value to be calculated at full price.
tieredTuples = [(x,1) for x in eligible_list[:]]
for itemTuple in discount.itemTuples:
tieredTuples = [(p,q) if p != itemTuple[0] else (p,q - itemTuple[1]) for (p,q) in tieredTuples]
response = discount.code.applyAndAllocate(net_allocated_prices,tieredTuples,payAtDoor)
# Once the final price has been calculated, apply it iff it is less than
# the previously best discount found.
current_code = best_discounts.get(discount.code.category.name, None)
if (
response and (
(not current_code and response.net_price < net_precategory_price) or
(current_code and response.net_price < current_code.net_price)
)
):
best_discounts[discount.code.category.name] = response
# Now, repeat the basic process for codes that cannot be combined. These codes are always
# compared against the base price, and there is no need to allocate across items since
# only one code will potentially be applied.
uncombinedCodesApplicable = getApplicableDiscountCombos(
eligible_list, newCustomer, reg.student, customer=customer, addOn=False, cannotCombine=True, dateTime=reg.dateTime
)
for discount in uncombinedCodesApplicable:
# The second item in each tuple is now adjusted, so that each item that is wholly or partially
# applied against the discount will be wholly (value goes to 0) or partially subtracted from the
# remaining value to be calculated at full price.
tieredTuples = [(x,1) for x in eligible_list[:]]
for itemTuple in discount.itemTuples:
tieredTuples = [(p,q) if p != itemTuple[0] else (p,q - itemTuple[1]) for (p,q) in tieredTuples]
response = discount.code.applyAndAllocate(initial_prices,tieredTuples,payAtDoor)
# Once the final price has been calculated, apply it iff it is less than
# the previously best discount or combination of discounts found.
if (
response and
response.net_price < min([x.net_price for x in best_discounts.values()] + [initial_total])
):
best_discounts = OrderedDict({discount.code.category.name: response})
if not best_discounts:
logger.debug('No applicable discounts found.')
# Return the list of discounts to be applied (in DiscountInfo tuples), along with the additional
# price of ineligible items to be added.
return DiscountCombo.DiscountApplication([x for x in best_discounts.values()], ineligible_total) | 0.007721 |
def inline_handler(self, *custom_filters, state=None, run_task=None, **kwargs):
"""
Decorator for inline query handler
Example:
.. code-block:: python3
@dp.inline_handler(lambda inline_query: True)
async def some_inline_handler(inline_query: types.InlineQuery)
:param state:
:param custom_filters: list of custom filters
:param run_task: run callback in task (no wait results)
:param kwargs:
:return: decorated function
"""
def decorator(callback):
self.register_inline_handler(callback, *custom_filters, state=state, run_task=run_task, **kwargs)
return callback
return decorator | 0.004115 |
def _edge_mapping(G):
"""Assigns a variable for each edge in G.
(u, v) and (v, u) map to the same variable.
"""
edge_mapping = {edge: idx for idx, edge in enumerate(G.edges)}
edge_mapping.update({(e1, e0): idx for (e0, e1), idx in edge_mapping.items()})
return edge_mapping | 0.006734 |
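Example with a small networkx graph (assuming networkx is installed):

import networkx as nx

G = nx.path_graph(3)  # edges (0, 1) and (1, 2)
mapping = _edge_mapping(G)
assert mapping[(0, 1)] == mapping[(1, 0)]  # both orientations share a variable
assert len(set(mapping.values())) == 2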
def _interfaces_removed(self, object_path, interfaces):
"""Internal method."""
old_state = copy(self._objects[object_path])
for interface in interfaces:
del self._objects[object_path][interface]
new_state = self._objects[object_path]
if Interface['Drive'] in interfaces:
self._detect_toggle(
'has_media',
self.get(object_path, old_state),
self.get(object_path, new_state),
None, 'media_removed')
if Interface['Block'] in interfaces:
slave = self.get(object_path, old_state).luks_cleartext_slave
if slave:
if not self._has_job(slave.object_path, 'device_locked'):
self.trigger('device_locked', slave)
if self._objects[object_path]:
self.trigger('device_changed',
self.get(object_path, old_state),
self.get(object_path, new_state))
else:
del self._objects[object_path]
if object_kind(object_path) in ('device', 'drive'):
self.trigger(
'device_removed',
self.get(object_path, old_state)) | 0.001612 |
def log_head(path, log_file, log_time):
"""
write headers to log file
"""
with open(path + log_file, "w") as log:
log.write("#" * 79 + "\n\n")
log.write("File : " + log_file + "\n")
log.write("Path : " + path + "\n")
log.write("Date : " + time.strftime("%d/%m/%Y") + "\n")
log.write("Time : " + log_time + "\n\n")
log.write("#" * 79 + "\n\n")
log.close() | 0.002347 |
def _fun_names_iter(self, functyp, val):
"""Iterate over the names of the functions in ``val``,
adding them to ``funcstore`` if they are missing;
or if the items in ``val`` are already the names of functions
in ``funcstore``, iterate over those.
"""
funcstore = getattr(self.engine, functyp)
for v in val:
if callable(v):
# Overwrites anything already on the funcstore, is that bad?
setattr(funcstore, v.__name__, v)
yield v.__name__
elif v not in funcstore:
raise KeyError("Function {} not present in {}".format(
v, funcstore._tab
))
else:
yield v | 0.002642 |
def curve_to(self, x1, y1, x2, y2, x3, y3):
"""Adds a cubic Bézier spline to the path
from the current point
to position ``(x3, y3)`` in user-space coordinates,
using ``(x1, y1)`` and ``(x2, y2)`` as the control points.
After this call the current point will be ``(x3, y3)``.
If there is no current point before the call to :meth:`curve_to`
this method will behave as if preceded by
a call to ``context.move_to(x1, y1)``.
:param x1: The X coordinate of the first control point.
:param y1: The Y coordinate of the first control point.
:param x2: The X coordinate of the second control point.
:param y2: The Y coordinate of the second control point.
:param x3: The X coordinate of the end of the curve.
:param y3: The Y coordinate of the end of the curve.
:type x1: float
:type y1: float
:type x2: float
:type y2: float
:type x3: float
:type y3: float
"""
cairo.cairo_curve_to(self._pointer, x1, y1, x2, y2, x3, y3)
self._check_status() | 0.001784 |
def clean(ctx, state, dry_run=False, bare=False, user=False):
"""Uninstalls all packages not specified in Pipfile.lock."""
from ..core import do_clean
do_clean(ctx=ctx, three=state.three, python=state.python, dry_run=dry_run,
system=state.system) | 0.00369 |
def parse_aioredis_url(url: str) -> DictStrAny:
"""
Convert Redis URL string to dict suitable to pass to
``aioredis.create_redis(...)`` call.
**Usage**::
async def connect_redis(url=None):
url = url or 'redis://localhost:6379/0'
return await create_redis(**get_aioredis_parts(url))
:param url: URL to access Redis instance, started with ``redis://``.
"""
parts = urlparse(url)
db = parts.path[1:] or None # type: Optional[Union[str, int]]
if db:
db = int(db)
return {
'address': (parts.hostname, parts.port or 6379),
'db': db,
'password': parts.password} | 0.001506 |
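Example round-trip, assuming `urlparse` comes from `urllib.parse`:

parts = parse_aioredis_url('redis://:secret@localhost:6379/2')
assert parts == {'address': ('localhost', 6379), 'db': 2, 'password': 'secret'}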
def delete(self):
"""Delete this file from the device
.. note::
After deleting the file, this object will no longer contain valid information
and further calls to delete or get_data will return :class:`~.ErrorInfo` objects
"""
target = DeviceTarget(self.device_id)
return self._fssapi.delete_file(target, self.path)[self.device_id] | 0.010152 |
def import_training_data(self,
positive_corpus_file=os.path.join(os.path.dirname(__file__),
"positive.txt"),
negative_corpus_file=os.path.join(os.path.dirname(__file__),
"negative.txt")
):
"""
This method imports the positive and negative training data from the
two corpus files and creates the training data list.
"""
positive_corpus = open(positive_corpus_file)
negative_corpus = open(negative_corpus_file)
# for line in positive_corpus:
# self.training_data.append((line, True))
# for line in negative_corpus:
# self.training_data.append((line, False))
# The following code works. Need to profile this to see if this is an
# improvement over the code above.
positive_training_data = list(map(lambda x: (x, True), positive_corpus))
negative_training_data = list(map(lambda x: (x, False), negative_corpus))
self.training_data = positive_training_data + negative_training_data | 0.012037 |
def copy_to_clipboard(self, copy=True):
"""
Copies the selected items to the clipboard
:param copy: True to copy, False to cut.
"""
urls = self.selected_urls()
if not urls:
return
mime = self._UrlListMimeData(copy)
mime.set_list(urls)
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setMimeData(mime) | 0.004988 |
def duration(self):
"""Returns the integer value of the interval, the value is in milliseconds.
If the interval has not had stop called yet,
it will report the number of milliseconds in the interval up to the current point in time.
"""
if self._stop_instant is None:
return int((instant() - self._start_instant) * 1000)
if self._duration is None:
self._duration = int((self._stop_instant - self._start_instant) * 1000)
return self._duration | 0.009597 |
def cancelTickByTickData(self, contract: Contract, tickType: str):
"""
Unsubscribe from tick-by-tick data
Args:
contract: The exact contract object that was used to
subscribe with.
"""
ticker = self.ticker(contract)
reqId = self.wrapper.endTicker(ticker, tickType)
if reqId:
self.client.cancelTickByTickData(reqId)
else:
self._logger.error(
f'cancelMktData: No reqId found for contract {contract}') | 0.003766 |
def setEnable(self, status, wanInterfaceId=1, timeout=1):
"""Set enable status for a WAN interface, be careful you don't cut yourself off.
:param bool status: enable or disable the interface
:param int wanInterfaceId: the id of the WAN interface
:param float timeout: the timeout to wait for the action to be executed
"""
namespace = Wan.getServiceType("setEnable") + str(wanInterfaceId)
uri = self.getControlURL(namespace)
if status:
setStatus = 1
else:
setStatus = 0
self.execute(uri, namespace, "SetEnable", timeout=timeout, NewEnable=setStatus) | 0.006107 |
def remove_client(self, client):
# type: (object) -> None
"""Remove the client from the users of the socket.
If there are no more clients for the socket, it
will close automatically.
"""
try:
self._clients.remove(id(client))
except ValueError:
pass
if len(self._clients) < 1:
self.close() | 0.007673 |
def canonical_dataset_to_grib(dataset, path, mode='wb', no_warn=False, grib_keys={}, **kwargs):
    # type: (xr.Dataset, str, str, bool, T.Dict[str, T.Any], **T.Any) -> None
"""
Write a ``xr.Dataset`` in *canonical* form to a GRIB file.
"""
if not no_warn:
warnings.warn("GRIB write support is experimental, DO NOT RELY ON IT!", FutureWarning)
# validate Dataset keys, DataArray names, and attr keys/values
xr.backends.api._validate_dataset_names(dataset)
xr.backends.api._validate_attrs(dataset)
real_grib_keys = {k[5:]: v for k, v in dataset.attrs.items() if k[:5] == 'GRIB_'}
real_grib_keys.update(grib_keys)
with open(path, mode=mode) as file:
for data_var in dataset.data_vars.values():
canonical_dataarray_to_grib(data_var, file, grib_keys=real_grib_keys, **kwargs) | 0.005959 |
def autodiscover(self, autoregister=True):
"""This function will send out an autodiscover broadcast to find a
Neteria server. Any servers that respond with an "OHAI CLIENT"
packet are servers that we can connect to. Servers that respond are
stored in the "discovered_servers" list.
Args:
autoregister (boolean): Whether or not to automatically register
with any responding servers. Defaults to True.
Returns:
None
Examples:
>>> myclient = neteria.client.NeteriaClient()
>>> myclient.listen()
>>> myclient.autodiscover()
>>> myclient.discovered_servers
{('192.168.0.20', 40080): u'1.0', ('192.168.0.82', 40080): '2.0'}
"""
logger.debug("<%s> Sending autodiscover message to broadcast "
"address" % str(self.cuuid))
if not self.listener.listening:
logger.warning("Neteria client is not listening. The client "
"will not be able to process responses from the server")
message = serialize_data(
{"method": "OHAI",
"version": self.version,
"cuuid": str(self.cuuid)},
self.compression, encryption=False)
if autoregister:
self.autoregistering = True
self.listener.send_datagram(
message, ("<broadcast>", self.server_port), message_type="broadcast") | 0.003425 |
def initialize(self):
"""
Initialize the internal objects.
"""
if self._pooler is None:
params = {
"inputWidth": self.inputWidth,
"lateralInputWidths": [self.cellCount] * self.numOtherCorticalColumns,
"cellCount": self.cellCount,
"sdrSize": self.sdrSize,
"onlineLearning": self.onlineLearning,
"maxSdrSize": self.maxSdrSize,
"minSdrSize": self.minSdrSize,
"synPermProximalInc": self.synPermProximalInc,
"synPermProximalDec": self.synPermProximalDec,
"initialProximalPermanence": self.initialProximalPermanence,
"minThresholdProximal": self.minThresholdProximal,
"sampleSizeProximal": self.sampleSizeProximal,
"connectedPermanenceProximal": self.connectedPermanenceProximal,
"predictedInhibitionThreshold": self.predictedInhibitionThreshold,
"synPermDistalInc": self.synPermDistalInc,
"synPermDistalDec": self.synPermDistalDec,
"initialDistalPermanence": self.initialDistalPermanence,
"activationThresholdDistal": self.activationThresholdDistal,
"sampleSizeDistal": self.sampleSizeDistal,
"connectedPermanenceDistal": self.connectedPermanenceDistal,
"inertiaFactor": self.inertiaFactor,
"seed": self.seed,
}
self._pooler = ColumnPooler(**params) | 0.002216 |
def _get_access_token(self, verifier=None):
"""
Fetch an access token from `self.access_token_url`.
"""
response, content = self.client(verifier).request(
self.access_token_url, "POST")
content = smart_unicode(content)
if not response['status'] == '200':
raise OAuthError(_(
u"Invalid status code %s while obtaining access token from %s: %s") %
(response['status'], self.access_token_url, content))
token = dict(urlparse.parse_qsl(content))
return (oauth.Token(token['oauth_token'], token['oauth_token_secret']),
token) | 0.015759 |
def get_data(self, file_id):
"""
Acquires the data from the table identified by the id.
        The file is read only once; consecutive calls to this method will
        return the same collection.
:param file_id: identifier for the table
:return: all the values from the table
"""
if file_id not in self._file_values:
file_contents = 'cwr_%s.csv' % file_id
self._file_values[file_id] = self._reader.read_csv_file(
file_contents)
return self._file_values[file_id] | 0.003559 |
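Usage sketch, assuming `tables` is an instance of the surrounding class and the reader can find a file named cwr_lookup.csv; the file id is illustrative.
# First call reads cwr_lookup.csv through the reader; the result is cached.
values = tables.get_data('lookup')
# Subsequent calls return the cached collection without touching the file.
values_again = tables.get_data('lookup')
assert values is values_again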
def Tphi(self,**kwargs): #pragma: no cover
"""
NAME:
Tphi
PURPOSE:
Calculate the azimuthal period
INPUT:
+scipy.integrate.quadrature keywords
OUTPUT:
           T_phi(R,vR,vT)/ro/vc + estimate of the error
HISTORY:
2010-12-01 - Written - Bovy (NYU)
"""
if hasattr(self,'_Tphi'):
return self._Tphi
(rperi,rap)= self.calcRapRperi(**kwargs)
if rap == rperi:#Circular orbit
return 2.*m.pi*self._R/self._vT
TR= self.TR(**kwargs)
I= self.I(**kwargs)
Tphi= TR/I*m.pi
self._Tphi= Tphi
return self._Tphi | 0.021994 |
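The method combines the radial period and the angle integral as Tphi = pi * TR / I; a minimal sketch, assuming `orbit` is an instance of the surrounding actionAngle-style class with TR() and I() available.
import math

# Assumption: `orbit` is an instance of the surrounding class.
t_r = orbit.TR()        # radial period
i_val = orbit.I()       # angle integral over one radial oscillation
t_phi = t_r / i_val * math.pi   # same relation used inside Tphi() above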
def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti | 0.002681 |
def get_logger(
name, file_name=None, stream=None, template=None, propagate=False):
"""Get a logger by name
    If file_name is specified and the dirname() of the file_name exists, it will
    write to that file. If the dirname does not exist, an error is printed and
    the file handler is skipped. """
logger = logging.getLogger(name)
if propagate is not None:
logger.propagate = propagate
for handler in logger.handlers:
logger.removeHandler(handler)
if not template:
template = "%(name)s %(process)s %(levelname)s %(message)s"
formatter = logging.Formatter(template)
if not file_name and not stream:
stream = sys.stdout
handlers = []
if stream is not None:
handlers.append(logging.StreamHandler(stream=stream))
if file_name is not None:
if os.path.isdir(os.path.dirname(file_name)):
handlers.append(logging.FileHandler(file_name))
else:
print("ERROR: Can't open log file {}".format(file_name))
for ch in handlers:
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
return logger | 0.002602 |
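Usage sketch for the helper above; the logger names and the log path are illustrative.
import sys

# Stream logging with the default template.
log = get_logger("myapp", stream=sys.stdout)
log.info("service started")

# File logging; if the parent directory is missing, the helper prints an
# error and simply attaches no file handler.
file_log = get_logger("myapp.file", file_name="/tmp/myapp.log")
file_log.info("written to file")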
def image_to_file(self, path, get_image=True):
"""Write the image to a file."""
if not self.image_url or get_image:
if not self.refresh_image():
return False
response = requests.get(self.image_url, stream=True)
if response.status_code != 200:
_LOGGER.warning(
"Unexpected response code %s when requesting image: %s",
str(response.status_code), response.text)
raise AbodeException((ERROR.CAM_IMAGE_REQUEST_INVALID))
with open(path, 'wb') as imgfile:
copyfileobj(response.raw, imgfile)
return True | 0.00311 |
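A hedged usage sketch: obtaining the `camera` device object (normally via an Abode client) is outside the snippet; only image_to_file and the AbodeException it raises come from the code above.
try:
    # Refreshes the image URL first (get_image defaults to True), then downloads.
    camera.image_to_file("/tmp/front_door.jpg")
except AbodeException as err:
    print("could not save camera image:", err)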
def new_histogram(name, reservoir=None):
"""
Build a new histogram metric with a given reservoir object
If the reservoir is not provided, a uniform reservoir with the default size is used
"""
if reservoir is None:
reservoir = histogram.UniformReservoir(histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE)
return new_metric(name, histogram.Histogram, reservoir) | 0.007813 |
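Usage sketch; the metric names are illustrative, and `histogram.UniformReservoir` is the same class the snippet itself references.
# Default uniform reservoir with DEFAULT_UNIFORM_RESERVOIR_SIZE samples.
latency = new_histogram("request_latency_ms")

# Explicit, smaller reservoir.
small = new_histogram("small_sample", reservoir=histogram.UniformReservoir(128))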
def me(cls):
"""
Returns information about the currently authenticated user.
:return:
:rtype: User
"""
return fields.ObjectField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(endpoint=cls.ENDPOINT + '/me').text
)
) | 0.005714 |
def relayIndextoCoord(self, i):
"""
Map 1D cell index to a 2D coordinate
:param i: integer 1D cell index
:return: (x, y), a 2D coordinate
"""
x = i % self.relayWidth
    y = i // self.relayWidth  # floor division keeps y an integer under Python 3
return x, y | 0.004255 |
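A standalone worked example of the same row-major mapping (relay_width = 4 is an illustrative value):
relay_width = 4
for i in (0, 3, 4, 6):
    x, y = i % relay_width, i // relay_width
    print(i, "->", (x, y))   # 0 -> (0, 0), 3 -> (3, 0), 4 -> (0, 1), 6 -> (2, 1)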
def compile(self, session=None):
"""
Before calling the standard compile function, check to see if the size
of the data has changed and add variational parameters appropriately.
This is necessary because the shape of the parameters depends on the
shape of the data.
"""
        if self.num_data != self.X.shape[0]:
self.num_data = self.X.shape[0]
self.q_alpha = Parameter(np.zeros((self.num_data, self.num_latent)))
self.q_lambda = Parameter(np.ones((self.num_data, self.num_latent)),
transforms.positive)
return super(VGP_opper_archambeau, self).compile(session=session) | 0.005666 |
def rerun(store, mail, current_user, institute_id, case_name, sender, recipient):
"""Request a rerun by email."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
link = url_for('cases.case', institute_id=institute_id, case_name=case_name)
store.request_rerun(institute_obj, case_obj, user_obj, link)
# this should send a JSON document to the SuSy API in the future
html = """
<p>{institute}: {case} ({case_id})</p>
<p>Re-run requested by: {name}</p>
""".format(institute=institute_obj['display_name'],
case=case_obj['display_name'], case_id=case_obj['_id'],
name=user_obj['name'].encode())
# compose and send the email message
msg = Message(subject=("SCOUT: request RERUN for {}"
.format(case_obj['display_name'])),
html=html, sender=sender, recipients=[recipient],
# cc the sender of the email for confirmation
cc=[user_obj['email']])
mail.send(msg) | 0.003656 |
def remove_vrf(self, auth, spec):
""" Remove a VRF.
* `auth` [BaseAuth]
AAA options.
* `spec` [vrf_spec]
A VRF specification.
Remove VRF matching the `spec` argument.
This is the documentation of the internal backend function. It's
        exposed over XML-RPC; please also see the XML-RPC documentation for
        :py:func:`nipap.xmlrpc.NipapXMLRPC.remove_vrf` for a full
understanding.
"""
self._logger.debug("remove_vrf called; spec: %s" % unicode(spec))
# get list of VRFs to remove before removing them
vrfs = self.list_vrf(auth, spec)
# remove prefixes in VRFs
for vrf in vrfs:
v4spec = {
'prefix': '0.0.0.0/0',
'vrf_id': vrf['id']
}
v6spec = {
'prefix': '::/0',
'vrf_id': vrf['id']
}
self.remove_prefix(auth, spec = v4spec, recursive = True)
self.remove_prefix(auth, spec = v6spec, recursive = True)
where, params = self._expand_vrf_spec(spec)
sql = "DELETE FROM ip_net_vrf WHERE %s" % where
self._execute(sql, params)
# write to audit table
for v in vrfs:
audit_params = {
'vrf_id': v['id'],
'vrf_rt': v['rt'],
'vrf_name': v['name'],
'username': auth.username,
'authenticated_as': auth.authenticated_as,
'full_name': auth.full_name,
'authoritative_source': auth.authoritative_source,
'description': 'Removed vrf %s' % v['rt']
}
sql, params = self._sql_expand_insert(audit_params)
self._execute('INSERT INTO ip_net_log %s' % sql, params) | 0.005379 |
def na_value_for_dtype(dtype, compat=True):
"""
    Return the NA value compatible with the given dtype.
Parameters
----------
dtype : string / dtype
compat : boolean, default True
Returns
-------
    Scalar NA value appropriate for the dtype (e.g. np.nan, NaT, 0 or False).
Examples
--------
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('int64'), compat=False)
nan
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
NaT
"""
dtype = pandas_dtype(dtype)
if is_extension_array_dtype(dtype):
return dtype.na_value
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype) or is_period_dtype(dtype)):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
if compat:
return 0
return np.nan
elif is_bool_dtype(dtype):
return False
return np.nan | 0.000958 |
def get_recipients(self, ar):
"""Return the AR recipients in the same format like the AR Report
expects in the records field `Recipients`
"""
plone_utils = api.get_tool("plone_utils")
def is_email(email):
if not plone_utils.validateSingleEmailAddress(email):
return False
return True
def recipient_from_contact(contact):
if not contact:
return None
email = contact.getEmailAddress()
return {
"UID": api.get_uid(contact),
"Username": contact.getUsername(),
"Fullname": to_utf8(contact.Title()),
"EmailAddress": email,
}
def recipient_from_email(email):
if not is_email(email):
return None
return {
"UID": "",
"Username": "",
"Fullname": email,
"EmailAddress": email,
}
# Primary Contacts
to = filter(None, [recipient_from_contact(ar.getContact())])
# CC Contacts
cc = filter(None, map(recipient_from_contact, ar.getCCContact()))
# CC Emails
cc_emails = map(lambda x: x.strip(), ar.getCCEmails().split(","))
cc_emails = filter(None, map(recipient_from_email, cc_emails))
return to + cc + cc_emails | 0.001425 |
def ftdetect(filename):
"""Determine if filename is markdown or notebook,
based on the file extension.
"""
_, extension = os.path.splitext(filename)
md_exts = ['.md', '.markdown', '.mkd', '.mdown', '.mkdn', '.Rmd']
nb_exts = ['.ipynb']
if extension in md_exts:
return 'markdown'
elif extension in nb_exts:
return 'notebook'
else:
return None | 0.002494 |
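Quick checks for the extension-based detection (assuming `os` is imported where the snippet lives):
assert ftdetect("notes.md") == 'markdown'
assert ftdetect("report.Rmd") == 'markdown'
assert ftdetect("analysis.ipynb") == 'notebook'
assert ftdetect("script.py") is None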
def add(self, process, name=None):
"""Add a new process to the registry.
        :param process: A callable (either a plain function or an object
            implementing __call__).
        :param name: The name of the executable to match. If not given,
            it must be provided as the 'name' attribute of the given
            `process` callable.
"""
name = name or process.name
assert name, "No executable name given."""
self._registry[name] = process | 0.004073 |
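Usage sketch; `ProcessRegistry` as a class name is an assumption (any object exposing the add() above works), and the registered names are illustrative.
registry = ProcessRegistry()   # hypothetical class exposing add() above

# Register a plain function under an explicit name.
registry.add(lambda: 0, name="noop")

# Register an object that carries its own `name` attribute.
class Reindex(object):
    name = "reindex"
    def __call__(self):
        return "reindexing"

registry.add(Reindex())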
def passthrough(args):
"""
%prog passthrough chrY.vcf chrY.new.vcf
Pass through Y and MT vcf.
"""
p = OptionParser(passthrough.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, newvcffile = args
fp = open(vcffile)
fw = open(newvcffile, "w")
gg = ["0/0", "0/1", "1/1"]
for row in fp:
if row[0] == "#":
print(row.strip(), file=fw)
continue
v = VcfLine(row)
v.filter = "PASS"
v.format = "GT:GP"
probs = [0] * 3
probs[gg.index(v.genotype)] = 1
v.genotype = v.genotype.replace("/", "|") + \
":{0}".format(",".join("{0:.3f}".format(x) for x in probs))
print(v, file=fw)
fw.close() | 0.002532 |
def create(self, server):
"""Create the tasks on the server"""
for chunk in self.__cut_to_size():
server.post(
'tasks_admin',
chunk.as_payload(),
replacements={
'slug': chunk.challenge.slug}) | 0.006969 |
def save_matpower(self, fd):
""" Serialize the case as a MATPOWER data file.
"""
from pylon.io import MATPOWERWriter
MATPOWERWriter(self).write(fd) | 0.011173 |