text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def remove_service(self, service):
"""Removes the service passed in from the services offered by the
current Profile. If the Analysis Service passed in is not assigned to
this Analysis Profile, returns False.
:param service: the service to be removed from this Analysis Profile
:type service: AnalysisService
:return: True if the AnalysisService has been removed successfully
"""
obj = api.get_object(service)
uid = api.get_uid(obj)
# Remove the service from the referenced services
services = self.getService()
num_services = len(services)
        # Only attempt removal if the service is actually assigned, so that an
        # unassigned service falls through and the method returns False.
        if obj in services:
            services.remove(obj)
self.setService(services)
removed = len(services) < num_services
# Remove the service from the settings map
settings = self.getAnalysisServicesSettings()
settings = [item for item in settings if item.get('uid', '') != uid]
self.setAnalysisServicesSettings(settings)
return removed | 0.00199 |
def create_node(xml_node: XmlNode, **init_args):
'''Creates node from xml node using namespace as module and tag name as class name'''
inst_type = get_inst_type(xml_node)
init_args['xml_node'] = xml_node
inst = create_inst(inst_type, **init_args)
if not isinstance(inst, Node):
inst = convert_to_node(inst, **init_args)
return inst | 0.00551 |
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
        if isinstance(data, tuple):
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates) | 0.004918 |
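A minimal, self-contained sketch of the same chunked-fetch pattern using sqlite3 and plain tuples (the table, sizes and helper name are illustrative, not taken from the original pandas code):

import sqlite3

def iter_chunks(cursor, chunksize):
    # Yield lists of rows until fetchmany() returns an empty sequence.
    while True:
        rows = cursor.fetchmany(chunksize)
        if not rows:
            cursor.close()
            break
        yield rows

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (x INTEGER)")
conn.executemany("INSERT INTO t VALUES (?)", [(i,) for i in range(7)])
for chunk in iter_chunks(conn.execute("SELECT x FROM t"), chunksize=3):
    print(chunk)   # [(0,), (1,), (2,)], then [(3,), (4,), (5,)], then [(6,)]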
def resteem(self, identifier):
        ''' Resteems a post, then sleeps for the required
        amount of time between resteems
'''
for num_of_retries in range(default.max_retry):
try:
self.steem_instance().resteem(
identifier, self.mainaccount)
self.msg.message("resteemed " + identifier)
time.sleep(10)
except Exception as e:
self.util.retry('''COULD NOT RESTEEM
{}'''.format(identifier),
e, num_of_retries, default.wait_time)
self.s = None
else:
return True | 0.007236 |
def _find_volume(name):
'''
Find volume by name on minion
'''
docker_volumes = __salt__['docker.volumes']()['Volumes']
if docker_volumes:
volumes = [v for v in docker_volumes if v['Name'] == name]
if volumes:
return volumes[0]
return None | 0.003436 |
def scp_put(files,
remote_path=None,
recursive=False,
preserve_times=False,
saltenv='base',
**kwargs):
'''
.. versionadded:: 2019.2.0
Transfer files and directories to remote network device.
.. note::
        This function is available only when the underlying library
`scp <https://github.com/jbardin/scp.py>`_
is installed. See
:mod:`scp module <salt.modules.scp_mod>` for
more details.
files
A single path or a list of paths to be transferred.
remote_path
The path on the remote device where to store the files.
recursive: ``True``
Transfer files and directories recursively.
preserve_times: ``False``
Preserve ``mtime`` and ``atime`` of transferred files and directories.
saltenv: ``base``
The name of the Salt environment. Ignored when ``files`` is not a
``salt://`` URL.
hostname
The hostname of the remote device.
port: ``22``
The port of the remote device.
username
The username required for SSH authentication on the device.
password
Used for password authentication. It is also used for private key
decryption if ``passphrase`` is not given.
passphrase
Used for decrypting private keys.
pkey
An optional private key to use for authentication.
key_filename
The filename, or list of filenames, of optional private key(s) and/or
certificates to try for authentication.
timeout
An optional timeout (in seconds) for the TCP connect.
socket_timeout: ``10``
The channel socket timeout in seconds.
buff_size: ``16384``
The size of the SCP send buffer.
allow_agent: ``True``
Set to ``False`` to disable connecting to the SSH agent.
look_for_keys: ``True``
Set to ``False`` to disable searching for discoverable private key
files in ``~/.ssh/``
banner_timeout
An optional timeout (in seconds) to wait for the SSH banner to be
presented.
auth_timeout
An optional timeout (in seconds) to wait for an authentication
response.
auto_add_policy: ``False``
Automatically add the host to the ``known_hosts``.
CLI Example:
.. code-block:: bash
salt '*' napalm.scp_put /path/to/file /var/tmp/file auto_add_policy=True
'''
conn_args = netmiko_args(**kwargs)
conn_args['hostname'] = conn_args['host']
kwargs.update(conn_args)
return __salt__['scp.put'](files,
remote_path=remote_path,
recursive=recursive,
preserve_times=preserve_times,
saltenv=saltenv,
**kwargs) | 0.000697 |
async def ensure_process(self):
"""
Start the process
"""
# We don't want multiple requests trying to start the process at the same time
# FIXME: Make sure this times out properly?
        # Invariant: when the lock isn't held, 'proc' is either present in
        # self.state and running, or not present at all.
with (await self.state['proc_lock']):
if 'proc' not in self.state:
# FIXME: Prevent races here
# FIXME: Handle graceful exits of spawned processes here
cmd = self.get_cmd()
server_env = os.environ.copy()
# Set up extra environment variables for process
server_env.update(self.get_env())
timeout = self.get_timeout()
proc = SupervisedProcess(self.name, *cmd, env=server_env, ready_func=self._http_ready_func, ready_timeout=timeout, log=self.log)
self.state['proc'] = proc
try:
await proc.start()
is_ready = await proc.ready()
if not is_ready:
await proc.kill()
raise web.HTTPError(500, 'could not start {} in time'.format(self.name))
except:
# Make sure we remove proc from state in any error condition
del self.state['proc']
raise | 0.005556 |
def on_pytoml_dumps(self, pytoml, config, dictionary, **kwargs):
""" The `pytoml <https://pypi.org/project/pytoml/>`_ dumps method.
:param module pytoml: The ``pytoml`` module
:param class config: The instance's config class
:param dict dictionary: The dictionary to serialize
:returns: The TOML serialization
:rtype: str
"""
inline_tables = set(kwargs.get("inline_tables", []))
if len(inline_tables) > 0:
warnings.warn("pytoml does not support 'inline_tables' argument")
return pytoml.dumps(dictionary) | 0.00335 |
def split_bits(value, *bits):
"""
Split integer value into list of ints, according to `bits` list.
For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]
"""
result = []
for b in reversed(bits):
mask = (1 << b) - 1
result.append(value & mask)
value = value >> b
assert value == 0
result.reverse()
return result | 0.002646 |
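A short usage sketch showing that the split can be reversed by shifting the pieces back together (join_bits is a hypothetical helper, not part of the original module):

def join_bits(parts, *bits):
    # Recombine the pieces produced by split_bits for the same bit widths.
    value = 0
    for part, width in zip(parts, bits):
        value = (value << width) | part
    return value

parts = split_bits(0x1234, 4, 8, 4)      # [0x1, 0x23, 0x4]
assert join_bits(parts, 4, 8, 4) == 0x1234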
def clear_doc(self, docname):
"""Remove the data associated with this instance of the domain."""
        # Iterate over a copy so entries can be deleted while looping
        # (mutating a dict during iteration raises RuntimeError on Python 3).
        for fullname, (fn, x) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
        for modname, (fn, x, x, x) in list(self.data['modules'].items()):
if fn == docname:
del self.data['modules'][modname]
        for labelname, (fn, x, x) in list(self.data['labels'].items()):
if fn == docname:
del self.data['labels'][labelname]
        for anonlabelname, (fn, x) in list(self.data['anonlabels'].items()):
if fn == docname:
del self.data['anonlabels'][anonlabelname] | 0.002845 |
def _init_browser(self):
"""Open harness web page.
Open a quiet chrome which:
1. disables extensions,
        2. ignores certificate errors, and
        3. always allows notifications.
"""
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--disable-infobars')
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_experimental_option('prefs', {
'profile.managed_default_content_settings.notifications': 1
})
browser = webdriver.Chrome(chrome_options=chrome_options)
browser.set_page_load_timeout(10)
browser.implicitly_wait(1)
browser.maximize_window()
browser.get(settings.HARNESS_URL)
self._browser = browser
if not wait_until(lambda: 'Thread' in browser.title, 30):
self.assertIn('Thread', browser.title) | 0.002075 |
def OMDict(self, items):
"""
Convert a dictionary (or list of items thereof) of OM objects into an OM object
EXAMPLES::
>>> from openmath import openmath as om
>>> from openmath.convert_pickle import PickleConverter
>>> converter = PickleConverter()
>>> a = om.OMInteger(1)
>>> b = om.OMInteger(3)
>>> o = converter.OMDict([(a,b), (b,b)]); print(o)
OMApplication(
elem=OMSymbol(name='dict', cd='Python', cdbase='http://python.org/'),
arguments=[
OMApplication(
elem=OMSymbol(name='tuple', cd='Python', cdbase='http://python.org/'),
arguments=[
OMInteger(integer=1),
OMInteger(integer=3)]),
OMApplication(
elem=OMSymbol(name='tuple', cd='Python', cdbase='http://python.org/'),
arguments=[
OMInteger(integer=3),
OMInteger(integer=3)])])
>>> converter.to_python(o)
{1: 3, 3: 3}
"""
return om.OMApplication(elem=self.OMSymbol(module='Python', name='dict'),
arguments=[self.OMTuple(item) for item in items]) | 0.006168 |
def list_evaluation_functions(kind=None):
"""Get valid word embedding functions names.
Parameters
----------
kind : ['similarity', 'analogy', None]
Return only valid names for similarity, analogy or both kinds of functions.
Returns
-------
dict or list:
A list of all the valid evaluation function names for the specified
kind. If kind is set to None, returns a dict mapping each valid name to
its respective output list. The valid names can be plugged in
`gluonnlp.model.word_evaluation_model.create(name)`.
"""
if kind is None:
kind = tuple(_REGSITRY_KIND_CLASS_MAP.keys())
if not isinstance(kind, tuple):
if kind not in _REGSITRY_KIND_CLASS_MAP.keys():
raise KeyError(
'Cannot find `kind` {}. Use '
                '`list_evaluation_functions(kind=None).keys()` to get all the '
'valid kinds of evaluation functions.'.format(kind))
reg = registry.get_registry(_REGSITRY_KIND_CLASS_MAP[kind])
return list(reg.keys())
else:
return {name: list_evaluation_functions(kind=name) for name in kind} | 0.001715 |
def diff_dir(dir_cmp, left_path=True):
"""
A generator that, given a ``filecmp.dircmp`` object, yields the paths to all files that are different. Works
recursively.
:param dir_cmp: A ``filecmp.dircmp`` object representing the comparison.
:param left_path: If ``True``, paths will be relative to dircmp.left. Else paths will be relative to dircmp.right.
"""
for name in dir_cmp.diff_files:
if left_path:
path_root = dir_cmp.left
else:
path_root = dir_cmp.right
yield path.joinpath(path_root, name)
for sub in dir_cmp.subdirs.values():
# Need to iterate over the recursive call to make sure the individual values are yielded up the stack
for the_dir in diff_dir(sub, left_path):
yield the_dir | 0.005006 |
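A hedged usage sketch of the generator above together with the standard library's filecmp; the directory names are placeholders and `path.joinpath` is assumed to come from the path library imported by the original module:

import filecmp

cmp = filecmp.dircmp("release_v1", "release_v2")
for changed in diff_dir(cmp):
    print(changed)   # files whose contents differ, rooted at release_v1
for changed in diff_dir(cmp, left_path=False):
    print(changed)   # the same files, rooted at release_v2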
def get_headers(self, instant):
"""
Build the list of headers needed in order to perform S3 operations.
"""
headers = {'x-amz-date': _auth_v4.makeAMZDate(instant)}
if self.body_producer is None:
data = self.data
if data is None:
data = b""
headers["x-amz-content-sha256"] = hashlib.sha256(data).hexdigest()
else:
data = None
headers["x-amz-content-sha256"] = b"UNSIGNED-PAYLOAD"
for key, value in self.metadata.iteritems():
headers["x-amz-meta-" + key] = value
for key, value in self.amz_headers.iteritems():
headers["x-amz-" + key] = value
# Before we check if the content type is set, let's see if we can set
        # it by guessing the mimetype.
self.set_content_type()
if self.content_type is not None:
headers["Content-Type"] = self.content_type
if self.creds is not None:
headers["Authorization"] = self.sign(
headers,
data,
s3_url_context(self.endpoint, self.bucket, self.object_name),
instant,
method=self.action)
return headers | 0.0016 |
def usable_service(self):
"""Return a usable service or None if there is none.
A service is usable if enough configuration to be able to make a
connection is available. If several protocols are usable, MRP will be
preferred over DMAP.
"""
services = self._services
for protocol in self._supported_protocols:
if protocol in services and services[protocol].is_usable():
return services[protocol]
return None | 0.004008 |
def _map_arg(arg):
"""
Return `arg` appropriately parsed or mapped to a usable value.
"""
# Grab the easy to parse values
if isinstance(arg, _ast.Str):
return repr(arg.s)
elif isinstance(arg, _ast.Num):
return arg.n
elif isinstance(arg, _ast.Name):
name = arg.id
if name == 'True':
return True
elif name == 'False':
return False
elif name == 'None':
return None
return name
else:
# Everything else we don't bother with
return Unparseable() | 0.001724 |
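For context, a tiny illustration of the argument nodes this helper deals with, using the standard ast module (newer Python versions parse literals as ast.Constant, while the snippet above predates that and expects the older Str/Num/Name nodes):

import ast

call = ast.parse("register('name', 3, flag)").body[0].value
print([type(a).__name__ for a in call.args])
# ['Constant', 'Constant', 'Name'] on current Python; older versions report Str and Num.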
def to_dict(self):
"""Return all the details of this MLPipeline in a dict.
The dict structure contains all the `__init__` arguments of the
MLPipeline, as well as the current hyperparameter values and the
specification of the tunable_hyperparameters::
{
"primitives": [
"a_primitive",
"another_primitive"
],
"init_params": {
"a_primitive": {
"an_argument": "a_value"
}
},
"hyperparameters": {
"a_primitive#1": {
"an_argument": "a_value",
"another_argument": "another_value",
},
"another_primitive#1": {
"yet_another_argument": "yet_another_value"
}
},
"tunable_hyperparameters": {
"another_primitive#1": {
"yet_another_argument": {
"type": "str",
"default": "a_default_value",
"values": [
"a_default_value",
"yet_another_value"
]
}
}
}
}
"""
return {
'primitives': self.primitives,
'init_params': self.init_params,
'input_names': self.input_names,
'output_names': self.output_names,
'hyperparameters': self.get_hyperparameters(),
'tunable_hyperparameters': self._tunable_hyperparameters
} | 0.001117 |
def fit(self, X, y=None):
"""
Fit the clustering model, computing the centers then embeds the centers
into 2D space using the embedding method specified.
"""
with Timer() as self.fit_time_:
# Fit the underlying estimator
self.estimator.fit(X, y)
# Get the centers
# TODO: is this how sklearn stores all centers in the model?
C = self.cluster_centers_
# Embed the centers in 2D space and get the cluster scores
self.embedded_centers_ = self.transformer.fit_transform(C)
self.scores_ = self._score_clusters(X, y)
# Draw the clusters and fit returns self
self.draw()
return self | 0.00271 |
def get_loglevel(level):
"""
    Return the log level for the given index (falls back to INFO when out of range).
>>> assert get_loglevel(2) == logging.WARN
>>> assert get_loglevel(10) == logging.INFO
"""
try:
return [logging.DEBUG, logging.INFO, logging.WARN][level]
except IndexError:
return logging.INFO | 0.003559 |
def on_message(self, message):
"""Evaluates the function pointed to by json-rpc."""
json_rpc = json.loads(message)
logging.log(logging.DEBUG, json_rpc)
if self.pool is None:
self.pool = multiprocessing.Pool(processes=args.workers)
# Spawn a process to protect the server against segfaults
        # 'async' is a reserved keyword since Python 3.7, so use another name.
        async_result = self.pool.apply_async(_worker_process, [json_rpc])
try:
            result = async_result.get(timeout=args.timeout)
error = 0
except multiprocessing.TimeoutError:
result = ("File format conversion timed out! This is due "
"either to a large input file or a segmentation "
"fault in the underlying open babel library.")
error = 1
self.pool.terminate()
self.pool = multiprocessing.Pool(processes=args.workers)
except Exception:
result = traceback.format_exc()
error = 1
logging.log(logging.DEBUG, result)
self.write_message(json.dumps({'result': result, 'error': error,
'id': json_rpc['id']},
separators=(',', ':'))) | 0.0033 |
def create_output_directories(self):
"""Create output directories for thumbnails and original images."""
check_or_create_dir(self.dst_path)
if self.medias:
check_or_create_dir(join(self.dst_path,
self.settings['thumb_dir']))
if self.medias and self.settings['keep_orig']:
self.orig_path = join(self.dst_path, self.settings['orig_dir'])
check_or_create_dir(self.orig_path) | 0.004184 |
def as_machine(self):
"""Convert to a `Machine` object.
`node_type` must be `NodeType.MACHINE`.
"""
if self.node_type != NodeType.MACHINE:
raise ValueError(
'Cannot convert to `Machine`, node_type is not a machine.')
return self._origin.Machine(self._data) | 0.006154 |
def render(self, cli, layout, is_done=False):
"""
Render the current interface to the output.
:param is_done: When True, put the cursor at the end of the interface. We
won't print any changes to this part.
"""
output = self.output
# Enter alternate screen.
if self.use_alternate_screen and not self._in_alternate_screen:
self._in_alternate_screen = True
output.enter_alternate_screen()
# Enable bracketed paste.
if not self._bracketed_paste_enabled:
self.output.enable_bracketed_paste()
self._bracketed_paste_enabled = True
# Enable/disable mouse support.
needs_mouse_support = self.mouse_support(cli)
if needs_mouse_support and not self._mouse_support_enabled:
output.enable_mouse_support()
self._mouse_support_enabled = True
elif not needs_mouse_support and self._mouse_support_enabled:
output.disable_mouse_support()
self._mouse_support_enabled = False
# Create screen and write layout to it.
size = output.get_size()
screen = Screen()
screen.show_cursor = False # Hide cursor by default, unless one of the
# containers decides to display it.
mouse_handlers = MouseHandlers()
if is_done:
            height = 0  # When we are done, we don't necessarily want to fill up until the bottom.
else:
height = self._last_screen.height if self._last_screen else 0
height = max(self._min_available_height, height)
        # When the size changes, don't consider the previous screen.
if self._last_size != size:
self._last_screen = None
# When we render using another style, do a full repaint. (Forget about
# the previous rendered screen.)
# (But note that we still use _last_screen to calculate the height.)
if self.style.invalidation_hash() != self._last_style_hash:
self._last_screen = None
self._attrs_for_token = None
if self._attrs_for_token is None:
self._attrs_for_token = _TokenToAttrsCache(self.style.get_attrs_for_token)
self._last_style_hash = self.style.invalidation_hash()
layout.write_to_screen(cli, screen, mouse_handlers, WritePosition(
xpos=0,
ypos=0,
width=size.columns,
height=(size.rows if self.use_alternate_screen else height),
extended_height=size.rows,
))
# When grayed. Replace all tokens in the new screen.
if cli.is_aborting or cli.is_exiting:
screen.replace_all_tokens(Token.Aborted)
# Process diff and write to output.
self._cursor_pos, self._last_token = _output_screen_diff(
output, screen, self._cursor_pos,
self._last_screen, self._last_token, is_done,
use_alternate_screen=self.use_alternate_screen,
attrs_for_token=self._attrs_for_token,
size=size,
previous_width=(self._last_size.columns if self._last_size else 0))
self._last_screen = screen
self._last_size = size
self.mouse_handlers = mouse_handlers
# Write title if it changed.
new_title = cli.terminal_title
if new_title != self._last_title:
if new_title is None:
self.output.clear_title()
else:
self.output.set_title(new_title)
self._last_title = new_title
output.flush() | 0.001656 |
def in_tree(self, cmd_args):
""" if a command is in the tree """
if not cmd_args:
return True
tree = self
try:
for datum in cmd_args:
tree = tree.get_child(datum)
except ValueError:
return False
return True | 0.006536 |
def Uri(self):
        """ Constructs the connection URI from name, noSsl and port instance variables. """
        scheme = "http" if self._noSsl else "https"
        # Omit the port only when it is the default one for the scheme
        # (80 for plain http, 443 for https).
        default_port = 80 if self._noSsl else 443
        port = "" if self._port == default_port else ":" + str(self._port)
        return "%s://%s%s" % (scheme, self._name, port) | 0.035144 |
def simplex_remove_arc(self, t, p, q, min_capacity, cycle):
'''
API:
            simplex_remove_arc(self, t, p, q, min_capacity, cycle)
Description:
            Removes arc (p,q), updates t, and updates the flows along the
            cycle that was obtained when the entering arc was considered.
Input:
t: tree solution to be updated.
p: tail of the leaving arc.
q: head of the leaving arc.
min_capacity: capacity of the cycle.
cycle: cycle obtained when entering arc considered.
Post:
(1) updates t.
(2) updates 'flow' attributes.
'''
# augment min_capacity along cycle
n = len(cycle)
tel = list(t.edge_attr.keys())
index = 0
while index < (n-1):
if (cycle[index], cycle[index+1]) in tel:
flow_e = self.edge_attr[(cycle[index], cycle[index+1])]['flow']
self.edge_attr[(cycle[index], cycle[index+1])]['flow'] =\
flow_e+min_capacity
else:
flow_e = self.edge_attr[(cycle[index+1], cycle[index])]['flow']
self.edge_attr[(cycle[index+1], cycle[index])]['flow'] =\
flow_e-min_capacity
index += 1
# augment arc cycle[n-1], cycle[0]
if (cycle[n-1], cycle[0]) in tel:
flow_e = self.edge_attr[(cycle[n-1], cycle[0])]['flow']
self.edge_attr[(cycle[n-1], cycle[0])]['flow'] =\
flow_e+min_capacity
else:
flow_e = self.edge_attr[(cycle[0], cycle[n-1])]['flow']
self.edge_attr[(cycle[0], cycle[n-1])]['flow'] =\
flow_e-min_capacity
# remove leaving arc
t.del_edge((p, q))
# set label of removed arc
flow_pq = self.get_edge_attr(p, q, 'flow')
capacity_pq = self.get_edge_attr(p, q, 'capacity')
cost_pq = self.get_edge_attr(p, q, 'cost')
self.set_edge_attr(p, q, 'label',
"%d/%d/%d" %(flow_pq,capacity_pq,cost_pq))
for e in t.edge_attr:
flow = self.edge_attr[e]['flow']
capacity = self.edge_attr[e]['capacity']
cost = self.edge_attr[e]['cost']
t.edge_attr[e]['flow'] = flow
t.edge_attr[e]['capacity'] = capacity
t.edge_attr[e]['cost'] = cost
t.edge_attr[e]['label'] = "%d/%d/%d" %(flow,capacity,cost)
self.edge_attr[e]['label'] = "%d/%d/%d" %(flow,capacity,cost) | 0.004398 |
def get_first_property(elt, key, default=None, ctx=None):
"""Get first property related to one input key.
    :param elt: element from which to get the property. Must not be None.
:param str key: property key to get.
:param default: default value to return if key does not exist in elt.
properties
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
"""
result = default
properties = _get_properties(elt, keys=(key,), ctx=ctx, first=True)
# set value if key exists in properties
if key in properties:
result = properties[key]
return result | 0.001379 |
def encode_network(root):
"""Yield ref-containing obj table entries from object network"""
def fix_values(obj):
if isinstance(obj, Container):
obj.update((k, get_ref(v)) for (k, v) in obj.items()
if k != 'class_name')
fixed_obj = obj
elif isinstance(obj, Dictionary):
fixed_obj = obj.__class__(dict(
(get_ref(field), get_ref(value))
for (field, value) in obj.value.items()
))
elif isinstance(obj, dict):
fixed_obj = dict(
(get_ref(field), get_ref(value))
for (field, value) in obj.items()
)
elif isinstance(obj, list):
fixed_obj = [get_ref(field) for field in obj]
elif isinstance(obj, Form):
fixed_obj = obj.__class__(**dict(
(field, get_ref(value))
for (field, value) in obj.value.items()
))
elif isinstance(obj, ContainsRefs):
fixed_obj = obj.__class__([get_ref(field)
for field in obj.value])
else:
return obj
fixed_obj._made_from = obj
return fixed_obj
objects = []
def get_ref(obj, objects=objects):
obj = PythonicAdapter(Pass)._encode(obj, None)
if isinstance(obj, (FixedObject, Container)):
if getattr(obj, '_index', None):
index = obj._index
else:
objects.append(None)
obj._index = index = len(objects)
objects[index - 1] = fix_values(obj)
return Ref(index)
else:
return obj # Inline value
get_ref(root)
for obj in objects:
if getattr(obj, '_index', None):
del obj._index
return objects | 0.001605 |
def predict(self, h=5, intervals=False, **kwargs):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
intervals : boolean (default: False)
Whether to return prediction intervals
Returns
----------
- pd.DataFrame with predictions
"""
nsims = kwargs.get('nsims', 200)
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
# Retrieve data, dates and (transformed) latent variables
if self.latent_variables.estimation_method in ['M-H']:
lower_1_final = 0
upper_99_final = 0
lower_5_final = 0
upper_95_final = 0
forecasted_values_final = 0
date_index = self.shift_dates(h)
for i in range(nsims):
t_params = self.draw_latent_variables(nsims=1).T[0]
a, P = self._forecast_model(t_params, h)
forecasted_values = a[0][-h:]
lower_5 = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
upper_95 = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
lower_5_final += lower_5
upper_95_final += upper_95
lower_1 = forecasted_values - 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
upper_99 = forecasted_values + 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
lower_1_final += lower_1
upper_99_final += upper_99
forecasted_values_final += forecasted_values
forecasted_values_final = forecasted_values_final / nsims
lower_1_final = lower_1_final / nsims
lower_5_final = lower_5_final / nsims
upper_95_final = upper_95_final / nsims
upper_99_final = upper_99_final / nsims
if intervals is False:
result = pd.DataFrame(forecasted_values_final)
result.rename(columns={0:self.data_name}, inplace=True)
else:
prediction_05 = lower_5_final
prediction_95 = upper_95_final
prediction_01 = lower_1_final
prediction_99 = upper_99_final
result = pd.DataFrame([forecasted_values_final, prediction_01, prediction_05,
prediction_95, prediction_99]).T
result.rename(columns={0:self.data_name, 1: "1% Prediction Interval",
2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"},
inplace=True)
result.index = date_index[-h:]
return result
else:
# Retrieve data, dates and (transformed) latent variables
a, P = self._forecast_model(self.latent_variables.get_z_values(),h)
date_index = self.shift_dates(h)
forecasted_values = a[0][-h:]
if intervals is False:
result = pd.DataFrame(forecasted_values)
result.rename(columns={0:self.data_name}, inplace=True)
else:
prediction_05 = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
prediction_95 = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
prediction_01 = forecasted_values - 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
prediction_99 = forecasted_values + 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
result = pd.DataFrame([forecasted_values, prediction_01, prediction_05,
prediction_95, prediction_99]).T
result.rename(columns={0:self.data_name, 1: "1% Prediction Interval",
2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"},
inplace=True)
result.index = date_index[-h:]
return result | 0.009812 |
def set_exception(self, exception):
"""Set the result of the future to the given exception.
Args:
exception (:exc:`Exception`): The exception raised.
"""
# Sanity check: A future can only complete once.
if self.done():
raise RuntimeError("set_exception can only be called once.")
# Set the exception and trigger the future.
self._exception = exception
self._trigger() | 0.004376 |
def _project_perturbation(perturbation, epsilon, input_image, clip_min=None,
clip_max=None):
"""Project `perturbation` onto L-infinity ball of radius `epsilon`.
Also project into hypercube such that the resulting adversarial example
is between clip_min and clip_max, if applicable.
"""
if clip_min is None or clip_max is None:
raise NotImplementedError("_project_perturbation currently has clipping "
"hard-coded in.")
# Ensure inputs are in the correct range
with tf.control_dependencies([
utils_tf.assert_less_equal(input_image,
tf.cast(clip_max, input_image.dtype)),
utils_tf.assert_greater_equal(input_image,
tf.cast(clip_min, input_image.dtype))
]):
clipped_perturbation = utils_tf.clip_by_value(
perturbation, -epsilon, epsilon)
new_image = utils_tf.clip_by_value(
input_image + clipped_perturbation, clip_min, clip_max)
return new_image - input_image | 0.004789 |
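A NumPy analogue of the same projection (a cross-check of the math, not the TensorFlow implementation above): clip the perturbation into the L-infinity ball of radius epsilon, clip the perturbed image into [clip_min, clip_max], and return the difference:

import numpy as np

def project_perturbation_np(perturbation, epsilon, input_image, clip_min=0.0, clip_max=1.0):
    clipped = np.clip(perturbation, -epsilon, epsilon)
    new_image = np.clip(input_image + clipped, clip_min, clip_max)
    return new_image - input_image

x = np.array([0.1, 0.5, 0.95])
print(project_perturbation_np(np.array([0.3, -0.3, 0.3]), 0.2, x))   # [ 0.2  -0.2   0.05]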
def set_close_callback(self, callback: Optional[Callable[[], None]]) -> None:
"""Call the given callback when the stream is closed.
This mostly is not necessary for applications that use the
`.Future` interface; all outstanding ``Futures`` will resolve
with a `StreamClosedError` when the stream is closed. However,
it is still useful as a way to signal that the stream has been
closed while no other read or write is in progress.
Unlike other callback-based interfaces, ``set_close_callback``
was not removed in Tornado 6.0.
"""
self._close_callback = callback
self._maybe_add_error_listener() | 0.002924 |
def fraction_correct_pandas(dataframe, x_series, y_series, x_cutoff = 1.0, y_cutoff = 1.0, ignore_null_values = False):
'''A little (<6%) slower than fraction_correct due to the data extraction overhead.'''
return fraction_correct(dataframe[x_series].values.tolist(), dataframe[y_series].values.tolist(), x_cutoff = x_cutoff, y_cutoff = y_cutoff, ignore_null_values = ignore_null_values) | 0.040506 |
def delete(self, request, bot_id, id, format=None):
"""
Delete existing state
---
responseMessages:
- code: 401
message: Not authenticated
"""
return super(StateDetail, self).delete(request, bot_id, id, format) | 0.007042 |
def _validate_changeset(self, changeset_id: uuid.UUID) -> None:
"""
Checks to be sure the changeset is known by the journal
"""
if not self.journal.has_changeset(changeset_id):
raise ValidationError("Changeset not found in journal: {0}".format(
str(changeset_id)
)) | 0.005935 |
def generateAPIRootBody(self):
'''
Generates the root library api file's body text. The method calls
:func:`~exhale.graph.ExhaleRoot.gerrymanderNodeFilenames` first to enable proper
internal linkage between reStructuredText documents. Afterward, it calls
:func:`~exhale.graph.ExhaleRoot.generateViewHierarchies` followed by
:func:`~exhale.graph.ExhaleRoot.generateUnabridgedAPI` to generate both
hierarchies as well as the full API listing. As a result, three files will now
be ready:
1. ``self.class_hierarchy_file``
2. ``self.file_hierarchy_file``
3. ``self.unabridged_api_file``
These three files are then *included* into the root library file. The
consequence of using an ``include`` directive is that Sphinx will complain about
these three files never being included in any ``toctree`` directive. These
warnings are expected, and preferred to using a ``toctree`` because otherwise
the user would have to click on the class view link from the ``toctree`` in
order to see it. This behavior has been acceptable for me so far, but if it
is causing you problems please raise an issue on GitHub and I may be able to
conditionally use a ``toctree`` if you really need it.
'''
try:
self.gerrymanderNodeFilenames()
self.generateViewHierarchies()
self.generateUnabridgedAPI()
with codecs.open(self.full_root_file_path, "a", "utf-8") as generated_index:
# Include the class and file hierarchies
generated_index.write(".. include:: {0}\n\n".format(
os.path.basename(self.class_hierarchy_file)
))
generated_index.write(".. include:: {0}\n\n".format(
os.path.basename(self.file_hierarchy_file)
))
# Add the afterHierarchyDescription if provided
if configs.afterHierarchyDescription:
generated_index.write(
"\n{0}\n\n".format(configs.afterHierarchyDescription)
)
# Include the unabridged API
generated_index.write(".. include:: {0}\n\n".format(
os.path.basename(self.unabridged_api_file)
))
# Add the afterBodySummary if provided
if configs.afterBodySummary:
generated_index.write(
"\n{0}\n\n".format(configs.afterBodySummary)
)
# The following should only be applied to the page library root page
# Applying it to other pages will result in an error
if self.use_tree_view and configs.treeViewIsBootstrap:
generated_index.write(textwrap.dedent('''
.. raw:: html
<script type="text/javascript">
/* NOTE: if you are reading this, Exhale generated this directly. */
$(document).ready(function() {{
/* Inspired by very informative answer to get color of links:
https://stackoverflow.com/a/2707837/3814202 */
var $fake_link = $('<a href="#"></a>').hide().appendTo("body");
var linkColor = $fake_link.css("color");
$fake_link.remove();
var $fake_p = $('<p class="{icon_mimic}"></p>').hide().appendTo("body");
var iconColor = $fake_p.css("color");
$fake_p.remove();
/* After much deliberation, using JavaScript directly to enforce that the
* link and glyphicon receive different colors is fruitless, because the
* bootstrap treeview library will overwrite the style every time. Instead,
* leaning on the library code itself to append some styling to the head,
* I choose to mix a couple of things:
*
* 1. Set the `color` property of bootstrap treeview globally, this would
* normally affect the color of both the link text and the icon.
* 2. Apply custom forced styling of the glyphicon itself in order to make
* it a little more clear to the user (via different colors) that the
* act of clicking the icon and the act of clicking the link text perform
* different actions. The icon expands, the text navigates to the page.
*/
// Part 1: use linkColor as a parameter to bootstrap treeview
// apply the class view hierarchy
$("#{class_idx}").treeview({{
data: {class_func_name}(),
enableLinks: true,
color: linkColor,
showTags: {show_tags},
collapseIcon: "{collapse_icon}",
expandIcon: "{expand_icon}",
levels: {levels},
onhoverColor: "{onhover_color}"
}});
// apply the file view hierarchy
$("#{file_idx}").treeview({{
data: {file_func_name}(),
enableLinks: true,
color: linkColor,
showTags: {show_tags},
collapseIcon: "{collapse_icon}",
expandIcon: "{expand_icon}",
levels: {levels},
onhoverColor: "{onhover_color}"
}});
// Part 2: override the style of the glyphicons by injecting some CSS
$('<style type="text/css" id="exhaleTreeviewOverride">' +
' .treeview span[class~=icon] {{ ' +
' color: ' + iconColor + ' ! important;' +
' }}' +
'</style>').appendTo('head');
}});
</script>
'''.format(
icon_mimic=configs.treeViewBootstrapIconMimicColor,
class_idx=configs._class_hierarchy_id,
class_func_name=configs._bstrap_class_hierarchy_fn_data_name,
file_idx=configs._file_hierarchy_id,
file_func_name=configs._bstrap_file_hierarchy_fn_data_name,
show_tags="true" if configs.treeViewBootstrapUseBadgeTags else "false",
collapse_icon=configs.treeViewBootstrapCollapseIcon,
expand_icon=configs.treeViewBootstrapExpandIcon,
levels=configs.treeViewBootstrapLevels,
onhover_color=configs.treeViewBootstrapOnhoverColor
)))
except:
utils.fancyError(
"Unable to create the root api body: [{0}]".format(self.full_root_file_path)
) | 0.004789 |
def sentence_starts(self):
"""The list of start positions representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.starts(SENTENCES) | 0.0131 |
def _parse_leaves(self, leaves) -> List[Tuple[str, int]]:
"""Returns a list of pairs (leaf_name, distance)"""
return [(self._leaf_name(leaf), 0) for leaf in leaves] | 0.011111 |
def cumsum(item_list, initial=0):
""" python cumsum
Args:
item_list (list): list of numbers or items supporting addition
initial (value): initial zero value
Returns:
list: list of accumulated values
References:
stackoverflow.com/questions/9258602/elegant-pythonic-cumsum
CommandLine:
python -m utool.util_alg cumsum
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 3, 4, 5]
>>> initial = 0
>>> result = cumsum(item_list, initial)
>>> assert result == [1, 3, 6, 10, 15]
>>> print(result)
>>> item_list = zip([1, 2, 3, 4, 5])
>>> initial = tuple()
>>> result2 = cumsum(item_list, initial)
>>> assert result2 == [(1,), (1, 2), (1, 2, 3), (1, 2, 3, 4), (1, 2, 3, 4, 5)]
>>> print(result2)
"""
def accum(acc, itm):
return op.iadd(acc, [acc[-1] + itm])
return reduce(accum, item_list, [initial])[1:] | 0.000949 |
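For the plain numeric case, the standard library's itertools.accumulate produces the same running sums (a quick cross-check, not part of the original module):

import itertools

assert list(itertools.accumulate([1, 2, 3, 4, 5])) == [1, 3, 6, 10, 15]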
def attach_temporary_file(self, service_desk_id, filename):
"""
Create temporary attachment, which can later be converted into permanent attachment
:param service_desk_id: str
:param filename: str
:return: Temporary Attachment ID
"""
headers = {'X-Atlassian-Token': 'no-check', 'X-ExperimentalApi': 'opt-in'}
url = 'rest/servicedeskapi/servicedesk/{}/attachTemporaryFile'.format(service_desk_id)
with open(filename, 'rb') as file:
result = self.post(url, headers=headers, files={'file': file}).get('temporaryAttachments')
temp_attachment_id = result[0].get('temporaryAttachmentId')
return temp_attachment_id | 0.008357 |
def _minimize_neldermead(
func,
x0,
args=(),
callback=None,
xtol=1e-4,
ftol=1e-4,
maxiter=None,
maxfev=None,
disp=False,
return_all=False,
): # pragma: no cover
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options for the Nelder-Mead algorithm are:
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfev : int
Maximum number of function evaluations to make.
"""
maxfun = maxfev
retall = return_all
fcalls, func = wrap_function(func, args)
x0 = asfarray(x0).flatten()
N = len(x0)
rank = len(x0.shape)
if not -1 < rank < 2:
raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
if maxiter is None:
maxiter = N * 200
if maxfun is None:
maxfun = N * 200
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
one2np1 = list(range(1, N + 1))
if rank == 0:
sim = numpy.zeros((N + 1,), dtype=x0.dtype)
else:
sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
fsim = numpy.zeros((N + 1,), float)
sim[0] = x0
if retall:
allvecs = [sim[0]]
fsim[0] = func(x0)
nonzdelt = 0.05
zdelt = 0.00025
for k in range(0, N):
y = numpy.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt) * y[k]
else:
y[k] = zdelt
sim[k + 1] = y
f = func(y)
fsim[k + 1] = f
ind = numpy.argsort(fsim)
fsim = numpy.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = numpy.take(sim, ind, 0)
iterations = 1
while fcalls[0] < maxfun and iterations < maxiter:
if (
numpy.max(numpy.ravel(numpy.abs((sim[1:] - sim[0]) / sim[0])))
<= xtol
and numpy.max(numpy.abs((fsim[0] - fsim[1:]) / fsim[0])) <= ftol
):
break
xbar = numpy.add.reduce(sim[:-1], 0) / N
xr = (1 + rho) * xbar - rho * sim[-1]
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
fsim[j] = func(sim[j])
ind = numpy.argsort(fsim)
sim = numpy.take(sim, ind, 0)
fsim = numpy.take(fsim, ind, 0)
if callback is not None:
callback(sim[0])
iterations += 1
if retall:
allvecs.append(sim[0])
x = sim[0]
fval = numpy.min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message["maxfev"]
if disp:
print("Warning: " + msg)
elif iterations >= maxiter:
warnflag = 2
msg = _status_message["maxiter"]
if disp:
print("Warning: " + msg)
else:
msg = _status_message["success"]
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
print(" Function evaluations: %d" % fcalls[0])
result = OptimizeResult(
fun=fval,
nit=iterations,
nfev=fcalls[0],
status=warnflag,
success=(warnflag == 0),
message=msg,
x=x,
)
if retall:
result["allvecs"] = allvecs
return result | 0.000207 |
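For comparison, SciPy's built-in Nelder-Mead driver exposes roughly the same knobs (its xatol/fatol are absolute tolerances, whereas xtol/ftol above are relative); a quick sanity check on a simple quadratic:

from scipy.optimize import minimize

res = minimize(lambda v: (v[0] - 1.0) ** 2 + (v[1] + 2.0) ** 2,
               x0=[0.0, 0.0], method="Nelder-Mead",
               options={"xatol": 1e-4, "fatol": 1e-4, "maxiter": 400})
print(res.x)   # approximately [1.0, -2.0]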
def delete(filething):
"""Remove tags from a file.
Args:
filething (filething)
Raises:
mutagen.MutagenError
"""
dsf_file = DSFFile(filething.fileobj)
if dsf_file.dsd_chunk.offset_metdata_chunk != 0:
id3_location = dsf_file.dsd_chunk.offset_metdata_chunk
dsf_file.dsd_chunk.offset_metdata_chunk = 0
dsf_file.dsd_chunk.write()
filething.fileobj.seek(id3_location)
filething.fileobj.truncate() | 0.00211 |
def _check_view_permission(self, view):
"""
:param view: a :class:`ObjectView` class or instance
"""
security = get_service("security")
return security.has_permission(current_user, view.permission, self.obj) | 0.008097 |
def add_data(self, *args):
"""Add data to signer"""
for data in args:
self._data.append(to_binary(data)) | 0.015152 |
def line(self, x1,y1,x2,y2):
"Draw a line"
self._out(sprintf('%.2f %.2f m %.2f %.2f l S',x1*self.k,(self.h-y1)*self.k,x2*self.k,(self.h-y2)*self.k)) | 0.060976 |
def imag(self):
"""Imaginary part"""
return self.__class__.create(self.term.imag, *self.ranges) | 0.018018 |
def calculate_sunrise_sunset(locator, calc_date=None):
"""calculates the next sunset and sunrise for a Maidenhead locator at a give date & time
Args:
        locator (string): Maidenhead Locator, either 4 or 6 characters
        calc_date (datetime, optional): Starting datetime for the calculations (UTC); defaults to the current UTC time
Returns:
dict: Containing datetimes for morning_dawn, sunrise, evening_dawn, sunset
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the next sunrise & sunset for JN48QM on the 1./Jan/2014
>>> from pyhamtools.locator import calculate_sunrise_sunset
>>> from datetime import datetime
>>> import pytz
>>> UTC = pytz.UTC
>>> myDate = datetime(year=2014, month=1, day=1, tzinfo=UTC)
>>> calculate_sunrise_sunset("JN48QM", myDate)
{
'morning_dawn': datetime.datetime(2014, 1, 1, 6, 36, 51, 710524, tzinfo=<UTC>),
'sunset': datetime.datetime(2014, 1, 1, 16, 15, 23, 31016, tzinfo=<UTC>),
'evening_dawn': datetime.datetime(2014, 1, 1, 15, 38, 8, 355315, tzinfo=<UTC>),
'sunrise': datetime.datetime(2014, 1, 1, 7, 14, 6, 162063, tzinfo=<UTC>)
}
"""
morning_dawn = None
sunrise = None
evening_dawn = None
sunset = None
    # Resolve the default here: a datetime.utcnow() default in the signature
    # would only be evaluated once, at import time.
    if calc_date is None:
        calc_date = datetime.utcnow()
    latitude, longitude = locator_to_latlong(locator)
    if not isinstance(calc_date, datetime):
        raise ValueError
sun = ephem.Sun()
home = ephem.Observer()
home.lat = str(latitude)
home.long = str(longitude)
home.date = calc_date
sun.compute(home)
try:
nextrise = home.next_rising(sun)
nextset = home.next_setting(sun)
home.horizon = '-6'
beg_twilight = home.next_rising(sun, use_center=True)
end_twilight = home.next_setting(sun, use_center=True)
morning_dawn = beg_twilight.datetime()
sunrise = nextrise.datetime()
evening_dawn = nextset.datetime()
sunset = end_twilight.datetime()
    # if the sun never sets or rises (e.g. at the polar circles)
    except (ephem.AlwaysUpError, ephem.NeverUpError):
        morning_dawn = None
        sunrise = None
        evening_dawn = None
        sunset = None
result = {}
result['morning_dawn'] = morning_dawn
result['sunrise'] = sunrise
result['evening_dawn'] = evening_dawn
result['sunset'] = sunset
if morning_dawn:
result['morning_dawn'] = morning_dawn.replace(tzinfo=UTC)
if sunrise:
result['sunrise'] = sunrise.replace(tzinfo=UTC)
if evening_dawn:
result['evening_dawn'] = evening_dawn.replace(tzinfo=UTC)
if sunset:
result['sunset'] = sunset.replace(tzinfo=UTC)
return result | 0.003323 |
def exp(self):
""" Returns the exponent of the quaternion.
(not tested)
"""
# Init
        # Norm of the vector part; the original squared-norm expression was a bug.
        vecNorm = np.sqrt(self.x**2 + self.y**2 + self.z**2)
wPart = np.exp(self.w)
q = Quaternion()
# Calculate
q.w = wPart * np.cos(vecNorm)
q.x = wPart * self.x * np.sin(vecNorm) / vecNorm
q.y = wPart * self.y * np.sin(vecNorm) / vecNorm
q.z = wPart * self.z * np.sin(vecNorm) / vecNorm
return q | 0.013889 |
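A standalone sanity check of the quaternion exponential formula, assuming the usual convention exp(w + v) = e^w (cos|v| + (v/|v|) sin|v|); the function name and tuple return are illustrative only:

import numpy as np

def quat_exp(w, x, y, z):
    n = np.sqrt(x * x + y * y + z * z)   # norm of the vector part
    ew = np.exp(w)
    if n == 0.0:
        return ew, 0.0, 0.0, 0.0
    s = ew * np.sin(n) / n
    return ew * np.cos(n), s * x, s * y, s * z

# A pure quaternion (0, theta*u) with unit axis u should map to (cos theta, u sin theta).
print(quat_exp(0.0, 0.0, 0.0, 0.3))   # ~ (0.9553, 0.0, 0.0, 0.2955)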
def _get_self_bounds(self):
"""
Computes the bounds of the object itself (not including it's children)
in the form [[lat_min, lon_min], [lat_max, lon_max]].
"""
if not self.embed:
raise ValueError('Cannot compute bounds of non-embedded GeoJSON.')
data = json.loads(self.data)
if 'features' not in data.keys():
# Catch case when GeoJSON is just a single Feature or a geometry.
if not (isinstance(data, dict) and 'geometry' in data.keys()):
# Catch case when GeoJSON is just a geometry.
data = {'type': 'Feature', 'geometry': data}
data = {'type': 'FeatureCollection', 'features': [data]}
bounds = [[None, None], [None, None]]
for feature in data['features']:
for point in iter_points(feature.get('geometry', {}).get('coordinates', {})): # noqa
bounds = [
[
none_min(bounds[0][0], point[1]),
none_min(bounds[0][1], point[0]),
],
[
none_max(bounds[1][0], point[1]),
none_max(bounds[1][1], point[0]),
],
]
return bounds | 0.001526 |
def Lexicon(**rules):
"""Create a dictionary mapping symbols to alternative words.
>>> Lexicon(Art = "the | a | an")
{'Art': ['the', 'a', 'an']}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = [word.strip() for word in rhs.split('|')]
return rules | 0.003559 |
def call_command(self, cmd, *argv):
"""
Runs a command.
:param cmd: command to run (key at the registry)
:param argv: arguments that would be passed to the command
"""
parser = self.get_parser()
args = [cmd] + list(argv)
namespace = parser.parse_args(args)
self.run_command(namespace) | 0.005602 |
def mavfmt(field):
'''work out the struct format for a type'''
map = {
'float' : 'f',
'double' : 'd',
'char' : 'c',
'int8_t' : 'b',
'uint8_t' : 'B',
'uint8_t_mavlink_version' : 'B',
'int16_t' : 'h',
'uint16_t' : 'H',
'int32_t' : 'i',
'uint32_t' : 'I',
'int64_t' : 'q',
'uint64_t' : 'Q',
}
if field.array_length:
if field.type in ['char', 'int8_t', 'uint8_t']:
return str(field.array_length)+'s'
return str(field.array_length)+map[field.type]
return map[field.type] | 0.020668 |
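A short illustration of how those format characters feed into the struct module (the field values are made up; mavfmt itself is not needed for the demo):

import struct

# 'B' (uint8), 'H' (uint16) and 'f' (float) as produced by the table above;
# '<' selects little-endian packing with no padding.
packed = struct.pack("<BHf", 1, 500, 3.5)
print(len(packed), struct.unpack("<BHf", packed))   # 7 (1, 500, 3.5)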
def _parse_day(optval):
''' Parse a --day argument '''
isreq = not optval.startswith('+')
if not isreq:
optval = optval[1:]
try:
retnval = []
unit = None
for val in optval.split(','):
if not val:
raise ValueError
if val[-1].isdigit():
newunit = 'dayofmonth' if isreq else 'day'
if unit is None:
unit = newunit
elif newunit != unit:
raise ValueError
retnval.append(int(val))
else:
newunit = 'dayofweek'
if unit is None:
unit = newunit
elif newunit != unit:
raise ValueError
weekday = Cron._parse_weekday(val)
if weekday is None:
raise ValueError
retnval.append(weekday)
if len(retnval) == 0:
raise ValueError
except ValueError:
return None, None
if len(retnval) == 1:
retnval = retnval[0]
return unit, retnval | 0.001595 |
def import_transcript_from_fs(edx_video_id, language_code, file_name, provider, resource_fs, static_dir):
"""
Imports transcript file from file system and creates transcript record in DS.
Arguments:
edx_video_id (str): Video id of the video.
language_code (unicode): Language code of the requested transcript.
file_name (unicode): File name of the transcript file.
provider (unicode): Transcript provider.
resource_fs (OSFS): Import file system.
static_dir (str): The Directory to retrieve transcript file.
"""
file_format = None
transcript_data = get_video_transcript_data(edx_video_id, language_code)
# First check if transcript record does not exist.
if not transcript_data:
# Read file from import file system and attach it to transcript record in DS.
try:
with resource_fs.open(combine(static_dir, file_name), 'rb') as f:
file_content = f.read()
file_content = file_content.decode('utf-8-sig')
except ResourceNotFound as exc:
# Don't raise exception in case transcript file is not found in course OLX.
logger.warn(
'[edx-val] "%s" transcript "%s" for video "%s" is not found.',
language_code,
file_name,
edx_video_id
)
return
except UnicodeDecodeError:
# Don't raise exception in case transcript contains non-utf8 content.
logger.warn(
'[edx-val] "%s" transcript "%s" for video "%s" contains a non-utf8 file content.',
language_code,
file_name,
edx_video_id
)
return
# Get file format from transcript content.
try:
file_format = get_transcript_format(file_content)
except Error as ex:
# Don't raise exception, just don't create transcript record.
logger.warn(
'[edx-val] Error while getting transcript format for video=%s -- language_code=%s --file_name=%s',
edx_video_id,
language_code,
file_name
)
return
# Create transcript record.
create_video_transcript(
video_id=edx_video_id,
language_code=language_code,
file_format=file_format,
content=ContentFile(file_content),
provider=provider
) | 0.003185 |
def find_closest_in_list(number, array, direction="both", strictly=False):
"""
Find the closest number in the array from x.
Parameters
----------
number : float
The number.
array : list
The list to look in.
direction : str
"both" for smaller or greater, "greater" for only greater numbers and "smaller" for the closest smaller.
strictly : bool
        True for strictly greater/smaller, False to also accept equal values.
Returns
----------
closest : int
The closest number in the array.
Example
----------
>>> import neurokit as nk
>>> nk.find_closest_in_list(1.8, [3, 5, 6, 1, 2])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
"""
if direction == "both":
closest = min(array, key=lambda x:abs(x-number))
if direction == "smaller":
if strictly is True:
closest = max(x for x in array if x < number)
else:
closest = max(x for x in array if x <= number)
if direction == "greater":
if strictly is True:
closest = min(filter(lambda x: x > number, array))
else:
closest = min(filter(lambda x: x >= number, array))
return(closest) | 0.002326 |
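A brief usage sketch of the three directions, assuming the function is imported as in the docstring example:

array = [3, 5, 6, 1, 2]
print(find_closest_in_list(1.8, array))                                      # 2
print(find_closest_in_list(1.8, array, direction="smaller"))                 # 1
print(find_closest_in_list(1.8, array, direction="greater"))                 # 2
print(find_closest_in_list(2, array, direction="greater", strictly=True))    # 3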
def _add_none_handler(validation_callable, # type: Callable
none_policy # type: int
):
# type: (...) -> Callable
"""
Adds a wrapper or nothing around the provided validation_callable, depending on the selected policy
:param validation_callable:
:param none_policy: an int representing the None policy, see NonePolicy
:return:
"""
if none_policy is NonePolicy.SKIP:
return _none_accepter(validation_callable) # accept all None values
elif none_policy is NonePolicy.FAIL:
return _none_rejecter(validation_callable) # reject all None values
elif none_policy is NonePolicy.VALIDATE:
return validation_callable # do not handle None specifically, do not wrap
else:
raise ValueError('Invalid none_policy : ' + str(none_policy)) | 0.003432 |
def _relativeize(self, filename):
"""Return the portion of a filename that is 'relative'
to the directories in this lookup.
"""
filename = posixpath.normpath(filename)
for dir in self.directories:
if filename[0:len(dir)] == dir:
return filename[len(dir):]
else:
return None | 0.005464 |
def every(self, step, offset=0):
"""
Create a new collection consisting of every n-th element.
:param step: The step size
:type step: int
:param offset: The start offset
:type offset: int
:rtype: Collection
"""
new = []
for position, item in enumerate(self.items):
if position % step == offset:
new.append(item)
return self.__class__(new) | 0.004357 |
def _indirect_jump_unresolved(self, jump):
"""
Called when we cannot resolve an indirect jump.
:param IndirectJump jump: The unresolved indirect jump.
:return: None
"""
# add a node from this node to UnresolvableJumpTarget or UnresolvalbeCallTarget node,
# depending on its jump kind
src_node = self._nodes[jump.addr]
if jump.jumpkind == 'Ijk_Boring':
unresolvable_target_addr = self._unresolvable_jump_target_addr
simprocedure_name = 'UnresolvableJumpTarget'
elif jump.jumpkind == 'Ijk_Call':
unresolvable_target_addr = self._unresolvable_call_target_addr
simprocedure_name = 'UnresolvableCallTarget'
else:
raise AngrCFGError('It should be impossible')
dst_node = CFGNode(unresolvable_target_addr, 0, self.model,
function_address=unresolvable_target_addr,
simprocedure_name=simprocedure_name,
block_id=unresolvable_target_addr,
)
# add the dst_node to self._nodes
if unresolvable_target_addr not in self._nodes:
self._nodes[unresolvable_target_addr] = dst_node
self._nodes_by_addr[unresolvable_target_addr].append(dst_node)
self._graph_add_edge(dst_node, src_node, jump.jumpkind, jump.ins_addr, jump.stmt_idx)
# mark it as a jumpout site for that function
self._function_add_transition_edge(unresolvable_target_addr, src_node, jump.func_addr,
to_outside=True,
dst_func_addr=unresolvable_target_addr,
ins_addr=jump.ins_addr,
stmt_idx=jump.stmt_idx,
)
self._deregister_analysis_job(jump.func_addr, jump)
CFGBase._indirect_jump_unresolved(self, jump) | 0.002973 |
def short_key():
"""
Generate a short key.
>>> key = short_key()
>>> len(key)
5
"""
firstlast = list(ascii_letters + digits)
middle = firstlast + list('-_')
return ''.join((
choice(firstlast), choice(middle), choice(middle),
choice(middle), choice(firstlast),
)) | 0.003135 |
def critical(self, message, *args, **kwargs):
"""Log critical event.
Compatible with logging.critical signature.
"""
self.system.critical(message, *args, **kwargs) | 0.010204 |
def get_project_details(project):
""" Get details for this user. """
result = []
for datastore in _get_datastores():
value = datastore.get_project_details(project)
value['datastore'] = datastore.config['DESCRIPTION']
result.append(value)
return result | 0.003436 |
def make_heading_affiliations(self, heading_div):
"""
Makes the content for the Author Affiliations, displays after the
Authors segment in the Heading.
Metadata element, content derived from FrontMatter
"""
#Get all of the aff element tuples from the metadata
affs = self.article.root.xpath('./front/article-meta/aff')
#Create a list of all those pertaining to the authors
author_affs = [i for i in affs if 'aff' in i.attrib['id']]
#Count them, used for formatting
if len(author_affs) == 0:
return None
else:
affs_list = etree.SubElement(heading_div,
'ul',
{'id': 'affiliations',
'class': 'simple'})
for aff in author_affs:
#Create a span element to accept extracted content
aff_item = etree.SubElement(affs_list, 'li')
aff_item.attrib['id'] = aff.attrib['id']
#Get the first label node and the first addr-line node
label = aff.find('label')
addr_line = aff.find('addr-line')
if label is not None:
bold = etree.SubElement(aff_item, 'b')
bold.text = all_text(label) + ' '
if addr_line is not None:
append_new_text(aff_item, all_text(addr_line))
else:
append_new_text(aff_item, all_text(aff)) | 0.004617 |
def removeidfobject(self, idfobject):
"""Remove an IDF object from the IDF.
Parameters
----------
idfobject : EpBunch object
The IDF object to remove.
"""
key = idfobject.key.upper()
self.idfobjects[key].remove(idfobject) | 0.006873 |
def write_traceback(logger=None, exc_info=None):
"""
Write the latest traceback to the log.
This should be used inside an C{except} block. For example:
try:
dostuff()
except:
write_traceback(logger)
Or you can pass the result of C{sys.exc_info()} to the C{exc_info}
parameter.
"""
if exc_info is None:
exc_info = sys.exc_info()
typ, exception, tb = exc_info
traceback = "".join(_traceback_no_io.format_exception(typ, exception, tb))
_writeTracebackMessage(logger, typ, exception, traceback) | 0.001709 |
def createSensorToClassifierLinks(network, sensorRegionName,
classifierRegionName):
"""Create required links from a sensor region to a classifier region."""
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="bucketIdxOut", destInput="bucketIdxIn")
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="actValueOut", destInput="actValueIn")
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="categoryOut", destInput="categoryIn") | 0.008224 |
def can_view(self, user):
"""
Returns True if user has permission to render this view.
At minimum this requires an active staff user. If the required_groups
attribute is not empty then the user must be a member of at least one
of those groups. If there are no required groups set for the view but
required groups are set for the bundle then the user must be a member
of at least one of those groups. If there are no groups to check this
will return True.
"""
if user.is_staff and user.is_active:
if user.is_superuser:
return True
elif self.required_groups:
return self._user_in_groups(user, self.required_groups)
elif self.bundle.required_groups:
return self._user_in_groups(user, self.bundle.required_groups)
else:
return True
return False | 0.002121 |
def makePickle(self, record):
"""
        Format the record as a prefixed plain-text line instead of pickling it.
"""
#ei = record.exc_info
#if ei:
# dummy = self.format(record) # just to get traceback text into record.exc_text
# record.exc_info = None # to avoid Unpickleable error
s = '%s%s:%i:%s\n' % (self.prefix, record.name, int(record.created), self.format(record))
#if ei:
# record.exc_info = ei # for next handler
return s | 0.015217 |
def _parseBlockDevice(self, block_device):
"""
Parse a higher-level view of the block device mapping into something
novaclient wants. This should be similar to how Horizon presents it.
Required keys:
device_name: The name of the device; e.g. vda or xda.
source_type: image, snapshot, volume, or blank/None.
destination_type: Destination of block device: volume or local.
delete_on_termination: True/False.
uuid: The image, snapshot, or volume id.
boot_index: Integer used for boot order.
volume_size: Size of the device in GiB.
"""
client_block_device = {}
client_block_device['device_name'] = block_device.get(
'device_name', 'vda')
client_block_device['source_type'] = block_device.get(
'source_type', 'image')
client_block_device['destination_type'] = block_device.get(
'destination_type', 'volume')
client_block_device['delete_on_termination'] = bool(
block_device.get('delete_on_termination', True))
client_block_device['uuid'] = block_device['uuid']
client_block_device['boot_index'] = int(
block_device.get('boot_index', 0))
# Allow None here. It will be rendered later.
client_block_device['volume_size'] = block_device.get('volume_size')
return client_block_device | 0.001392 |
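A quick sketch of the translation this helper performs. The input dict below is hypothetical (keys follow the docstring above) and the expected novaclient-style output is shown as a comment:

# Hypothetical higher-level mapping (keys as described in the docstring above).
block_device = {
    "source_type": "image",
    "uuid": "11111111-2222-3333-4444-555555555555",  # made-up image id
    "boot_index": 0,
    "volume_size": 20,
    "delete_on_termination": True,
}
# _parseBlockDevice(block_device) would yield the novaclient-style dict:
# {'device_name': 'vda', 'source_type': 'image', 'destination_type': 'volume',
#  'delete_on_termination': True, 'uuid': '11111111-...', 'boot_index': 0,
#  'volume_size': 20}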
def assert_checked_checkbox(self, value):
"""Assert the checkbox with label (recommended), name or id is checked."""
check_box = find_field(world.browser, 'checkbox', value)
assert check_box, "Cannot find checkbox '{}'.".format(value)
assert check_box.is_selected(), "Check box should be selected." | 0.003185 |
def build_b(self, scattering_fraction=0.01833):
"""Calculates the total scattering from back-scattering
        :param scattering_fraction: the fraction of back-scattering to total scattering (default = 0.01833)
        b = (bb[sea water] + bb[p]) / 0.01833
"""
lg.info('Building b with scattering fraction of :: ' + str(scattering_fraction))
self.b = (self.b_b + self.b_water / 2.0) / scattering_fraction | 0.009132 |
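A worked instance of the formula above with made-up inputs; b_b and b_water stand in for the instance attributes:

# Illustrative numbers only.
b_b, b_water = 0.005, 0.002
scattering_fraction = 0.01833
b = (b_b + b_water / 2.0) / scattering_fraction
print(round(b, 3))  # -> 0.327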
def list_queues(self, prefix=None, num_results=None, include_metadata=False,
marker=None, timeout=None):
'''
Returns a generator to list the queues. The generator will lazily follow
the continuation tokens returned by the service and stop when all queues
have been returned or num_results is reached.
If num_results is specified and the account has more than that number of
queues, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str prefix:
Filters the results to return only queues with names that begin
with the specified prefix.
:param int num_results:
The maximum number of queues to return.
:param bool include_metadata:
            Specifies that queue metadata be returned in the response.
:param str marker:
An opaque continuation token. This value can be retrieved from the
next_marker field of a previous generator object if num_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
'''
include = 'metadata' if include_metadata else None
kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include,
'marker': marker, 'timeout': timeout}
resp = self._list_queues(**kwargs)
return ListGenerator(resp, self._list_queues, (), kwargs) | 0.011923 |
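A minimal usage sketch, assuming `service` is an authenticated instance of the queue service class this method belongs to (it resembles the Azure storage SDK):

# First page of up to five queues whose names start with "jobs-".
first_page = service.list_queues(prefix="jobs-", num_results=5)
for queue in first_page:
    print(queue.name)

# If more queues exist, the finished generator carries a next_marker that can
# seed a follow-up call, as described in the docstring.
if first_page.next_marker:
    for queue in service.list_queues(prefix="jobs-", marker=first_page.next_marker):
        print(queue.name)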
def _handleDecodeHextileSubrectsColoured(self, block, bg, color, subrects, x, y, width, height, tx, ty, tw, th):
"""subrects with their own color"""
sz = self.bypp + 2
pos = 0
end = len(block)
while pos < end:
pos2 = pos + self.bypp
color = block[pos:pos2]
xy = ord(block[pos2])
wh = ord(block[pos2+1])
sx = xy >> 4
sy = xy & 0xf
sw = (wh >> 4) + 1
sh = (wh & 0xf) + 1
self.fillRectangle(tx + sx, ty + sy, sw, sh, color)
pos += sz
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty) | 0.004511 |
def ensure_one_opt(opt, parser, opt_list):
""" Check that one and only one in the opt_list is defined in opt
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
opt_list : list of strings
"""
the_one = None
for name in opt_list:
attr = name[2:].replace('-', '_')
if hasattr(opt, attr) and (getattr(opt, attr) is not None):
if the_one is None:
the_one = name
else:
parser.error("%s and %s are mutually exculsive" \
% (the_one, name))
if the_one is None:
parser.error("you must supply one of the following %s" \
% (', '.join(opt_list))) | 0.006468 |
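A usage sketch with optparse, assuming ensure_one_opt is importable from the module above; exactly one of the two mutually exclusive flags is supplied, so no error is raised:

from optparse import OptionParser

parser = OptionParser()
parser.add_option("--use-gpu", action="store_true")
parser.add_option("--use-cpu", action="store_true")
opts, _ = parser.parse_args(["--use-gpu"])

# Passes: only --use-gpu is set; supplying both (or neither) calls parser.error().
ensure_one_opt(opts, parser, ["--use-gpu", "--use-cpu"])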
def update_from_dict(self, keywords):
"""Set properties of metadata using key and value from keywords
:param keywords: A dictionary of keywords (key, value).
:type keywords: dict
"""
for key, value in list(keywords.items()):
setattr(self, key, value) | 0.006579 |
def create_linked_data_element(self, url, kind, id=None, # pylint: disable=W0622
relation=None, title=None):
"""
Returns a new linked data element for the given url and kind.
:param str url: URL to assign to the linked data element.
        :param str kind: kind of the resource that is linked. One of the
            constants defined by :class:`everest.constants.RESOURCE_KINDS`.
:returns: object implementing :class:`ILinkedDataElement`.
"""
mp = self.__mp_reg.find_or_create_mapping(Link)
return mp.data_element_class.create(url, kind, id=id,
relation=relation, title=title) | 0.007013 |
def match_name(self, in_string, fuzzy=False):
"""Match a color to a sRGB value.
The matching will be based purely on the input string and the color names in the
registry. If there's no direct hit, a fuzzy matching algorithm is applied. This method
will never fail to return a sRGB value, but depending on the score, it might or might
        not be a sensible result – as a rule of thumb, any score less than 90 indicates that
        there's a lot of guessing going on. It's the caller's responsibility to judge if the return
value should be trusted.
In normalization terms, this method implements "normalize an arbitrary color name
to a sRGB value".
Args:
in_string (string): The input string containing something resembling
a color name.
fuzzy (bool, optional): Try fuzzy matching if no exact match was found.
Defaults to ``False``.
Returns:
A named tuple with the members `hex_code` and `score`.
Raises:
ValueError: If ``fuzzy`` is ``False`` and no match is found
Examples:
>>> tint_registry = TintRegistry()
>>> tint_registry.match_name("rather white", fuzzy=True)
MatchResult(hex_code=u'ffffff', score=95)
"""
in_string = _normalize(in_string)
if in_string in self._hex_by_color:
return MatchResult(self._hex_by_color[in_string], 100)
if not fuzzy:
raise ValueError("No match for %r found." % in_string)
# We want the standard scorer *plus* the set scorer, because colors are often
# (but not always) related by sub-strings
color_names = self._hex_by_color.keys()
set_match = dict(fuzzywuzzy.process.extract(
in_string,
color_names,
scorer=fuzzywuzzy.fuzz.token_set_ratio
))
standard_match = dict(fuzzywuzzy.process.extract(in_string, color_names))
# This would be much easier with a collections.Counter, but alas! it's a 2.7 feature.
key_union = set(set_match) | set(standard_match)
counter = ((n, set_match.get(n, 0) + standard_match.get(n, 0)) for n in key_union)
color_name, score = sorted(counter, key=operator.itemgetter(1))[-1]
return MatchResult(self._hex_by_color[color_name], score / 2) | 0.005476 |
def save(self, **edgeArgs) :
"""Works like Document's except that you must specify '_from' and '_to' vertices before.
There's also a links() function especially for first saves."""
if not getattr(self, "_from") or not getattr(self, "_to") :
raise AttributeError("You must specify '_from' and '_to' attributes before saving. You can also use the function 'links()'")
payload = self._store.getStore()
payload["_from"] = self._from
payload["_to"] = self._to
Document._save(self, payload, **edgeArgs) | 0.010601 |
def bind(self, source=None, destination=None, node=None,
edge_title=None, edge_label=None, edge_color=None, edge_weight=None,
point_title=None, point_label=None, point_color=None, point_size=None):
"""Relate data attributes to graph structure and visual representation.
        To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not affect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs.
:param source: Attribute containing an edge's source ID
:type source: String.
:param destination: Attribute containing an edge's destination ID
:type destination: String.
:param node: Attribute containing a node's ID
:type node: String.
:param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination is used.
:type edge_title: HtmlString.
:param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings.
:type edge_label: HtmlString.
:param edge_color: Attribute overriding edge's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type edge_color: String.
:param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value.
:type edge_weight: String.
:param point_title: Attribute overriding node's minimized label text. By default, the node ID is used.
:type point_title: HtmlString.
:param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings.
:type point_label: HtmlString.
:param point_color: Attribute overriding node's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type point_color: Integer.
:param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom.
:type point_size: HtmlString.
:returns: Plotter.
:rtype: Plotter.
**Example: Minimal**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst')
**Example: Node colors**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst',
node='id', point_color='color')
**Example: Chaining**
::
import graphistry
g = graphistry.bind(source='src', destination='dst', node='id')
g1 = g.bind(point_color='color1', point_size='size1')
g.bind(point_color='color1b')
g2a = g1.bind(point_color='color2a')
g2b = g1.bind(point_color='color2b', point_size='size2b')
g3a = g2a.bind(point_size='size3a')
g3b = g2b.bind(point_size='size3b')
In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to:
::
g: default/default
g1: color1/size1
g2a: color2a/size1
g2b: color2b/size2b
g3a: color2a/size3a
g3b: color2b/size3b
"""
res = copy.copy(self)
res._source = source or self._source
res._destination = destination or self._destination
res._node = node or self._node
res._edge_title = edge_title or self._edge_title
res._edge_label = edge_label or self._edge_label
res._edge_color = edge_color or self._edge_color
res._edge_weight = edge_weight or self._edge_weight
res._point_title = point_title or self._point_title
res._point_label = point_label or self._point_label
res._point_color = point_color or self._point_color
res._point_size = point_size or self._point_size
return res | 0.003624 |
def _update_frozencell(self, frozen):
"""Updates frozen cell widget
Parameters
----------
frozen: Bool or string
        \tUntoggled iff False
"""
toggle_state = frozen is not False
self.ToggleTool(wx.FONTFLAG_MASK, toggle_state) | 0.006897 |
def add_future(
self,
future: "Union[Future[_T], concurrent.futures.Future[_T]]",
callback: Callable[["Future[_T]"], None],
) -> None:
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
This method only accepts `.Future` objects and not other
awaitables (unlike most of Tornado where the two are
interchangeable).
"""
if isinstance(future, Future):
# Note that we specifically do not want the inline behavior of
# tornado.concurrent.future_add_done_callback. We always want
# this callback scheduled on the next IOLoop iteration (which
# asyncio.Future always does).
#
# Wrap the callback in self._run_callback so we control
# the error logging (i.e. it goes to tornado.log.app_log
# instead of asyncio's log).
future.add_done_callback(
lambda f: self._run_callback(functools.partial(callback, future))
)
else:
assert is_future(future)
# For concurrent futures, we use self.add_callback, so
# it's fine if future_add_done_callback inlines that call.
future_add_done_callback(
future, lambda f: self.add_callback(callback, future)
) | 0.002778 |
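A small usage sketch with a concurrent future (assumes Tornado is installed); the callback is scheduled on the IOLoop once the executor finishes:

import concurrent.futures
from tornado.ioloop import IOLoop

executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)

def on_done(fut):
    print("result:", fut.result())  # -> result: 1024
    IOLoop.current().stop()

loop = IOLoop.current()
loop.add_future(executor.submit(pow, 2, 10), on_done)
loop.start()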
def divine_format(text):
"""Guess the format of the notebook, based on its content #148"""
try:
nbformat.reads(text, as_version=4)
return 'ipynb'
except nbformat.reader.NotJSONError:
pass
lines = text.splitlines()
for comment in ['', '#'] + _COMMENT_CHARS:
metadata, _, _, _ = header_to_metadata_and_cell(lines, comment)
ext = metadata.get('jupytext', {}).get('text_representation', {}).get('extension')
if ext:
return ext[1:] + ':' + guess_format(text, ext)[0]
# No metadata, but ``` on at least one line => markdown
for line in lines:
if line == '```':
return 'md'
return 'py:' + guess_format(text, '.py')[0] | 0.002759 |
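Two illustrative calls, assuming divine_format is importable from jupytext; the first input is minimal notebook JSON, the second is plain text containing a ``` line:

notebook_json = '{"cells": [], "metadata": {}, "nbformat": 4, "nbformat_minor": 2}'
print(divine_format(notebook_json))                        # -> 'ipynb'
print(divine_format("Some prose\n```\nprint('hi')\n```"))  # -> 'md'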
def transformString( self, instring ):
"""Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(_ustr,out)) | 0.015794 |
def get_parent_objects(self, context):
"""Return all objects of the same type from the parent object
"""
parent_object = api.get_parent(context)
portal_type = api.get_portal_type(context)
return parent_object.objectValues(portal_type) | 0.007299 |
def trends_place(self, woeid, exclude=None):
"""
Returns recent Twitter trends for the specified WOEID. If
exclude == 'hashtags', Twitter will remove hashtag trends from the
response.
"""
url = 'https://api.twitter.com/1.1/trends/place.json'
params = {'id': woeid}
if exclude:
params['exclude'] = exclude
try:
resp = self.get(url, params=params, allow_404=True)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.info("no region matching WOEID %s", woeid)
raise e
return resp.json() | 0.003012 |
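A usage sketch, assuming `client` is an authenticated instance of the class above (the method resembles twarc) and that the response follows Twitter's trends/place shape:

LONDON_WOEID = 44418  # Yahoo! WOEID for London
trends = client.trends_place(LONDON_WOEID, exclude="hashtags")
for trend in trends[0]["trends"]:
    print(trend["name"], trend.get("tweet_volume"))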
def build_from_corpus(cls,
corpus_generator,
target_vocab_size,
max_subword_length=20,
max_corpus_chars=None,
reserved_tokens=None):
"""Builds a `SubwordTextEncoder` based on the `corpus_generator`.
Args:
corpus_generator: generator yielding `str`, from which subwords will be
constructed.
target_vocab_size: `int`, approximate size of the vocabulary to create.
max_subword_length: `int`, maximum length of a subword. Note that memory
and compute scale quadratically in the length of the longest token.
max_corpus_chars: `int`, the maximum number of characters to consume from
`corpus_generator` for the purposes of building the subword vocabulary.
reserved_tokens: `list<str>`, list of tokens that will always be treated
as whole tokens and not split up. Note that these must contain a mix of
alphanumeric and non-alphanumeric characters (e.g. "<EOS>") and not end
in an underscore.
Returns:
`SubwordTextEncoder`.
"""
reserved_tokens = reserved_tokens or []
_validate_build_arguments(
max_subword_length=max_subword_length,
reserved_tokens=reserved_tokens,
target_vocab_size=target_vocab_size)
token_counts = _token_counts_from_generator(
generator=corpus_generator,
max_chars=max_corpus_chars,
reserved_tokens=reserved_tokens)
# Binary search on the minimum token count to build a vocabulary with
# approximately the right size
def _binary_search(min_token_count, max_token_count):
"""Binary search min_token_count to build SubwordTextEncoder vocab."""
candidate_min = (min_token_count + max_token_count) // 2
logging.info("SubwordTextEncoder build: trying min_token_count %d",
candidate_min)
encoder = cls._build_from_token_counts(
token_counts=token_counts,
min_token_count=candidate_min,
reserved_tokens=reserved_tokens,
num_iterations=4,
max_subword_length=max_subword_length)
vocab_size = encoder.vocab_size
# Being within 1% of the target vocab size is ok
target_achieved = (
abs(vocab_size - target_vocab_size) * 100 < target_vocab_size)
if (target_achieved or min_token_count >= max_token_count or
candidate_min <= 1):
# Search complete
return encoder
# Recurse
if vocab_size > target_vocab_size:
next_encoder = _binary_search(candidate_min + 1, max_token_count)
else:
next_encoder = _binary_search(min_token_count, candidate_min - 1)
# Return the one that's closest to the target_vocab_size
if (abs(vocab_size - target_vocab_size) <
abs(next_encoder.vocab_size - target_vocab_size)):
return encoder
else:
return next_encoder
# Get min and max token counts.
min_token_count = max(min(token_counts.values()), 1)
max_token_count = max(token_counts.values())
# Another option could be to do a binary search over *ranks* of the tokens.
return _binary_search(min_token_count, max_token_count) | 0.006773 |
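A usage sketch assuming the SubwordTextEncoder API from tensorflow_datasets shown above; the toy corpus is far too small for a realistic vocabulary, so the target size is only approximate:

corpus = ["the quick brown fox", "jumps over the lazy dog", "the dog sleeps"]

encoder = SubwordTextEncoder.build_from_corpus(
    corpus_generator=iter(corpus),
    target_vocab_size=100,
    max_subword_length=8,
    reserved_tokens=["<EOS>"])

ids = encoder.encode("the quick dog")
print(ids)                  # list of subword ids
print(encoder.decode(ids))  # -> "the quick dog"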
async def send(self, metric):
"""Transform metric to JSON bytestring and send to server.
Args:
metric (dict): Complete metric to send as JSON.
"""
message = json.dumps(metric).encode('utf-8')
await self.loop.create_datagram_endpoint(
lambda: UDPClientProtocol(message),
remote_addr=(self.ip, self.port)) | 0.005263 |
def git_clone(git_url, path):
"""Clone git repository at $git_url to $path."""
if os.path.exists(os.path.join(path, '.git')):
# get rid of local repo if it already exists
shutil.rmtree(path)
os.makedirs(path, exist_ok=True)
print('Start cloning from {}…'.format(git_url))
git_proc = subprocess.Popen(['git', 'clone', git_url, path],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env={'GIT_TERMINAL_PROMPT': '0'})
try:
stdoutmsg, stderrmsg = git_proc.communicate(timeout=120)
except subprocess.TimeoutExpired:
git_proc.kill()
stderrmsg = b'Timed out.'
if git_proc.returncode == 0:
print('Cloned {}.'.format(git_url))
else:
print('Error cloning from {}:\n{}'.format(git_url,
stderrmsg.decode('utf-8'))) | 0.00106 |
def skill_configuration(self):
# type: () -> SkillConfiguration
"""Create the skill configuration object using the registered
components.
"""
skill_config = super(StandardSkillBuilder, self).skill_configuration
skill_config.api_client = DefaultApiClient()
if self.table_name is not None:
kwargs = {"table_name": self.table_name} # type: Dict[str, Any]
if self.auto_create_table:
kwargs["create_table"] = self.auto_create_table
if self.partition_keygen:
kwargs["partition_keygen"] = self.partition_keygen
if self.dynamodb_client:
kwargs["dynamodb_resource"] = self.dynamodb_client
skill_config.persistence_adapter = DynamoDbAdapter(**kwargs)
return skill_config | 0.003584 |
def findinfiles_callback(self):
"""Find in files callback"""
widget = QApplication.focusWidget()
if not self.ismaximized:
self.dockwidget.setVisible(True)
self.dockwidget.raise_()
text = ''
try:
if widget.has_selected_text():
text = widget.get_selected_text()
except AttributeError:
# This is not a text widget deriving from TextEditBaseWidget
pass
self.findinfiles.set_search_text(text)
if text:
self.findinfiles.find() | 0.003407 |
def get_required_arguments_metricnames(self):
"""
Helper function to get metricname arguments.
        Note that it uses the plural ``get_arguments`` variant, which means the argument can be repeated.
Raises exception if argument is missing.
Returns a list of metricname arguments
"""
try:
metricnames = self.get_arguments(constants.PARAM_METRICNAME)
if not metricnames:
raise tornado.web.MissingArgumentError(constants.PARAM_METRICNAME)
return metricnames
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message) | 0.010435 |
def publish(ctx, test=False, force=False, draft=False):
""" Publish the project.
:param bool test: Publishes to PyPi test server (defaults to False)
:param bool force: Skip version check (defaults to False)
:param bool draft: Sample publish (has no effect) (defaults to False)
"""
previous_version = get_previous_version(ctx)
current_version = parver.Version.parse(metadata["version"])
if current_version <= previous_version and not force:
error_message = (
f"current version ({current_version!s}) is <= to previous version "
f"({previous_version!s}), use 'package.version' to update current version"
)
report.error(ctx, "publish", error_message)
raise ValueError(error_message)
report.info(ctx, "publish", f"publishing project {ctx.metadata['name']!r}")
report.warning(
ctx,
"publish",
f"drafting publish for project {ctx.metadata['name']!r} (has no effect)",
)
commit_message = f"Release {current_version!s}"
report.info(ctx, "publish", f"git commiting release {commit_message!r}")
git_commit_command = f"git commit -asm {commit_message!r}"
if not draft:
ctx.run(git_commit_command)
tag_content = get_tag_content(ctx).replace('"', '\\"')
git_tag_command = (
f'git tag -a "v{current_version!s}" -m '
f'"Version {current_version!s}\n\n{tag_content}"'
)
report.info(
ctx, "publish", f"git tagging commit as release for version {current_version!s}"
)
if not draft:
ctx.run(git_tag_command)
artifact_paths = [f"{_.as_posix()!r}" for _ in get_artifact_paths(ctx)]
for artifact_path in artifact_paths:
report.debug(ctx, "publish", f"publishing artifact {artifact_path}")
publish_command = f"twine upload {' '.join(artifact_paths)}"
if test:
publish_command += " --repository 'https://test.pypi.org/legacy/'"
# get user to confirm publish
try:
input(
report._get_text(
ctx,
"success",
"publish",
"about to publish, [Enter] to continue, [Ctrl-C] to abort: ",
)
)
while True:
(username, password) = get_username_password(
ctx, "PyPi Username: ", "PyPi Password: "
)
            # TODO: check if username and password are valid before trying to post
report.info(ctx, "publish", f"publishing project {ctx.metadata['name']!s}")
if not draft:
publish_command += f" -u {username!r} -p {password!r}"
publish_result = ctx.run(publish_command, warn=True)
if publish_result.exited:
report.error(
ctx,
"publish",
f"failed to publish {ctx.metadata['name']!s} (retrying)",
)
continue
break
git_push_command = "git push --tags"
report.info(ctx, "publish", f"pushing git tags")
if not draft:
ctx.run(git_push_command)
except KeyboardInterrupt:
print()
report.error(ctx, "publish", "aborting publish!")
        git_remove_tag_command = f"git tag -d v{current_version!s}"
report.warn(ctx, "publish", "removing git tags")
if not draft:
ctx.run(git_remove_tag_command)
git_reset_command = f"git reset --soft HEAD^"
report.warn(ctx, "publish", "softly reseting commit")
if not draft:
ctx.run(git_reset_command) | 0.001932 |
def get_return_operation_by_id(cls, return_operation_id, **kwargs):
"""Find ReturnOperation
Return single instance of ReturnOperation by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_return_operation_by_id(return_operation_id, async=True)
>>> result = thread.get()
:param async bool
:param str return_operation_id: ID of returnOperation to return (required)
:return: ReturnOperation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_return_operation_by_id_with_http_info(return_operation_id, **kwargs)
else:
(data) = cls._get_return_operation_by_id_with_http_info(return_operation_id, **kwargs)
return data | 0.006091 |
def impute_dataframe_zero(df_impute):
"""
Replaces all ``NaNs``, ``-infs`` and ``+infs`` from the DataFrame `df_impute` with 0s.
    The `df_impute` will be modified in place. All its columns will be converted into dtype ``np.float64``.
:param df_impute: DataFrame to impute
:type df_impute: pandas.DataFrame
:return df_impute: imputed DataFrame
:rtype df_impute: pandas.DataFrame
"""
df_impute.replace([np.PINF, np.NINF], 0, inplace=True)
df_impute.fillna(0, inplace=True)
    # Ensure a dtype of np.float64 (astype returns a new frame, so rebind it before returning)
    df_impute = df_impute.astype(np.float64, copy=False)
return df_impute | 0.004839 |
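A small end-to-end example, assuming impute_dataframe_zero is importable and pandas/numpy are available:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan, np.inf], "b": [-np.inf, 2.0, np.nan]})
print(impute_dataframe_zero(df))
#      a    b
# 0  1.0  0.0
# 1  0.0  2.0
# 2  0.0  0.0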
def add_xmlid(ctx, record, xmlid, noupdate=False):
""" Add a XMLID on an existing record """
try:
ref_id, __, __ = ctx.env['ir.model.data'].xmlid_lookup(xmlid)
except ValueError:
pass # does not exist, we'll create a new one
else:
return ctx.env['ir.model.data'].browse(ref_id)
if '.' in xmlid:
module, name = xmlid.split('.')
else:
module = ''
name = xmlid
return ctx.env['ir.model.data'].create({
'name': name,
'module': module,
'model': record._name,
'res_id': record.id,
'noupdate': noupdate,
}) | 0.001613 |
def download_file_job(entry, directory, checksums, filetype='genbank', symlink_path=None):
"""Generate a DownloadJob that actually triggers a file download."""
pattern = NgdConfig.get_fileending(filetype)
filename, expected_checksum = get_name_and_checksum(checksums, pattern)
base_url = convert_ftp_url(entry['ftp_path'])
full_url = '{}/{}'.format(base_url, filename)
local_file = os.path.join(directory, filename)
full_symlink = None
if symlink_path is not None:
full_symlink = os.path.join(symlink_path, filename)
# Keep metadata around
mtable = metadata.get()
mtable.add(entry, local_file)
return DownloadJob(full_url, local_file, expected_checksum, full_symlink) | 0.002759 |
def get_introspection_data(cls, tax_benefit_system):
"""
        Get introspection data about the code of the variable.
:returns: (comments, source file path, source code, start line number)
:rtype: tuple
"""
comments = inspect.getcomments(cls)
# Handle dynamically generated variable classes or Jupyter Notebooks, which have no source.
try:
absolute_file_path = inspect.getsourcefile(cls)
except TypeError:
source_file_path = None
else:
source_file_path = absolute_file_path.replace(tax_benefit_system.get_package_metadata()['location'], '')
try:
source_lines, start_line_number = inspect.getsourcelines(cls)
source_code = textwrap.dedent(''.join(source_lines))
except (IOError, TypeError):
source_code, start_line_number = None, None
return comments, source_file_path, source_code, start_line_number | 0.004107 |
def get_owner_names_value(self, obj):
"""Extract owners' names."""
return [
self._get_user(user)
for user in get_users_with_permission(obj, get_full_perm('owner', obj))
] | 0.013761 |
def parse_fields(fields, as_dict=False):
'''
Given a list of fields (or several other variants of the same),
    return a consistent, normalized form of the same.
    Two forms are currently supported:
dictionary form: dict 'key' is the field name
and dict 'value' is either 1 (include)
or 0 (exclude).
list form (other): list values are field names to be included
If fields passed is one of the following values, it will be assumed
the user wants to include all fields and thus, we return an empty
dict or list to indicate this, accordingly:
* all fields: ['~', None, False, True, {}, []]
'''
_fields = {}
if fields in ['~', None, False, True, {}, []]:
# all these signify 'all fields'
_fields = {}
elif isinstance(fields, dict):
_fields.update(
{unicode(k).strip(): int(v) for k, v in fields.iteritems()})
elif isinstance(fields, basestring):
_fields.update({unicode(s).strip(): 1 for s in fields.split(',')})
elif isinstance(fields, (list, tuple)):
_fields.update({unicode(s).strip(): 1 for s in fields})
else:
raise ValueError("invalid fields value")
if as_dict:
return _fields
else:
return sorted(_fields.keys()) | 0.000742 |
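Illustrative calls; the function above targets Python 2 (unicode/basestring), hence the u'' results:

parse_fields("name, email ,  age")             # -> [u'age', u'email', u'name']
parse_fields(["email", "name"], as_dict=True)  # -> {u'email': 1, u'name': 1}
parse_fields(None)                             # "all fields" -> []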