text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---|
def save(self, doc):
"""Save a doc to cache
"""
self.log.debug('save()')
self.docs.append(doc)
self.commit() | 0.013514 |
def plot_2d_single(x, y, pdffilename, **kwargs):
"""
    Build a single 2D plot (via PlotSingle2D) and pass all arguments through
args:
x: array_like
xdata
y: array_like
ydata
        pdffilename: string
            filename of the pdf to save
**kwargs:
figure_options: passed to matplotlib.pyplot.figure
xlabel_options: dict
kwargs passed in set_xlabel
ylabel_options: dict
kwargs passed in set_ylabel
suptitle_options: dict
kwargs passed in figure.suptitle
title_options: dict
kwargs passed in set_title
scilimits: tuple
            if a number is outside these limits, scientific notation will be used
errors: dictionary, array_like, scalar
dictionary: {"xerr": xerr, "yerr": yerr}
array_like, scalar: yerr
fmt: string, default="k."
line format
bestfitfmt: string, default="k-"
bestfit line format
bestfit: BestFit child class
eg. bestfit.polyfit.PolyFit, bestfit.logfit.LogFit
bestfitlim: tuple, default=None
xlim for bestfit line
suptitle: string, default=xlim
suptitle of pdf plot, formatted with outputdict
suptitle_fontsize: int, default=15
font size of suptitle
title: string, default=None
title of the pdf plot
title_fontsize: int, default=12
font size of title, formatted with outputdict
        xlabel: string, default=None
            xlabel string, formatted with outputdict
        ylabel: string, default=None
            ylabel string, formatted with outputdict
xlim: tuple, default=None
xlim
ylim: tuple, default=None
ylim
outputdict: dictionary, default=None
pass keys and arguments for formatting and
to output
"""
pdffilepath = DataSets.get_pdffilepath(pdffilename)
plotsingle2d = PlotSingle2D(x, y, pdffilepath, **kwargs)
return plotsingle2d.plot() | 0.000813 |
def set_left_table(self, left_table=None):
"""
        Sets the left table for this join clause. If no table is specified, the first table
        in the query will be used.
:type left_table: str or dict or :class:`Table <querybuilder.tables.Table>` or None
:param left_table: The left table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first table
in the query.
"""
if left_table:
self.left_table = TableFactory(
table=left_table,
owner=self.owner,
)
else:
self.left_table = self.get_left_table() | 0.008333 |
def get_nameserver_detail_output_show_nameserver_nameserver_portsymb(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_nameserver_detail = ET.Element("get_nameserver_detail")
config = get_nameserver_detail
output = ET.SubElement(get_nameserver_detail, "output")
show_nameserver = ET.SubElement(output, "show-nameserver")
nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
nameserver_portid_key.text = kwargs.pop('nameserver_portid')
nameserver_portsymb = ET.SubElement(show_nameserver, "nameserver-portsymb")
nameserver_portsymb.text = kwargs.pop('nameserver_portsymb')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006242 |
def endpoint_list(auth=None, **kwargs):
'''
List endpoints
CLI Example:
.. code-block:: bash
salt '*' keystoneng.endpoint_list
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.list_endpoints(**kwargs) | 0.00361 |
def plot_scatter(f, xs, ys, size, pch, colour, title):
"""
    Plot a scatter plot of x/y data read from files or passed as lists.
Arguments:
f -- comma delimited file w/ x,y coordinates
xs -- if f not specified this is a file w/ x coordinates
    ys -- if f not specified this is a file w/ y coordinates
size -- size of the plot
pch -- shape of the points (any character)
colour -- colour of the points
title -- title of the plot
"""
cs = None
if f:
if isinstance(f, str):
with open(f) as fh:
data = [tuple(line.strip().split(',')) for line in fh]
else:
data = [tuple(line.strip().split(',')) for line in f]
xs = [float(i[0]) for i in data]
ys = [float(i[1]) for i in data]
if len(data[0]) > 2:
cs = [i[2].strip() for i in data]
elif isinstance(xs, list) and isinstance(ys, list):
pass
else:
with open(xs) as fh:
xs = [float(str(row).strip()) for row in fh]
with open(ys) as fh:
ys = [float(str(row).strip()) for row in fh]
_plot_scatter(xs, ys, size, pch, colour, title, cs) | 0.000864 |
def restart(self, instance_id):
"""restarts a paused instance.
:param str instance_id: instance identifier
:return: None
"""
try:
if not self._paused:
log.debug("node %s is not paused, can't restart", instance_id)
return
self._paused = False
result = self._subscription._sms.start_role(
service_name=self._cloud_service._name,
deployment_name=self._cloud_service._name,
role_name=instance_id)
self._subscription._wait_result(result)
except Exception as exc:
log.error('error restarting instance %s: %s', instance_id, exc)
raise
log.debug('restarted instance(instance_id=%s)', instance_id) | 0.002506 |
def disable_multicolor(self):
""" swap from the multicolor image to the single color image """
# disable the multicolor image
for color in ['red', 'green', 'blue']:
self.multicolorscales[color].config(state=tk.DISABLED, bg='grey')
self.multicolorframes[color].config(bg='grey')
self.multicolorlabels[color].config(bg='grey')
self.multicolordropdowns[color].config(bg='grey', state=tk.DISABLED)
self.multicolorminscale[color].config(bg='grey', state=tk.DISABLED)
self.multicolormaxscale[color].config(bg='grey', state=tk.DISABLED)
# enable the single color
self.singlecolorscale.config(state=tk.NORMAL, bg=self.single_color_theme)
self.singlecolorframe.config(bg=self.single_color_theme)
self.singlecolorlabel.config(bg=self.single_color_theme)
self.singlecolordropdown.config(bg=self.single_color_theme, state=tk.NORMAL)
self.singlecolorminscale.config(bg=self.single_color_theme, state=tk.NORMAL)
self.singlecolormaxscale.config(bg=self.single_color_theme, state=tk.NORMAL) | 0.006211 |
def profile_bins(self):
""" The binning to use to do the profile fitting
"""
log_mean = np.log10(self.mean())
log_half_width = max(5. * self.sigma(), 3.)
# Default is to profile over +-5 sigma,
# centered on mean, using 100 bins
return np.logspace(log_mean - log_half_width,
log_mean + log_half_width, 101)/self._j_ref | 0.004988 |
def getPostStates(self):
'''
Slightly extends the base version of this method by recalculating aLvlNow to account for the
consumer's (potential) misperception about their productivity level.
Parameters
----------
None
Returns
-------
None
'''
RepAgentConsumerType.getPostStates(self)
self.aLvlNow = self.mLvlTrue - self.cLvlNow # This is true
self.aNrmNow = self.aLvlNow/self.pLvlTrue | 0.008163 |
def orientation(point_p, point_q, point_r):
"""
To find orientation of ordered triplet (p, q, r).
:param point_p:
:type point_p: models.Point
:param point_q:
:type point_q: models.Point
:param point_r:
:type point_r: models.Point
    :return: 0: p, q and r are collinear
1: clockwise
2: counterclockwise
:rtype: int
"""
    # See https://www.geeksforgeeks.org/orientation-3-ordered-points/
    # for details of the formula below.
r = ((point_q.y - point_p.y) * (point_r.x - point_q.x) -
(point_q.x - point_p.x) * (point_r.y - point_q.y))
if r == 0:
return 0
return 1 if r > 0 else 2 | 0.001486 |
def index():
"""Display a list of all user institutes."""
institute_objs = user_institutes(store, current_user)
institutes_count = ((institute_obj, store.cases(collaborator=institute_obj['_id']).count())
for institute_obj in institute_objs if institute_obj)
return dict(institutes=institutes_count) | 0.005917 |
async def main():
"""Run."""
async with ClientSession() as websession:
try:
client = Client(websession)
await client.load_local('<IP ADDRESS>', '<PASSWORD>', websession)
for controller in client.controllers.values():
print('CLIENT INFORMATION')
print('Name: {0}'.format(controller.name))
print('MAC Address: {0}'.format(controller.mac))
print('API Version: {0}'.format(controller.api_version))
print(
'Software Version: {0}'.format(
controller.software_version))
print(
'Hardware Version: {0}'.format(
controller.hardware_version))
# Work with diagnostics:
print()
print('RAINMACHINE DIAGNOSTICS')
data = await controller.diagnostics.current()
print('Uptime: {0}'.format(data['uptime']))
print('Software Version: {0}'.format(data['softwareVersion']))
# Work with parsers:
print()
print('RAINMACHINE PARSERS')
for parser in await controller.parsers.current():
print(parser['name'])
# Work with programs:
print()
print('ALL PROGRAMS')
for program in await controller.programs.all(
include_inactive=True):
print(
'Program #{0}: {1}'.format(
program['uid'], program['name']))
print()
print('PROGRAM BY ID')
program_1 = await controller.programs.get(1)
print(
"Program 1's Start Time: {0}".format(
program_1['startTime']))
print()
print('NEXT RUN TIMES')
for program in await controller.programs.next():
print(
'Program #{0}: {1}'.format(
program['pid'], program['startTime']))
print()
print('RUNNING PROGRAMS')
for program in await controller.programs.running():
print('Program #{0}'.format(program['uid']))
print()
print('STARTING PROGRAM #1')
print(await controller.programs.start(1))
await asyncio.sleep(3)
print()
print('STOPPING PROGRAM #1')
print(await controller.programs.stop(1))
# Work with provisioning:
print()
print('PROVISIONING INFO')
name = await controller.provisioning.device_name
print('Device Name: {0}'.format(name))
settings = await controller.provisioning.settings()
print(
'Database Path: {0}'.format(
settings['system']['databasePath']))
print(
'Station Name: {0}'.format(
settings['location']['stationName']))
wifi = await controller.provisioning.wifi()
print('IP Address: {0}'.format(wifi['ipAddress']))
# Work with restrictions:
print()
print('RESTRICTIONS')
current = await controller.restrictions.current()
print(
'Rain Delay Restrictions: {0}'.format(
current['rainDelay']))
universal = await controller.restrictions.universal()
print(
'Freeze Protect: {0}'.format(
universal['freezeProtectEnabled']))
print('Hourly Restrictions:')
for restriction in await controller.restrictions.hourly():
print(restriction['name'])
raindelay = await controller.restrictions.raindelay()
print(
'Rain Delay Counter: {0}'.format(
raindelay['delayCounter']))
# Work with restrictions:
print()
print('STATS')
today = await controller.stats.on_date(
date=datetime.date.today())
print('Min for Today: {0}'.format(today['mint']))
for day in await controller.stats.upcoming(details=True):
print('{0} Min: {1}'.format(day['day'], day['mint']))
# Work with watering:
print()
print('WATERING')
for day in await controller.watering.log(
date=datetime.date.today()):
print(
'{0} duration: {1}'.format(
day['date'], day['realDuration']))
queue = await controller.watering.queue()
print('Current Queue: {0}'.format(queue))
print('Runs:')
for watering_run in await controller.watering.runs(
date=datetime.date.today()):
print(
'{0} ({1})'.format(
watering_run['dateTime'], watering_run['et0']))
print()
print('PAUSING ALL WATERING FOR 30 SECONDS')
print(await controller.watering.pause_all(30))
await asyncio.sleep(3)
print()
print('UNPAUSING WATERING')
print(await controller.watering.unpause_all())
print()
print('STOPPING ALL WATERING')
print(await controller.watering.stop_all())
# Work with zones:
print()
print('ALL ACTIVE ZONES')
for zone in await controller.zones.all(details=True):
print(
'Zone #{0}: {1} (soil: {2})'.format(
zone['uid'], zone['name'], zone['soil']))
print()
print('ZONE BY ID')
zone_1 = await controller.zones.get(1, details=True)
print(
"Zone 1's Name: {0} (soil: {1})".format(
zone_1['name'], zone_1['soil']))
print()
print('STARTING ZONE #1 FOR 3 SECONDS')
print(await controller.zones.start(1, 3))
await asyncio.sleep(3)
print()
print('STOPPING ZONE #1')
print(await controller.zones.stop(1))
except RainMachineError as err:
print(err) | 0.000146 |
def read_dataset_schema(schema_path: str) -> Dict[str, List[TableColumn]]:
"""
Reads a schema from the text2sql data, returning a dictionary
mapping table names to their columns and respective types.
This handles columns in an arbitrary order and also allows
either ``{Table, Field}`` or ``{Table, Field} Name`` as headers,
because both appear in the data. It also uppercases table and
column names if they are not already uppercase.
Parameters
----------
schema_path : ``str``, required.
The path to the csv schema.
Returns
-------
A dictionary mapping table names to typed columns.
"""
schema: Dict[str, List[TableColumn]] = defaultdict(list)
for i, line in enumerate(open(schema_path, "r")):
if i == 0:
header = [x.strip() for x in line.split(",")]
elif line[0] == "-":
continue
else:
data = {key: value for key, value in zip(header, [x.strip() for x in line.split(",")])}
table = data.get("Table Name", None) or data.get("Table")
column = data.get("Field Name", None) or data.get("Field")
is_primary_key = data.get("Primary Key") == "y"
schema[table.upper()].append(TableColumn(column.upper(), data["Type"], is_primary_key))
return {**schema} | 0.002249 |
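A small usage sketch, assuming the snippet's imports are in scope and using a minimal TableColumn stand-in (the real type comes from the surrounding module) together with a schema CSV in the ``{Table, Field} Name`` header style described above.

```python
from collections import namedtuple
import tempfile

# Hypothetical stand-in for the module's TableColumn type.
TableColumn = namedtuple('TableColumn', ['name', 'column_type', 'is_primary_key'])

csv_text = (
    "Table Name,Field Name,Type,Primary Key\n"
    "----,----,----,----\n"
    "city,city_name,text,n\n"
    "city,population,number,y\n"
)
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as handle:
    handle.write(csv_text)

schema = read_dataset_schema(handle.name)
# {'CITY': [CITY_NAME (text), POPULATION (number, primary key)]}
print(schema)
```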
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False | 0.011142 |
def _check_holiday_structure(self, times):
""" To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception
"""
if not isinstance(times, list):
raise TypeError("an list is required")
for time in times:
if not isinstance(time, tuple):
raise TypeError("a tuple is required")
if len(time) > 5:
raise TypeError("Target time takes at most 5 arguments"
" ('%d' given)" % len(time))
if len(time) < 5:
raise TypeError("Required argument '%s' (pos '%d')"
" not found" % (TIME_LABEL[len(time)], len(time)))
self._check_time_format(TIME_LABEL, time) | 0.003363 |
def add(self, action=None, subject=None, **conditions):
"""
        Add an allowed ability using two or more arguments.
        The first one is the action you're setting the permission for,
        the second one is the class of object you're setting it on.
        The third one is the set of conditions the subject must match, or a function
        to test against.
self.add('update', Article)
self.add('update', Article, user_id=1)
self.add('update', Article, user_id=1, title='hello')
self.add('update', Article, function=test_title)
"""
self.add_rule(Rule(True, action, subject, **conditions)) | 0.003125 |
def svds_descending(M, k):
'''
    In contrast to MATLAB, scipy's svds() arranges the singular
values in ascending order. In order to have matching codes,
we wrap it around by a function which re-sorts the singular
values and singular vectors.
Args:
M: 2D numpy array; the matrix whose SVD is to be computed.
k: Number of singular values to be computed.
Returns:
u, s, vt = svds(M, k=k)
'''
u, s, vt = svds(M, k=k)
# reverse columns of u
u = u[:, ::-1]
# reverse s
s = s[::-1]
# reverse rows of vt
vt = vt[::-1, :]
return u, np.diag(s), vt.T | 0.001548 |
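A quick numerical check of the reordering, assuming numpy and scipy.sparse.linalg.svds are available as in the snippet:

```python
import numpy as np

M = np.random.RandomState(0).rand(20, 10)
u, s_diag, v = svds_descending(M, k=3)

# Singular values now come back largest-first on the diagonal, and each
# column pair still satisfies M @ v_i == s_i * u_i.
print(np.all(np.diff(np.diag(s_diag)) <= 0))              # True
print(np.allclose(M @ v[:, 0], s_diag[0, 0] * u[:, 0]))   # True
```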
def _insert_stack(stack, sample_count, call_tree):
"""Inserts stack into the call tree.
Args:
stack: Call stack.
sample_count: Sample count of call stack.
call_tree: Call tree.
"""
curr_level = call_tree
for func in stack:
next_level_index = {
node['stack']: node for node in curr_level['children']}
if func not in next_level_index:
new_node = {'stack': func, 'children': [], 'sampleCount': 0}
curr_level['children'].append(new_node)
curr_level = new_node
else:
curr_level = next_level_index[func]
curr_level['sampleCount'] = sample_count | 0.002706 |
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res | 0.008696 |
def wait_until(what, times=-1):
"""Wait until `what` return True
Args:
what (Callable[bool]): Call `wait()` again and again until it returns True
times (int): Maximum times of trials before giving up
Returns:
True if success, False if times threshold reached
"""
while times:
logger.info('Waiting times left %d', times)
try:
if what() is True:
return True
except:
logger.exception('Wait failed')
else:
logger.warning('Trial[%d] failed', times)
times -= 1
time.sleep(1)
return False | 0.004739 |
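A usage sketch, assuming the module-level logger and time imports from the snippet are in scope; flaky() is a hypothetical condition that only succeeds on its third call.

```python
import itertools

_calls = itertools.count()

def flaky():
    # Hypothetical condition: returns True from the third call onwards.
    return next(_calls) >= 2

print(wait_until(flaky, times=5))  # True, after roughly two 1-second sleeps
```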
def parse_time(t):
"""
Parse string time format to microsecond
"""
if isinstance(t, (str, unicode)):
b = re_time.match(t)
if b:
v, unit = int(b.group(1)), b.group(2)
if unit == 's':
return v*1000
elif unit == 'm':
return v*60*1000
elif unit == 'h':
return v*60*60*1000
else:
return v
else:
raise TimeFormatError(t)
elif isinstance(t, (int, long)):
return t
else:
raise TimeFormatError(t) | 0.001704 |
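The function depends on a module-level ``re_time`` pattern and ``TimeFormatError`` that are not shown; the definitions below are an assumption consistent with the unit handling above, not the originals.

```python
import re

# Assumed pattern: an integer followed by an optional s/m/h unit.
re_time = re.compile(r'^(\d+)([smh])?$')

class TimeFormatError(Exception):
    """Raised when a time value cannot be parsed."""

# With these in place:
#   parse_time('10s') -> 10000
#   parse_time('2m')  -> 120000
#   parse_time('1h')  -> 3600000
#   parse_time(250)   -> 250
```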
def update_option_set_by_id(cls, option_set_id, option_set, **kwargs):
"""Update OptionSet
Update attributes of OptionSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_option_set_by_id(option_set_id, option_set, async=True)
>>> result = thread.get()
:param async bool
:param str option_set_id: ID of optionSet to update. (required)
:param OptionSet option_set: Attributes of optionSet to update. (required)
:return: OptionSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_option_set_by_id_with_http_info(option_set_id, option_set, **kwargs)
else:
(data) = cls._update_option_set_by_id_with_http_info(option_set_id, option_set, **kwargs)
return data | 0.005786 |
def copy_rec(source, dest):
"""Copy files between diferent directories.
Copy one or more files to an existing directory. This function is
recursive, if the source is a directory, all its subdirectories are created
in the destination. Existing files in destination are overwrited without
any warning.
Args:
source (str): File or directory name.
dest (str): Directory name.
Raises:
FileNotFoundError: Destination directory doesn't exist.
"""
    if os.path.isdir(source):
        for child in os.listdir(source):
            src_child = os.path.join(source, child)
            if os.path.isdir(src_child):
                # Mirror the subdirectory in the destination and recurse into it.
                new_dest = os.path.join(dest, child)
                os.makedirs(new_dest, exist_ok=True)
                copy_rec(src_child, new_dest)
            else:
                # File children are copied directly into the current destination directory.
                copy_rec(src_child, dest)
    elif os.path.isfile(source):
        logging.info(' Copy "{}" to "{}"'.format(source, dest))
        shutil.copy(source, dest)
    else:
        logging.info(' Ignoring "{}"'.format(source)) | 0.001081 |
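A small end-to-end sketch of how copy_rec can be exercised, using throwaway temporary directories:

```python
import os
import tempfile

src = tempfile.mkdtemp()
dst = tempfile.mkdtemp()
os.makedirs(os.path.join(src, 'sub'))
with open(os.path.join(src, 'sub', 'a.txt'), 'w') as fh:
    fh.write('hello')

copy_rec(src, dst)
print(os.path.isfile(os.path.join(dst, 'sub', 'a.txt')))  # True
```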
def main():
"""
Testing function for PDA - DFA Diff Operation
"""
    if len(argv) < 3:
print 'Usage: '
print ' Get A String %s CFG_fileA FST_fileB' % argv[0]
return
alphabet = createalphabet()
cfgtopda = CfgPDA(alphabet)
print '* Parsing Grammar:',
mma = cfgtopda.yyparse(argv[1])
print 'OK'
flex_a = Flexparser(alphabet)
print '* Parsing Regex:',
mmb = flex_a.yyparse(argv[2])
print mmb
print 'OK'
print '* Minimize Automaton:',
mmb.minimize()
print 'OK'
print mmb
print '* Diff:',
ops = PdaDiff(mma, mmb, alphabet)
mmc = ops.diff()
print 'OK'
print '* Get String:',
print ops.get_string() | 0.002736 |
def register_finders():
"""Register finders necessary for PEX to function properly."""
# If the previous finder is set, then we've already monkeypatched, so skip.
global __PREVIOUS_FINDER
if __PREVIOUS_FINDER:
return
# save previous finder so that it can be restored
previous_finder = _get_finder(zipimport.zipimporter)
assert previous_finder, 'This appears to be using an incompatible setuptools.'
# Enable finding zipped wheels.
pkg_resources.register_finder(
zipimport.zipimporter, ChainedFinder.of(pkg_resources.find_eggs_in_zip, find_wheels_in_zip))
# append the wheel finder
_add_finder(pkgutil.ImpImporter, find_wheels_on_path)
if importlib_machinery is not None:
_add_finder(importlib_machinery.FileFinder, find_wheels_on_path)
__PREVIOUS_FINDER = previous_finder | 0.019584 |
def block_uid(value: Union[str, BlockUID, None]) -> BlockUID:
"""
Convert value to BlockUID instance
:param value: Value to convert
:return:
"""
if isinstance(value, BlockUID):
return value
elif isinstance(value, str):
return BlockUID.from_str(value)
elif value is None:
return BlockUID.empty()
else:
raise TypeError("Cannot convert {0} to BlockUID".format(type(value))) | 0.002278 |
def get_pod_container(self,
volume_mounts,
persistence_outputs=None,
persistence_data=None,
outputs_refs_jobs=None,
outputs_refs_experiments=None,
secret_refs=None,
configmap_refs=None,
env_vars=None,
command=None,
args=None,
resources=None,
ports=None,
ephemeral_token=None):
"""Pod job container for task."""
self._pod_container_checks()
# Env vars preparations
env_vars = to_list(env_vars, check_none=True)
env_vars += self._get_container_pod_env_vars(
persistence_outputs=persistence_outputs,
persistence_data=persistence_data,
outputs_refs_jobs=outputs_refs_jobs,
outputs_refs_experiments=outputs_refs_experiments,
ephemeral_token=ephemeral_token
)
env_vars += get_resources_env_vars(resources=resources)
# Env from configmap and secret refs
env_from = get_pod_env_from(secret_refs=secret_refs, configmap_refs=configmap_refs)
def get_ports():
_ports = to_list(ports) if ports else []
return [client.V1ContainerPort(container_port=port) for port in _ports] or None
return client.V1Container(name=self.job_container_name,
image=self.job_docker_image,
command=command,
args=args,
ports=get_ports(),
env=env_vars,
env_from=env_from,
resources=get_resources(resources),
volume_mounts=volume_mounts) | 0.008569 |
def pa11y_counts(results):
"""
Given a list of pa11y results, return three integers:
number of errors, number of warnings, and number of notices.
"""
num_error = 0
num_warning = 0
num_notice = 0
for result in results:
if result['type'] == 'error':
num_error += 1
elif result['type'] == 'warning':
num_warning += 1
elif result['type'] == 'notice':
num_notice += 1
return num_error, num_warning, num_notice | 0.002 |
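A usage sketch with hypothetical pa11y-style result dicts:

```python
results = [
    {'type': 'error', 'message': 'Img element missing an alt attribute'},
    {'type': 'error', 'message': 'Form field has no label'},
    {'type': 'warning', 'message': 'Text contrast may be insufficient'},
    {'type': 'notice', 'message': 'Check that the heading order is meaningful'},
]
print(pa11y_counts(results))  # (2, 1, 1)
```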
def set_input_by_number(self, number, value):
"""
Set the value of form element by its number in the form
:param number: number of element
:param value: value which should be set to element
"""
sel = XpathSelector(self.form)
elem = sel.select('.//input[@type="text"]')[number].node()
return self.set_input(elem.get('name'), value) | 0.005051 |
def get_previous_version(version: str) -> Optional[str]:
"""
Returns the version prior to the given version.
:param version: A string with the version number.
:return: A string with the previous version number
"""
debug('get_previous_version')
found_version = False
for commit_hash, commit_message in get_commit_log():
debug('checking commit {}'.format(commit_hash))
if version in commit_message:
found_version = True
debug('found_version in "{}"'.format(commit_message))
continue
if found_version:
matches = re.match(r'v?(\d+.\d+.\d+)', commit_message)
if matches:
debug('version matches', commit_message)
return matches.group(1).strip()
return get_last_version([version, 'v{}'.format(version)]) | 0.001175 |
def get_model_id_constraints(model):
"""Returns constraints to target a specific model."""
pkname = model.primary_key_name
pkey = model.primary_key
return get_id_constraints(pkname, pkey) | 0.025641 |
def greedy_trails(subg, odds, verbose):
    '''Greedily select trails, extending each trail as far as possible before starting a new one'''
if verbose:
print('\tCreating edge map')
edges = defaultdict(list)
for x,y in subg.edges():
edges[x].append(y)
edges[y].append(x)
if verbose:
print('\tSelecting trails')
trails = []
for x in subg.nodes():
if verbose > 2:
print('\t\tNode {0}'.format(x))
while len(edges[x]) > 0:
y = edges[x][0]
trail = [(x,y)]
edges[x].remove(y)
edges[y].remove(x)
while len(edges[y]) > 0:
x = y
y = edges[y][0]
trail.append((x,y))
edges[x].remove(y)
edges[y].remove(x)
trails.append(trail)
return trails | 0.004711 |
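A usage sketch on a small graph, assuming networkx and the module-level defaultdict import from the snippet are available:

```python
import networkx as nx

# A 4-cycle: every vertex has even degree, so a single closed trail covers all edges.
subg = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)])
trails = greedy_trails(subg, odds=[], verbose=False)
print(trails)  # [[(0, 1), (1, 2), (2, 3), (3, 0)]] (edge order may vary by networkx version)
```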
def _dump_field(self, fd):
"""Dump single field.
"""
v = {}
v['label'] = Pbd.LABELS[fd.label]
v['type'] = fd.type_name if len(fd.type_name) > 0 else Pbd.TYPES[fd.type]
v['name'] = fd.name
v['number'] = fd.number
v['default'] = '[default = {}]'.format(fd.default_value) if len(fd.default_value) > 0 else ''
f = '{label} {type} {name} = {number} {default};'.format(**v)
f = ' '.join(f.split())
self._print(f)
if len(fd.type_name) > 0:
self.uses.append(fd.type_name) | 0.010187 |
def convert(image, shape, gray=False, dtype='float64', normalize='max'):
"""Convert image to standardized format.
Several properties of the input image may be changed including the shape,
data type and maximal value of the image. In addition, this function may
convert the image into an ODL object and/or a gray scale image.
"""
image = image.astype(dtype)
if gray:
image[..., 0] *= 0.2126
image[..., 1] *= 0.7152
image[..., 2] *= 0.0722
image = np.sum(image, axis=2)
if shape is not None:
image = skimage.transform.resize(image, shape, mode='constant')
image = image.astype(dtype)
if normalize == 'max':
image /= image.max()
elif normalize == 'sum':
image /= image.sum()
else:
assert False
return image | 0.001206 |
def substitute(arg, value, replacement=None, else_=None):
"""
Substitute (replace) one or more values in a value expression
Parameters
----------
value : expr-like or dict
replacement : expr-like, optional
If an expression is passed to value, this must be passed
else_ : expr, optional
Returns
-------
replaced : case statement (for now!)
"""
expr = arg.case()
if isinstance(value, dict):
for k, v in sorted(value.items()):
expr = expr.when(k, v)
else:
expr = expr.when(value, replacement)
if else_ is not None:
expr = expr.else_(else_)
else:
expr = expr.else_(arg)
return expr.end() | 0.001416 |
def url(self, endpoint):
"""
Returns full URL for specified API endpoint
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> translate.url("langs")
'https://translate.yandex.net/api/v1.5/tr.json/getLangs'
>>> translate.url("detect")
'https://translate.yandex.net/api/v1.5/tr.json/detect'
>>> translate.url("translate")
'https://translate.yandex.net/api/v1.5/tr.json/translate'
"""
return self.api_url.format(version=self.api_version,
endpoint=self.api_endpoints[endpoint]) | 0.003231 |
def _set_session_type(self, v, load=False):
"""
Setter method for session_type, mapped from YANG variable /mpls_state/rsvp/sessions/psbs/session_type (session-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_session_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_session_type() directly.
YANG Description: Session type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'session-type-none': {'value': 0}, u'merged-backup': {'value': 6}, u'ingress-detour': {'value': 1}, u'egress-backup': {'value': 7}, u'repaired-session': {'value': 8}, u'bypass-ingress': {'value': 9}, u'transit-detour': {'value': 2}, u'egress-detour': {'value': 4}, u'ingress-backup': {'value': 5}, u'merged-detour': {'value': 3}},), is_leaf=True, yang_name="session-type", rest_name="session-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """session_type must be of a type compatible with session-type""",
'defined-type': "brocade-mpls-operational:session-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'session-type-none': {'value': 0}, u'merged-backup': {'value': 6}, u'ingress-detour': {'value': 1}, u'egress-backup': {'value': 7}, u'repaired-session': {'value': 8}, u'bypass-ingress': {'value': 9}, u'transit-detour': {'value': 2}, u'egress-detour': {'value': 4}, u'ingress-backup': {'value': 5}, u'merged-detour': {'value': 3}},), is_leaf=True, yang_name="session-type", rest_name="session-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-type', is_config=False)""",
})
self.__session_type = t
if hasattr(self, '_set'):
self._set() | 0.003979 |
def _add_thread(self, aThread):
"""
Private method to add a thread object to the snapshot.
@type aThread: L{Thread}
@param aThread: Thread object.
"""
## if not isinstance(aThread, Thread):
## if hasattr(aThread, '__class__'):
## typename = aThread.__class__.__name__
## else:
## typename = str(type(aThread))
## msg = "Expected Thread, got %s instead" % typename
## raise TypeError(msg)
dwThreadId = aThread.dwThreadId
## if dwThreadId in self.__threadDict:
## msg = "Already have a Thread object with ID %d" % dwThreadId
## raise KeyError(msg)
aThread.set_process(self)
self.__threadDict[dwThreadId] = aThread | 0.015248 |
def ecdsa_verify_raw(msg32, vrs, pub):
"""
Takes a message, the signature being verified and a pubkey
Returns 1 if signature is valid with given pubkey
"""
# assert len(vrs) == 3
if len(vrs) == 3:
return ecdsa_verify_compact(msg32, _encode_sig(*vrs), pub)
else:
return ecdsa_verify_compact(msg32, vrs, pub) | 0.002793 |
def CmdRegister(self, challenge_param, app_param):
"""Register security key.
Ask the security key to register with a particular origin & client.
Args:
challenge_param: Arbitrary 32 byte challenge string.
      app_param: Arbitrary 32 byte application parameter.
Returns:
A binary structure containing the key handle, attestation, and a
signature over that by the attestation key. The precise format
is dictated by the FIDO U2F specs.
Raises:
      TUPRequiredError: A Test of User Presence is required to proceed.
ApduError: Something went wrong on the device.
"""
self.logger.debug('CmdRegister')
if len(challenge_param) != 32 or len(app_param) != 32:
raise errors.InvalidRequestError()
body = bytearray(challenge_param + app_param)
response = self.InternalSendApdu(apdu.CommandApdu(
0,
apdu.CMD_REGISTER,
0x03, # Per the U2F reference code tests
0x00,
body))
response.CheckSuccessOrRaise()
return response.body | 0.001916 |
def accept_changes(self):
"""Accept changes"""
for (i, j), value in list(self.model.changes.items()):
self.data[i, j] = value
if self.old_data_shape is not None:
self.data.shape = self.old_data_shape | 0.007937 |
def decrypt(encrypted_privkey, passphrase):
"""BIP0038 non-ec-multiply decryption. Returns WIF privkey.
:param Base58 encrypted_privkey: Private key
:param str passphrase: UTF-8 encoded passphrase for decryption
:return: BIP0038 non-ec-multiply decrypted key
:rtype: Base58
:raises SaltException: if checksum verification failed (e.g. wrong
password)
"""
d = unhexlify(base58decode(encrypted_privkey))
    d = d[2:]  # remove the leading 0x01 0x42 prefix bytes
flagbyte = d[0:1] # get flag byte
d = d[1:] # get payload
assert flagbyte == b"\xc0", "Flagbyte has to be 0xc0"
salt = d[0:4]
d = d[4:-4]
if SCRYPT_MODULE == "scrypt": # pragma: no cover
key = scrypt.hash(passphrase, salt, 16384, 8, 8)
elif SCRYPT_MODULE == "pylibscrypt": # pragma: no cover
key = scrypt.scrypt(bytes(passphrase, "utf-8"), salt, 16384, 8, 8)
else:
raise ValueError("No scrypt module loaded") # pragma: no cover
derivedhalf1 = key[0:32]
derivedhalf2 = key[32:64]
encryptedhalf1 = d[0:16]
encryptedhalf2 = d[16:32]
aes = AES.new(derivedhalf2, AES.MODE_ECB)
decryptedhalf2 = aes.decrypt(encryptedhalf2)
decryptedhalf1 = aes.decrypt(encryptedhalf1)
privraw = decryptedhalf1 + decryptedhalf2
privraw = "%064x" % (int(hexlify(privraw), 16) ^ int(hexlify(derivedhalf1), 16))
wif = Base58(privraw)
""" Verify Salt """
privkey = PrivateKey(format(wif, "wif"))
addr = format(privkey.bitcoin.address, "BTC")
a = _bytes(addr)
saltverify = hashlib.sha256(hashlib.sha256(a).digest()).digest()[0:4]
if saltverify != salt: # pragma: no cover
raise SaltException("checksum verification failed! Password may be incorrect.")
return wif | 0.001702 |
def from_httplib(cls, message, duplicates=('set-cookie',)): # Python 2
"""Read headers from a Python 2 httplib message object."""
ret = cls(message.items())
# ret now contains only the last header line for each duplicate.
# Importing with all duplicates would be nice, but this would
# mean to repeat most of the raw parsing already done, when the
# message object was created. Extracting only the headers of interest
# separately, the cookies, should be faster and requires less
# extra code.
for key in duplicates:
ret.discard(key)
for val in message.getheaders(key):
ret.add(key, val)
return ret | 0.005533 |
def writeGraph(grph, name, edgeInfo = True, typing = False, suffix = 'csv', overwrite = True, allSameAttribute = False):
"""Writes both the edge list and the node attribute list of _grph_ to files starting with _name_.
The output files start with _name_, the file type (edgeList, nodeAttributes) then if typing is True the type of graph (directed or undirected) then the suffix, the default is as follows:
>> name_fileType.suffix
    Both files are csv's with comma delimiters and double quote quoting characters. The edge list has two columns for the source and destination of the edge, `'From'` and `'To'` respectively, then, if _edgeInfo_ is `True`, another column is created for each attribute of the edge. The node list has one column called "ID" with the node ids used by networkx and all other columns are the node attributes.
    To read back these files use [readGraph()](#metaknowledge.graphHelpers.readGraph) and to write only one type of list use [writeEdgeList()](#metaknowledge.graphHelpers.writeEdgeList) or [writeNodeAttributeFile()](#metaknowledge.graphHelpers.writeNodeAttributeFile).
**Warning**: this function will overwrite files, if they are in the way of the output, to prevent this set _overwrite_ to `False`
**Note**: If any nodes or edges are missing an attribute a `KeyError` will be raised.
# Parameters
_grph_ : `networkx Graph`
> A networkx graph of the network to be written.
_name_ : `str`
> The start of the file name to be written, can include a path.
_edgeInfo_ : `optional [bool]`
    > Default `True`, if `True` the attributes of each edge are written to the edge list.
_typing_ : `optional [bool]`
    > Default `False`, if `True` the directedness of the graph will be added to the file names.
_suffix_ : `optional [str]`
> Default `"csv"`, the suffix of the file.
_overwrite_ : `optional [bool]`
> Default `True`, if `True` files will be overwritten silently, otherwise an `OSError` exception will be raised.
"""
progArgs = (0, "Writing the graph to files starting with: {}".format(name))
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if typing:
if isinstance(grph, nx.classes.digraph.DiGraph) or isinstance(grph, nx.classes.multidigraph.MultiDiGraph):
grphType = "_directed"
else:
grphType = "_undirected"
else:
grphType = ''
nameCompts = os.path.split(os.path.expanduser(os.path.normpath(name)))
if nameCompts[0] == '' and nameCompts[1] == '':
edgeListName = "edgeList"+ grphType + '.' + suffix
nodesAtrName = "nodeAttributes"+ grphType + '.' + suffix
elif nameCompts[0] == '':
edgeListName = nameCompts[1] + "_edgeList"+ grphType + '.' + suffix
nodesAtrName = nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix
elif nameCompts[1] == '':
edgeListName = os.path.join(nameCompts[0], "edgeList"+ grphType + '.' + suffix)
nodesAtrName = os.path.join(nameCompts[0], "nodeAttributes"+ grphType + '.' + suffix)
else:
edgeListName = os.path.join(nameCompts[0], nameCompts[1] + "_edgeList"+ grphType + '.' + suffix)
nodesAtrName = os.path.join(nameCompts[0], nameCompts[1] + "_nodeAttributes"+ grphType + '.' + suffix)
if not overwrite:
if os.path.isfile(edgeListName):
raise OSError(edgeListName+ " already exists")
if os.path.isfile(nodesAtrName):
raise OSError(nodesAtrName + " already exists")
writeEdgeList(grph, edgeListName, extraInfo = edgeInfo, allSameAttribute = allSameAttribute, _progBar = PBar)
writeNodeAttributeFile(grph, nodesAtrName, allSameAttribute = allSameAttribute, _progBar = PBar)
PBar.finish("{} nodes and {} edges written to file".format(len(grph.nodes()), len(grph.edges()))) | 0.012512 |
def action_list(self):
"Lists all hosts on the LB"
format = "%-35s %-25s %-8s"
print format % ("HOST", "ACTION", "SUBDOMS")
for host, details in sorted(self.client.get_all().items()):
if details[0] in ("proxy", "mirror"):
action = "%s<%s>" % (
details[0],
",".join(
"%s:%s" % (host, port)
for host, port in details[1]['backends']
)
)
elif details[0] == "static":
action = "%s<%s>" % (
details[0],
details[1]['type'],
)
elif details[0] == "redirect":
action = "%s<%s>" % (
details[0],
details[1]['redirect_to'],
)
elif details[0] == "empty":
action = "%s<%s>" % (
details[0],
details[1]['code'],
)
else:
action = details[0]
print format % (host, action, details[2]) | 0.001754 |
def fromMessage(klass, message, op_endpoint=UNUSED):
"""Construct me from an OpenID Message.
@param message: An OpenID check_authentication Message
@type message: L{openid.message.Message}
@returntype: L{CheckAuthRequest}
"""
self = klass.__new__(klass)
self.message = message
self.namespace = message.getOpenIDNamespace()
self.assoc_handle = message.getArg(OPENID_NS, 'assoc_handle')
self.sig = message.getArg(OPENID_NS, 'sig')
if (self.assoc_handle is None or
self.sig is None):
fmt = "%s request missing required parameter from message %s"
raise ProtocolError(
message, text=fmt % (self.mode, message))
self.invalidate_handle = message.getArg(OPENID_NS, 'invalidate_handle')
self.signed = message.copy()
# openid.mode is currently check_authentication because
# that's the mode of this request. But the signature
# was made on something with a different openid.mode.
# http://article.gmane.org/gmane.comp.web.openid.general/537
if self.signed.hasKey(OPENID_NS, "mode"):
self.signed.setArg(OPENID_NS, "mode", "id_res")
return self | 0.00239 |
def remove_tag(tag_id):
'''
    Delete the records of a certain tag.
'''
entry = TabPost2Tag.delete().where(
TabPost2Tag.tag_id == tag_id
)
entry.execute() | 0.009569 |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
C = self.COEFFS[imt]
mean = (
self._get_magnitude_term(C, rup.mag) +
self._get_distance_term(C, rup.mag, dists.rrup) +
self._get_style_of_faulting_term(C, rup.rake) +
self._get_site_response_term(C, sites.vs30))
stddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
return mean, stddevs | 0.003241 |
def plot_elbo(self, figsize=(15,7)):
"""
Plots the ELBO progress (if present)
"""
import matplotlib.pyplot as plt
plt.figure(figsize=figsize)
plt.plot(self.elbo_records)
plt.xlabel("Iterations")
plt.ylabel("ELBO")
plt.show() | 0.010101 |
def position(self):
""" Returns an integer corresponding to the position of the post in the topic. """
position = self.topic.posts.filter(Q(created__lt=self.created) | Q(id=self.id)).count()
return position | 0.017391 |
def _add_axislabels(xlabel,ylabel):
"""
NAME:
_add_axislabels
PURPOSE:
add axis labels to the current figure
INPUT:
xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed
ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed
OUTPUT:
(none; works on the current axes)
HISTORY:
2009-12-23 - Written - Bovy (NYU)
"""
if xlabel != None:
if xlabel[0] != '$':
thisxlabel=r'$'+xlabel+'$'
else:
thisxlabel=xlabel
pyplot.xlabel(thisxlabel)
if ylabel != None:
if ylabel[0] != '$':
thisylabel=r'$'+ylabel+'$'
else:
thisylabel=ylabel
pyplot.ylabel(thisylabel) | 0.010568 |
def credit_card_expiration_date(self, minimum: int = 16,
maximum: int = 25) -> str:
"""Generate a random expiration date for credit card.
:param minimum: Date of issue.
:param maximum: Maximum of expiration_date.
:return: Expiration date of credit card.
:Example:
03/19.
"""
month = self.random.randint(1, 12)
year = self.random.randint(minimum, maximum)
return '{0:02d}/{1}'.format(month, year) | 0.005803 |
def is_course_complete(last_update):
"""
    Determine if the course is likely to have been terminated or not.
    We return True if the timestamp given by last_update is 30 days or older
    than today's date. Otherwise, we return False.
    The intended use case for this is to detect if a given course has not
    seen any update in the last 30 days or more; if it has, we return False,
    since it is probably too soon to declare the course complete.
"""
rv = False
if last_update >= 0:
delta = time.time() - last_update
max_delta = total_seconds(datetime.timedelta(days=30))
if delta > max_delta:
rv = True
return rv | 0.001468 |
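A usage sketch, assuming the module's total_seconds helper behaves like timedelta.total_seconds (a stand-in is defined here for illustration) and that time/datetime are imported as in the snippet's module:

```python
import datetime
import time

def total_seconds(td):
    # Hypothetical stand-in for the helper used by is_course_complete().
    return td.total_seconds()

recent = time.time() - 5 * 24 * 3600   # last updated 5 days ago
stale = time.time() - 45 * 24 * 3600   # no update for 45 days
print(is_course_complete(recent))  # False
print(is_course_complete(stale))   # True
```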
def lazy_constant(fn):
"""Decorator to make a function that takes no arguments use the LazyConstant class."""
class NewLazyConstant(LazyConstant):
@functools.wraps(fn)
def __call__(self):
return self.get_value()
return NewLazyConstant(fn) | 0.007143 |
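A minimal sketch of the decorator in use; the LazyConstant base is not shown in the snippet, so a hypothetical compute-once-and-cache version is assumed here.

```python
class LazyConstant:
    """Hypothetical base class: evaluate the wrapped function once, then cache."""
    def __init__(self, fn):
        self.fn = fn
        self._computed = False
        self._value = None

    def get_value(self):
        if not self._computed:
            self._value = self.fn()
            self._computed = True
        return self._value

@lazy_constant
def answer():
    print('computing...')
    return 42

print(answer())  # prints "computing..." then 42
print(answer())  # 42, served from the cache
```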
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to an `EllipticalAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `EllipticalAnnulus` object
An `EllipticalAnnulus` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return EllipticalAnnulus(**pixel_params) | 0.002663 |
def get_private_room_history(self, room_id, oldest=None, **kwargs):
"""
        Get the message history of a specific private group (room).
:param room_id:
:param kwargs:
:return:
"""
return GetPrivateRoomHistory(settings=self.settings, **kwargs).call(
room_id=room_id,
oldest=oldest,
**kwargs
) | 0.005063 |
def run(self, series, exponent=None):
'''
:type series: List
:type exponent: int
:rtype: float
'''
try:
return self.calculateHurst(series, exponent)
except Exception as e:
print(" Error: %s" % e) | 0.00722 |
def update(self, authorize_redirect_url=values.unset, company_name=values.unset,
deauthorize_callback_method=values.unset,
deauthorize_callback_url=values.unset, description=values.unset,
friendly_name=values.unset, homepage_url=values.unset,
permissions=values.unset):
"""
Update the ConnectAppInstance
:param unicode authorize_redirect_url: The URL to redirect the user to after authorization
:param unicode company_name: The company name to set for the Connect App
:param unicode deauthorize_callback_method: The HTTP method to use when calling deauthorize_callback_url
:param unicode deauthorize_callback_url: The URL to call to de-authorize the Connect App
:param unicode description: A description of the Connect App
:param unicode friendly_name: A string to describe the resource
:param unicode homepage_url: A public URL where users can obtain more information
:param ConnectAppInstance.Permission permissions: The set of permissions that your ConnectApp will request
:returns: Updated ConnectAppInstance
:rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance
"""
data = values.of({
'AuthorizeRedirectUrl': authorize_redirect_url,
'CompanyName': company_name,
'DeauthorizeCallbackMethod': deauthorize_callback_method,
'DeauthorizeCallbackUrl': deauthorize_callback_url,
'Description': description,
'FriendlyName': friendly_name,
'HomepageUrl': homepage_url,
'Permissions': serialize.map(permissions, lambda e: e),
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ConnectAppInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
) | 0.006432 |
def enable_encryption(self, output_key, input_key):
"""Enable encryption with the specified keys."""
self._chacha = chacha20.Chacha20Cipher(output_key, input_key) | 0.011236 |
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D from panel, holding passed axis constant.
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = notna(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result | 0.001626 |
def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None, port=None, esxi_hosts=None, credstore=None):
'''
Run an ESXCLI command directly on the host or list of hosts.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
cmd_str
The ESXCLI command to run. Note: This should not include the ``-s``, ``-u``,
``-p``, ``-h``, ``--protocol``, or ``--portnumber`` arguments that are
frequently passed when using a bare ESXCLI command from the command line.
Those arguments are handled by this function via the other args and kwargs.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
esxi_hosts
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
CLI Example:
.. code-block:: bash
# Used for ESXi host connection information
salt '*' vsphere.esxcli_cmd my.esxi.host root bad-password \
'system coredump network get'
# Used for connecting to a vCenter Server
salt '*' vsphere.esxcli_cmd my.vcenter.location root bad-password \
'system coredump network get' esxi_hosts='[esxi-1.host.com, esxi-2.host.com]'
'''
ret = {}
if esxi_hosts:
if not isinstance(esxi_hosts, list):
raise CommandExecutionError('\'esxi_hosts\' must be a list.')
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd_str,
protocol=protocol, port=port,
esxi_host=esxi_host, credstore=credstore)
if response['retcode'] != 0:
ret.update({esxi_host: {'Error': response.get('stdout')}})
else:
ret.update({esxi_host: response})
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = salt.utils.vmware.esxcli(host, username, password, cmd_str,
protocol=protocol, port=port,
credstore=credstore)
if response['retcode'] != 0:
ret.update({host: {'Error': response.get('stdout')}})
else:
ret.update({host: response})
return ret | 0.003971 |
def p_operation_definition5(self, p):
"""
operation_definition : operation_type variable_definitions directives selection_set
"""
p[0] = self.operation_cls(p[1])(
selections=p[4],
variable_definitions=p[2],
directives=p[3],
) | 0.009967 |
def is_binary(data):
'''
Detects if the passed string of data is binary or text
'''
if not data or not isinstance(data, (six.string_types, six.binary_type)):
return False
if isinstance(data, six.binary_type):
if b'\0' in data:
return True
elif str('\0') in data:
return True
text_characters = ''.join([chr(x) for x in range(32, 127)] + list('\n\r\t\b'))
# Get the non-text characters (map each character to itself then use the
# 'remove' option to get rid of the text characters.)
if six.PY3:
if isinstance(data, six.binary_type):
import salt.utils.data
nontext = data.translate(None, salt.utils.data.encode(text_characters))
else:
trans = ''.maketrans('', '', text_characters)
nontext = data.translate(trans)
else:
if isinstance(data, six.text_type):
trans_args = ({ord(x): None for x in text_characters},)
else:
trans_args = (None, str(text_characters)) # future lint: blacklisted-function
nontext = data.translate(*trans_args)
# If more than 30% non-text characters, then
# this is considered binary data
if float(len(nontext)) / len(data) > 0.30:
return True
return False | 0.003089 |
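A few illustrative calls, assuming six and the salt utilities imported by the snippet are available:

```python
print(is_binary(''))                       # False: empty input short-circuits
print(is_binary('hello world\n'))          # False: ordinary text
print(is_binary(b'\x00\x01\x02 payload'))  # True: contains a NUL byte
```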
def vote(self):
"""选举新闻标题
Return:
title -- 新闻标题,str类型
"""
# 初始化
weight_queue = []
sameKV = 0
count = 0
        # Similarity computation
for unit in self._queue:
unit_set = convert_to_set(unit)
for i in unit_set:
if i in self.wordvector_word:
sameKV += self.wordvector_weight[self.wordvector_word.index(i)]
if len(self._queue) >= 5:
                # k is a positional weight: the closer a text line is to the corpus, the larger its weight, in [0, 1]
k = (count + 1) / len(self._queue)
beta = normalized(self.beta_list[count], self.beta_list)
count += 1
else:
k = 1
beta = normalized(self.beta_list[count], self.beta_list)
count += 1
jaccard = sameKV / len(
(set(unit_set) | set(self.wordvector_word)) - (set(unit_set) & set(self.wordvector_word)))
unit_weight = k * beta * jaccard
weight_queue.append(unit_weight)
sameKV = 0
        log('debug',
            'text line [{}]\nsimilarity parameters, unit_weight: [{}], k: [{}], beta: [{}], jaccard: [{}]\n'.format(
                unit, unit_weight, k, beta, jaccard))
        # Filtering
try:
title = self._queue[weight_queue.index(sorted(weight_queue, reverse=True)[0])]
except:
title = ''
return title | 0.005351 |
def p_iteration_statement_4(self, p):
"""
iteration_statement \
: FOR LPAREN left_hand_side_expr IN expr RPAREN statement
"""
p[0] = ast.ForIn(item=p[3], iterable=p[5], statement=p[7]) | 0.008772 |
def _replace_tex_math(node, mml_url, mc_client=None, retry=0):
"""call mml-api service to replace TeX math in body of node with mathml"""
math = node.attrib['data-math'] or node.text
if math is None:
return None
eq = {}
if mc_client:
math_key = hashlib.md5(math.encode('utf-8')).hexdigest()
eq = json.loads(mc_client.get(math_key) or '{}')
if not eq:
res = requests.post(mml_url, {'math': math.encode('utf-8'),
'mathType': 'TeX',
'mml': 'true'})
if res: # Non-error response from requests
eq = res.json()
if mc_client:
mc_client.set(math_key, res.text)
if 'components' in eq and len(eq['components']) > 0:
for component in eq['components']:
if component['format'] == 'mml':
mml = etree.fromstring(component['source'])
if node.tag.endswith('span'):
mml.set('display', 'inline')
elif node.tag.endswith('div'):
mml.set('display', 'block')
mml.tail = node.tail
return mml
else:
logger.warning('Retrying math TeX conversion: '
'{}'.format(json.dumps(eq, indent=4)))
retry += 1
if retry < 2:
return _replace_tex_math(node, mml_url, mc_client, retry)
return None | 0.000712 |
def _validate_states(states, topology):
'''Validate states to avoid ignoring states during initialization'''
states = states or []
if isinstance(states, dict):
for x in states:
assert x in topology.node
else:
assert len(states) <= len(topology)
return states | 0.003268 |
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the DeviceCredential struct and decode it into
its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(DeviceCredential, self).read(
input_stream,
kmip_version=kmip_version
)
local_stream = BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.DEVICE_SERIAL_NUMBER, local_stream):
self._device_serial_number = primitives.TextString(
tag=enums.Tags.DEVICE_SERIAL_NUMBER
)
self._device_serial_number.read(
local_stream,
kmip_version=kmip_version
)
if self.is_tag_next(enums.Tags.PASSWORD, local_stream):
self._password = primitives.TextString(
tag=enums.Tags.PASSWORD
)
self._password.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.DEVICE_IDENTIFIER, local_stream):
self._device_identifier = primitives.TextString(
tag=enums.Tags.DEVICE_IDENTIFIER
)
self._device_identifier.read(
local_stream,
kmip_version=kmip_version
)
if self.is_tag_next(enums.Tags.NETWORK_IDENTIFIER, local_stream):
self._network_identifier = primitives.TextString(
tag=enums.Tags.NETWORK_IDENTIFIER
)
self._network_identifier.read(
local_stream,
kmip_version=kmip_version
)
if self.is_tag_next(enums.Tags.MACHINE_IDENTIFIER, local_stream):
self._machine_identifier = primitives.TextString(
tag=enums.Tags.MACHINE_IDENTIFIER
)
self._machine_identifier.read(
local_stream,
kmip_version=kmip_version
)
if self.is_tag_next(enums.Tags.MEDIA_IDENTIFIER, local_stream):
self._media_identifier = primitives.TextString(
tag=enums.Tags.MEDIA_IDENTIFIER
)
self._media_identifier.read(
local_stream,
kmip_version=kmip_version
)
self.is_oversized(local_stream) | 0.000742 |
def fits_finder_chart(
fitsfile,
outfile,
fitsext=0,
wcsfrom=None,
scale=ZScaleInterval(),
stretch=LinearStretch(),
colormap=plt.cm.gray_r,
findersize=None,
finder_coordlimits=None,
overlay_ra=None,
overlay_decl=None,
overlay_pltopts={'marker':'o',
'markersize':10.0,
'markerfacecolor':'none',
'markeredgewidth':2.0,
'markeredgecolor':'red'},
overlay_zoomcontain=False,
grid=False,
gridcolor='k'
):
'''This makes a finder chart for a given FITS with an optional object
position overlay.
Parameters
----------
fitsfile : str
`fitsfile` is the FITS file to use to make the finder chart.
outfile : str
`outfile` is the name of the output file. This can be a png or pdf or
whatever else matplotlib can write given a filename and extension.
fitsext : int
Sets the FITS extension in `fitsfile` to use to extract the image array
from.
wcsfrom : str or None
If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
be taken from the FITS header of `fitsfile`. If this is not None, it
must be a FITS or similar file that contains a WCS header in its first
extension.
scale : astropy.visualization.Interval object
`scale` sets the normalization for the FITS pixel values. This is an
astropy.visualization Interval object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
stretch : astropy.visualization.Stretch object
`stretch` sets the stretch function for mapping FITS pixel values to
output pixel values. This is an astropy.visualization Stretch object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
colormap : matplotlib Colormap object
`colormap` is a matplotlib color map object to use for the output image.
findersize : None or tuple of two ints
If `findersize` is None, the output image size will be set by the NAXIS1
and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
`findersize` must be a tuple with the intended x and y size of the image
in inches (all output images will use a DPI = 100).
finder_coordlimits : list of four floats or None
If not None, `finder_coordlimits` sets x and y limits for the plot,
effectively zooming it in if these are smaller than the dimensions of
the FITS image. This should be a list of the form: [minra, maxra,
mindecl, maxdecl] all in decimal degrees.
overlay_ra, overlay_decl : np.array or None
`overlay_ra` and `overlay_decl` are ndarrays containing the RA and Dec
values to overplot on the image as an overlay. If these are both None,
then no overlay will be plotted.
overlay_pltopts : dict
`overlay_pltopts` controls how the overlay points will be plotted. This
a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
e.g. 'markersize', 'markerfacecolor', etc. The default options make red
outline circles at the location of each object in the overlay.
overlay_zoomcontain : bool
`overlay_zoomcontain` controls if the finder chart will be zoomed to
just contain the overlayed points. Everything outside the footprint of
these points will be discarded.
grid : bool
`grid` sets if a grid will be made on the output image.
gridcolor : str
        `gridcolor` sets the color of the grid lines. This is a usual matplotlib
color spec string.
Returns
-------
str or None
The filename of the generated output image if successful. None
otherwise.
'''
# read in the FITS file
if wcsfrom is None:
hdulist = pyfits.open(fitsfile)
img, hdr = hdulist[fitsext].data, hdulist[fitsext].header
hdulist.close()
frameshape = (hdr['NAXIS1'], hdr['NAXIS2'])
w = WCS(hdr)
elif os.path.exists(wcsfrom):
hdulist = pyfits.open(fitsfile)
img, hdr = hdulist[fitsext].data, hdulist[fitsext].header
hdulist.close()
frameshape = (hdr['NAXIS1'], hdr['NAXIS2'])
w = WCS(wcsfrom)
else:
LOGERROR('could not determine WCS info for input FITS: %s' %
fitsfile)
return None
# use the frame shape to set the output PNG's dimensions
if findersize is None:
fig = plt.figure(figsize=(frameshape[0]/100.0,
frameshape[1]/100.0))
else:
fig = plt.figure(figsize=findersize)
# set the coord limits if zoomcontain is True
# we'll leave 30 arcseconds of padding on each side
if (overlay_zoomcontain and
overlay_ra is not None and
overlay_decl is not None):
finder_coordlimits = [overlay_ra.min()-30.0/3600.0,
overlay_ra.max()+30.0/3600.0,
overlay_decl.min()-30.0/3600.0,
overlay_decl.max()+30.0/3600.0]
# set the coordinate limits if provided
if finder_coordlimits and isinstance(finder_coordlimits, (list,tuple)):
minra, maxra, mindecl, maxdecl = finder_coordlimits
cntra, cntdecl = (minra + maxra)/2.0, (mindecl + maxdecl)/2.0
pixelcoords = w.all_world2pix([[minra, mindecl],
[maxra, maxdecl],
[cntra, cntdecl]],1)
x1, y1, x2, y2 = (int(pixelcoords[0,0]),
int(pixelcoords[0,1]),
int(pixelcoords[1,0]),
int(pixelcoords[1,1]))
xmin = x1 if x1 < x2 else x2
xmax = x2 if x2 > x1 else x1
ymin = y1 if y1 < y2 else y2
ymax = y2 if y2 > y1 else y1
# create a new WCS with the same transform but new center coordinates
whdr = w.to_header()
whdr['CRPIX1'] = (xmax - xmin)/2
whdr['CRPIX2'] = (ymax - ymin)/2
whdr['CRVAL1'] = cntra
whdr['CRVAL2'] = cntdecl
whdr['NAXIS1'] = xmax - xmin
whdr['NAXIS2'] = ymax - ymin
w = WCS(whdr)
else:
        # NAXIS1 is the x (column) size and NAXIS2 the y (row) size of the image
        xmin, xmax, ymin, ymax = 0, hdr['NAXIS1'], 0, hdr['NAXIS2']
# add the axes with the WCS projection
# this should automatically handle subimages because we fix the WCS
# appropriately above for these
fig.add_subplot(111,projection=w)
if scale is not None and stretch is not None:
norm = ImageNormalize(img,
interval=scale,
stretch=stretch)
plt.imshow(img[ymin:ymax,xmin:xmax],
origin='lower',
cmap=colormap,
norm=norm)
else:
plt.imshow(img[ymin:ymax,xmin:xmax],
origin='lower',
cmap=colormap)
# handle additional options
if grid:
plt.grid(color=gridcolor,ls='solid',lw=1.0)
# handle the object overlay
if overlay_ra is not None and overlay_decl is not None:
our_pltopts = dict(
transform=plt.gca().get_transform('fk5'),
marker='o',
markersize=10.0,
markerfacecolor='none',
markeredgewidth=2.0,
markeredgecolor='red',
rasterized=True,
linestyle='none'
)
if overlay_pltopts is not None and isinstance(overlay_pltopts,
dict):
our_pltopts.update(overlay_pltopts)
plt.gca().set_autoscale_on(False)
plt.gca().plot(overlay_ra, overlay_decl,
**our_pltopts)
plt.xlabel('Right Ascension [deg]')
plt.ylabel('Declination [deg]')
# get the x and y axes objects to fix the ticks
xax = plt.gca().coords[0]
yax = plt.gca().coords[1]
yax.set_major_formatter('d.ddd')
xax.set_major_formatter('d.ddd')
# save the figure
plt.savefig(outfile, dpi=100.0)
plt.close('all')
return outfile | 0.002987 |
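As a sanity check on the zoom logic above, here is a minimal sketch (assuming astropy is available) that builds a synthetic TAN WCS and converts RA/Dec corner coordinates to pixel limits in the same way; the WCS parameters and coordinate box are invented for illustration:
from astropy.wcs import WCS

# synthetic TAN projection: 0.001 deg/pixel, centered at RA=120, Dec=30
w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [120.0, 30.0]
w.wcs.crpix = [512.0, 512.0]
w.wcs.cdelt = [-0.001, 0.001]

# corner and center sky coordinates, as in the finder-chart zoom logic
minra, maxra, mindecl, maxdecl = 119.9, 120.1, 29.9, 30.1
cntra, cntdecl = (minra + maxra) / 2.0, (mindecl + maxdecl) / 2.0
pix = w.all_world2pix([[minra, mindecl], [maxra, maxdecl], [cntra, cntdecl]], 1)

x1, y1, x2, y2 = int(pix[0, 0]), int(pix[0, 1]), int(pix[1, 0]), int(pix[1, 1])
xmin, xmax = min(x1, x2), max(x1, x2)
ymin, ymax = min(y1, y2), max(y1, y2)
print(xmin, xmax, ymin, ymax)  # pixel box to slice out of the image array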
def _append_html_element(self, item, element, html, glue=" ",
after=True):
"""Appends an html value after or before the element in the item dict
:param item: dictionary that represents an analysis row
        :param element: id of the element after (or before) which the html must be added
:param html: element to append
:param glue: glue to use for appending
        :param after: whether the html content must be added after or before the element"""
position = after and 'after' or 'before'
item[position] = item.get(position, {})
original = item[position].get(element, '')
if not original:
item[position][element] = html
return
item[position][element] = glue.join([original, html]) | 0.003822 |
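A small illustration of the item structure the method produces (the field id and HTML fragments are invented):
# item dict for one analysis row, before any appends
item = {'uid': '12', 'Result': '5.0'}

# after appending '<img src="warning.png"/>' after the 'Result' element
item_after_first_append = {
    'uid': '12',
    'Result': '5.0',
    'after': {'Result': '<img src="warning.png"/>'},
}

# a second append with glue=' | ' joins onto the existing fragment
item_after_second_append = {
    'uid': '12',
    'Result': '5.0',
    'after': {'Result': '<img src="warning.png"/> | (retested)'},
}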
def reparse(self, filepath):
"""Reparses the specified module file from disk, overwriting any
cached representations etc. of the module."""
#The easiest way to do this is to touch the file and then call
#the regular parse method so that the cache becomes invalidated.
self.tramp.touch(filepath)
self.parse(filepath) | 0.011019 |
def dispatch(self, event):
"""Given an event, send it to all the subscribers.
Args
event (:class:`~bigchaindb.events.EventTypes`): the event to
dispatch to all the subscribers.
"""
for event_types, queues in self.queues.items():
if event.type & event_types:
for queue in queues:
queue.put(event) | 0.004938 |
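The `event.type & event_types` bitmask test lets one queue subscribe to several event types at once. A self-contained sketch of that pattern (the event class and constants here are illustrative, not the actual BigchainDB API):
import queue

EVENT_BLOCK_VALID = 1
EVENT_BLOCK_INVALID = 2

class Event:
    def __init__(self, type_, data=None):
        self.type = type_
        self.data = data

class Dispatcher:
    def __init__(self):
        self.queues = {}  # maps an event-type bitmask to a list of queues

    def subscribe(self, event_types, q):
        self.queues.setdefault(event_types, []).append(q)

    def dispatch(self, event):
        for event_types, queues in self.queues.items():
            if event.type & event_types:
                for q in queues:
                    q.put(event)

d = Dispatcher()
all_events = queue.Queue()
d.subscribe(EVENT_BLOCK_VALID | EVENT_BLOCK_INVALID, all_events)
d.dispatch(Event(EVENT_BLOCK_VALID, {'height': 7}))
print(all_events.get().data)  # {'height': 7}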
def run(self, lines):
"""Filter method"""
# Nothing to do in this case
if (not self.adjust_path) and (not self.image_ext):
return lines
ret = []
for line in lines:
processed = {}
while True:
alt = ''
img_name = ''
match = re.search(r'!\[(.*?)\]\((.*?)\)', line)
# Make sure there is in fact an image file name
if match:
# Skip images we already processed
if match.group(0) in processed:
break
# Skip URLs
                if re.match(r'\w+://', match.group(2)):
break
alt = match.group(1)
img_name = match.group(2)
else:
break
if self.image_ext:
img_name = re.sub(r'\.\w+$', '.' + self.image_ext, img_name)
if self.adjust_path and (self.image_path or self.filename):
                    # explicitly specified image path takes precedence over
                    # path relative to chapter
if self.image_path and self.filename:
img_name = os.path.join(
os.path.abspath(self.image_path),
os.path.dirname(self.filename),
img_name)
# generate image path relative to file name
if self.filename and (not self.image_path):
img_name = os.path.join(
os.path.abspath(
os.path.dirname(self.filename)),
img_name)
# handle Windows '\', although this adds a small amount of unnecessary work on Unix systems
img_name = img_name.replace(os.path.sep, '/')
                line = re.sub(r'!\[(.*?)\]\((.*?)\)',
                              '![%s](%s)' % (alt, img_name), line)
# Mark this image as processed
processed[match.group(0)] = True
ret.append(line)
return ret | 0.003493 |
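A self-contained sketch of the same rewrite applied to a single line, changing the image extension and prefixing a directory (the paths are invented for illustration):
import os
import re

line = 'See the plot: ![flux vs time](figures/lightcurve.png) for details.'
image_ext = 'pdf'
base_dir = '/book/chapters/ch01'

match = re.search(r'!\[(.*?)\]\((.*?)\)', line)
if match and not re.match(r'\w+://', match.group(2)):
    alt, img_name = match.group(1), match.group(2)
    img_name = re.sub(r'\.\w+$', '.' + image_ext, img_name)   # swap extension
    img_name = os.path.join(base_dir, img_name).replace(os.path.sep, '/')
    line = re.sub(r'!\[(.*?)\]\((.*?)\)',
                  '![%s](%s)' % (alt, img_name), line, count=1)

print(line)
# See the plot: ![flux vs time](/book/chapters/ch01/figures/lightcurve.pdf) for details.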
def delete_records_safely_by_xml_id(env, xml_ids):
"""This removes in the safest possible way the records whose XML-IDs are
passed as argument.
:param xml_ids: List of XML-ID string identifiers of the records to remove.
"""
for xml_id in xml_ids:
logger.debug('Deleting record for XML-ID %s', xml_id)
try:
with env.cr.savepoint():
env.ref(xml_id).exists().unlink()
except Exception as e:
logger.error('Error deleting XML-ID %s: %s', xml_id, repr(e)) | 0.001873 |
def remove_overlap(self, also_remove_contiguous: bool = False) -> None:
"""
Merges any overlapping intervals.
Args:
also_remove_contiguous: treat contiguous (as well as overlapping)
intervals as worthy of merging?
"""
overlap = True
while overlap:
overlap = self._remove_overlap_sub(also_remove_contiguous)
self._sort() | 0.004808 |
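A minimal standalone version of the same merge step for plain (start, end) tuples (the class above operates on its own interval objects):
def merge_overlapping(intervals, also_merge_contiguous=False):
    """Merge overlapping (and optionally contiguous) (start, end) tuples."""
    merged = []
    for start, end in sorted(intervals):
        if merged and (start < merged[-1][1] or
                       (also_merge_contiguous and start == merged[-1][1])):
            # extend the previous interval instead of starting a new one
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

print(merge_overlapping([(1, 4), (3, 6), (8, 9), (9, 12)]))
# [(1, 6), (8, 9), (9, 12)]
print(merge_overlapping([(1, 4), (3, 6), (8, 9), (9, 12)], also_merge_contiguous=True))
# [(1, 6), (8, 12)]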
def clear_input(self):
"""Remove all the input files (at the end of a reading)."""
for item in listdir(self.input_dir):
item_path = path.join(self.input_dir, item)
if path.isfile(item_path):
remove(item_path)
logger.debug('Removed input %s.' % item_path)
return | 0.005865 |
def make_citation_dict(td):
"""
    Build a citation dictionary from a term or dict, normalizing the author
    field and filling in any missing required fields.
    :param td: A BibTeX-format citation dict or a term
:return:
"""
from datetime import datetime
if isinstance(td, dict):
d = td
name = d['name_link']
else:
d = td.as_dict()
d['_term'] = td
try:
d['name_link'] = td.name
except AttributeError:
d['name_link'] = td['name_link'].value
if 'author' in d and isinstance(d['author'], str):
authors = []
for e in d['author'].split(';'):
author_d = HumanName(e).as_dict(include_empty=False)
if 'suffix' in author_d:
author_d['lineage'] = author_d['suffix']
del author_d['suffix']
authors.append(author_d)
d['author'] = authors
if not 'type' in d:
if '_term' in d:
t = d['_term']
if t.term_is('Root.Reference') or t.term_is('Root.Resource'):
d['type'] = 'dataset'
elif t.term_is('Root.Citation'):
d['type'] = 'article'
else:
d['type'] = 'article'
if d['type'] == 'dataset':
if not 'editor' in d:
d['editor'] = [HumanName('Missing Editor').as_dict(include_empty=False)]
if not 'accessdate' in d:
d['accessdate'] = datetime.now().strftime('%Y-%m-%d')
if not 'author' in d:
d['author'] = [HumanName('Missing Author').as_dict(include_empty=False)]
if not 'title' in d:
d['title'] = d.get('description', '<Missing Title>')
if not 'journal' in d:
d['journal'] = '<Missing Journal>'
if not 'year' in d:
d['year'] = '<Missing Year>'
if '_term' in d:
del d['_term']
return d | 0.005479 |
def _validate_password(self, password):
"""Validate GNTP Message against stored password"""
self.password = password
if password is None:
raise errors.AuthError('Missing password')
keyHash = self.info.get('keyHash', None)
if keyHash is None and self.password is None:
return True
if keyHash is None:
raise errors.AuthError('Invalid keyHash')
if self.password is None:
raise errors.AuthError('Missing password')
keyHashAlgorithmID = self.info.get('keyHashAlgorithmID','MD5')
password = self.password.encode('utf8')
saltHash = self._decode_hex(self.info['salt'])
keyBasis = password + saltHash
self.key = self.hash_algo[keyHashAlgorithmID](keyBasis).digest()
keyHash = self.hash_algo[keyHashAlgorithmID](self.key).hexdigest()
if not keyHash.upper() == self.info['keyHash'].upper():
raise errors.AuthError('Invalid Hash')
return True | 0.026166 |
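The check above follows the GNTP key-derivation scheme: key = H(password + salt) and keyHash = H(key). A short hashlib sketch with an invented password and a random salt:
import hashlib
import os

password = 'secret-growl-password'.encode('utf8')
salt = os.urandom(16)                               # the sender picks a random salt

key = hashlib.md5(password + salt).digest()         # keyBasis -> key
key_hash = hashlib.md5(key).hexdigest().upper()     # value carried in the GNTP header

# the receiver repeats the same two hashes and compares hex digests
assert hashlib.md5(hashlib.md5(password + salt).digest()).hexdigest().upper() == key_hash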
def handle_msg(self, msg, **config):
"""
If we can handle the given message, return the remainder of the topic.
Returns None if we can't handle the message.
"""
match = self.__prefix__.match(msg['topic'])
if match:
return match.groups()[-1] or "" | 0.006515 |
def get_mems_of_org(self):
"""
        Retrieves the emails of the members of the organization. Note: this
        only gets public emails. Private emails would need authentication for
        each user.
"""
print 'Getting members\' emails.'
for member in self.org_retrieved.iter_members():
login = member.to_json()['login']
user_email = self.logged_in_gh.user(login).to_json()['email']
if user_email is not None:
self.emails[login] = user_email
            else:  # user has no public email
self.emails[login] = 'none'
#used for sorting regardless of case
self.logins_lower[login.lower()] = login | 0.006974 |
def force_user_presence(self, user: User, presence: UserPresence):
""" Forcibly set the ``user`` presence to ``presence``.
This method is only provided to cover an edge case in our use of the Matrix protocol and
should **not** generally be used.
"""
self._userid_to_presence[user.user_id] = presence | 0.008824 |
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
# `transpose` doesn't support 8D, need other implementation
f1, f2, f3 = self._factors
# (N, C*f1*f2*f3, D, H, W)
x = F.reshape(x, (0, -4, -1, f1 * f2 * f3, 0, 0, 0)) # (N, C, f1*f2*f3, D, H, W)
x = F.swapaxes(x, 2, 3) # (N, C, D, f1*f2*f3, H, W)
x = F.reshape(x, (0, 0, 0, -4, f1, f2*f3, 0, 0)) # (N, C, D, f1, f2*f3, H, W)
x = F.reshape(x, (0, 0, -3, 0, 0, 0)) # (N, C, D*f1, f2*f3, H, W)
x = F.swapaxes(x, 3, 4) # (N, C, D*f1, H, f2*f3, W)
x = F.reshape(x, (0, 0, 0, 0, -4, f2, f3, 0)) # (N, C, D*f1, H, f2, f3, W)
x = F.reshape(x, (0, 0, 0, -3, 0, 0)) # (N, C, D*f1, H*f2, f3, W)
x = F.swapaxes(x, 4, 5) # (N, C, D*f1, H*f2, W, f3)
x = F.reshape(x, (0, 0, 0, 0, -3)) # (N, C, D*f1, H*f2, W*f3)
return x | 0.012693 |
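An equivalent NumPy formulation of the 3-D pixel shuffle (NumPy's transpose handles 8 dimensions directly, which is why the mxnet version above needs the swapaxes chain); useful for sanity-checking the output shape:
import numpy as np

N, C, D, H, W = 2, 3, 4, 5, 6
f1, f2, f3 = 2, 2, 2
x = np.random.rand(N, C * f1 * f2 * f3, D, H, W)

# split the channel axis into (C, f1, f2, f3), interleave the factors
# with the spatial axes, then collapse each pair
y = (x.reshape(N, C, f1, f2, f3, D, H, W)
      .transpose(0, 1, 5, 2, 6, 3, 7, 4)
      .reshape(N, C, D * f1, H * f2, W * f3))

print(y.shape)  # (2, 3, 8, 10, 12)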
def histogram(args):
"""
%prog histogram meryl.histogram species K
    Plot the histogram based on the meryl K-mer distribution; species and N
    are only used to annotate the graphic.
"""
p = OptionParser(histogram.__doc__)
p.add_option("--vmin", dest="vmin", default=1, type="int",
help="minimum value, inclusive [default: %default]")
p.add_option("--vmax", dest="vmax", default=100, type="int",
help="maximum value, inclusive [default: %default]")
p.add_option("--pdf", default=False, action="store_true",
help="Print PDF instead of ASCII plot [default: %default]")
p.add_option("--coverage", default=0, type="int",
help="Kmer coverage [default: auto]")
p.add_option("--nopeaks", default=False, action="store_true",
help="Do not annotate K-mer peaks")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
histfile, species, N = args
ascii = not opts.pdf
peaks = not opts.nopeaks
N = int(N)
if histfile.rsplit(".", 1)[-1] in ("mcdat", "mcidx"):
logging.debug("CA kmer index found")
histfile = merylhistogram(histfile)
ks = KmerSpectrum(histfile)
ks.analyze(K=N)
Total_Kmers = int(ks.totalKmers)
coverage = opts.coverage
Kmer_coverage = ks.max2 if not coverage else coverage
Genome_size = int(round(Total_Kmers * 1. / Kmer_coverage))
Total_Kmers_msg = "Total {0}-mers: {1}".format(N, thousands(Total_Kmers))
Kmer_coverage_msg = "{0}-mer coverage: {1}".format(N, Kmer_coverage)
Genome_size_msg = "Estimated genome size: {0:.1f}Mb".\
format(Genome_size / 1e6)
Repetitive_msg = ks.repetitive
SNPrate_msg = ks.snprate
for msg in (Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg):
print(msg, file=sys.stderr)
x, y = ks.get_xy(opts.vmin, opts.vmax)
title = "{0} {1}-mer histogram".format(species, N)
if ascii:
asciiplot(x, y, title=title)
return Genome_size
plt.figure(1, (6, 6))
plt.plot(x, y, 'g-', lw=2, alpha=.5)
ax = plt.gca()
if peaks:
t = (ks.min1, ks.max1, ks.min2, ks.max2, ks.min3)
tcounts = [(x, y) for x, y in ks.counts if x in t]
if tcounts:
x, y = zip(*tcounts)
tcounts = dict(tcounts)
plt.plot(x, y, 'ko', lw=2, mec='k', mfc='w')
ax.text(ks.max1, tcounts[ks.max1], "SNP peak", va="top")
ax.text(ks.max2, tcounts[ks.max2], "Main peak")
messages = [Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg,
Repetitive_msg, SNPrate_msg]
write_messages(ax, messages)
ymin, ymax = ax.get_ylim()
ymax = ymax * 7 / 6
ax.set_title(markup(title))
ax.set_ylim((ymin, ymax))
xlabel, ylabel = "Coverage (X)", "Counts"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
set_human_axis(ax)
imagename = histfile.split(".")[0] + ".pdf"
savefig(imagename, dpi=100)
return Genome_size | 0.002308 |
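The genome-size estimate printed above is just the total k-mer count divided by the k-mer coverage at the main histogram peak; for example (numbers invented):
total_kmers = 2400000000        # total 23-mers counted (hypothetical)
kmer_coverage = 48              # x-position of the main histogram peak
genome_size = int(round(total_kmers / kmer_coverage))
print('Estimated genome size: %.1f Mb' % (genome_size / 1e6))  # 50.0 Mb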
def is_upload(action):
"""Checks if this should be a user upload
:param action:
:return: True if this is a file we intend to upload from the user
"""
return 'r' in action.type._mode and (action.default is None or
getattr(action.default, 'name') not in (sys.stderr.name, sys.stdout.name)) | 0.005731 |
def save(self, *args, **kwargs):
"""Auto-generate a slug from the name."""
self._create_slug()
self._create_date_slug()
self._render_content()
# Call ``_set_published`` the *first* time this Entry is published.
# NOTE: if this is unpublished, and then republished, this method won't
# get called; e.g. the date won't get changed and the
# ``entry_published`` signal won't get re-sent.
send_published_signal = False
if self.published and self.published_on is None:
send_published_signal = self._set_published()
super(Entry, self).save(*args, **kwargs)
# We need an ID before we can send this signal.
if send_published_signal:
entry_published.send(sender=self, entry=self) | 0.002497 |
def reset(self):
"""
Resets the agent to its initial state (e.g. on experiment start). Updates the Model's internal episode and
time step counter, internal states, and resets preprocessors.
"""
self.episode, self.timestep, self.next_internals = self.model.reset()
self.current_internals = self.next_internals | 0.008427 |
def pin_auth(self, request):
"""Authenticates with the pin."""
exhausted = False
auth = False
trust = self.check_pin_trust(request.environ)
# If the trust return value is `None` it means that the cookie is
# set but the stored pin hash value is bad. This means that the
# pin was changed. In this case we count a bad auth and unset the
# cookie. This way it becomes harder to guess the cookie name
# instead of the pin as we still count up failures.
bad_cookie = False
if trust is None:
self._fail_pin_auth()
bad_cookie = True
# If we're trusted, we're authenticated.
elif trust:
auth = True
# If we failed too many times, then we're locked out.
elif self._failed_pin_auth > 10:
exhausted = True
# Otherwise go through pin based authentication
else:
entered_pin = request.args.get("pin")
if entered_pin.strip().replace("-", "") == self.pin.replace("-", ""):
self._failed_pin_auth = 0
auth = True
else:
self._fail_pin_auth()
rv = Response(
json.dumps({"auth": auth, "exhausted": exhausted}),
mimetype="application/json",
)
if auth:
rv.set_cookie(
self.pin_cookie_name,
"%s|%s" % (int(time.time()), hash_pin(self.pin)),
httponly=True,
)
elif bad_cookie:
rv.delete_cookie(self.pin_cookie_name)
return rv | 0.001847 |
def listen(self):
"""Listen to both stdout and stderr
We'll want messages of a particular origin and format to
cause QML to perform some action. Other messages are simply
forwarded, as they are expected to be plain print or error messages.
"""
def _listen():
"""This runs in a thread"""
HEADER = "pyblish-qml:popen.request"
for line in iter(self.popen.stdout.readline, b""):
if six.PY3:
line = line.decode("utf8")
try:
response = json.loads(line)
except Exception:
# This must be a regular message.
sys.stdout.write(line)
else:
if (hasattr(response, "get") and
response.get("header") == HEADER):
payload = response["payload"]
args = payload["args"]
func_name = payload["name"]
wrapper = _state.get("dispatchWrapper",
default_wrapper)
func = getattr(self.service, func_name)
result = wrapper(func, *args) # block..
# Note(marcus): This is where we wait for the host to
# finish. Technically, we could kill the GUI at this
# point which would make the following commands throw
                    # an exception. However, no host is capable of killing
                    # the GUI whilst running a command. The host is locked
# until finished, which means we are guaranteed to
# always respond.
data = json.dumps({
"header": "pyblish-qml:popen.response",
"payload": result
})
if six.PY3:
data = data.encode("ascii")
self.popen.stdin.write(data + b"\n")
self.popen.stdin.flush()
else:
# In the off chance that a message
# was successfully decoded as JSON,
# but *wasn't* a request, just print it.
sys.stdout.write(line)
if not self.listening:
self._start_pulse()
if self.modal:
_listen()
else:
thread = threading.Thread(target=_listen)
thread.daemon = True
thread.start()
self.listening = True | 0.000729 |
def nvmlDeviceGetEncoderUtilization(handle):
r"""
/**
* Retrieves the current utilization and sampling size in microseconds for the Encoder
*
* For Kepler &tm; or newer fully supported devices.
*
* @param device The identifier of the target device
* @param utilization Reference to an unsigned int for encoder utilization info
* @param samplingPeriodUs Reference to an unsigned int for the sampling period in US
*
* @return
* - \ref NVML_SUCCESS if \a utilization has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetEncoderUtilization
"""
c_util = c_uint()
c_samplingPeriod = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderUtilization")
ret = fn(handle, byref(c_util), byref(c_samplingPeriod))
_nvmlCheckReturn(ret)
return [c_util.value, c_samplingPeriod.value] | 0.00668 |
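A hedged usage sketch for the binding above, assuming the standard pynvml package name, an available NVML shared library, and at least one NVIDIA GPU:
from pynvml import (nvmlInit, nvmlShutdown, nvmlDeviceGetHandleByIndex,
                    nvmlDeviceGetEncoderUtilization)

nvmlInit()
try:
    handle = nvmlDeviceGetHandleByIndex(0)
    utilization, sampling_period_us = nvmlDeviceGetEncoderUtilization(handle)
    print('encoder utilization: %d%% (sampled over %d us)'
          % (utilization, sampling_period_us))
finally:
    nvmlShutdown()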
def compute_stability_classification(self, predicted_data, record, dataframe_record):
'''Calculate the stability classification for the analysis cases. Must be called after get_experimental_ddg_values.'''
new_idxs = []
stability_classication_x_cutoff, stability_classication_y_cutoff = self.stability_classication_x_cutoff, self.stability_classication_y_cutoff
for analysis_set in self.get_analysis_sets(record):
ddg_details = record['DDG'][analysis_set]
exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
stability_classification_fieldname = BenchmarkRun.get_analysis_set_fieldname('StabilityClassification', analysis_set)
new_idxs.append(stability_classification_fieldname)
dataframe_record[stability_classification_fieldname] = None
if ddg_details:
stability_classification = None
                if dataframe_record[exp_ddg_fieldname] is not None:
stability_classification = fraction_correct([dataframe_record[exp_ddg_fieldname]], [predicted_data[self.ddg_analysis_type]], x_cutoff = stability_classication_x_cutoff, y_cutoff = stability_classication_y_cutoff)
stability_classification = int(stability_classification)
assert(stability_classification == 0 or stability_classification == 1)
dataframe_record[stability_classification_fieldname] = stability_classification
# Update the CSV headers
try:
idx = self.csv_headers.index('StabilityClassification')
self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
except ValueError, e: pass | 0.00972 |
def load_mp(cls, file_pointer, _mft_config=None):
        '''Loads the MFT from the file-like object "file_pointer" into the
        internal structures using multiple worker processes. Parsing options
        come from "_mft_config" (falling back to the class default); if the
        MFT entry size is not set there, the class will try to auto detect
        it.
        '''
import multiprocessing
import queue
mft_config = _mft_config if _mft_config is not None else MFT.mft_config
mft_entry_size = mft_config["entry_size"]
#self.entries = {}
if not mft_entry_size:
mft_entry_size = MFT._find_mft_size(file_pointer)
file_size = _get_file_size(file_pointer)
if (file_size % mft_entry_size):
#TODO error handling (file size not multiple of mft size)
MOD_LOGGER.error("Unexpected file size. It is not multiple of the MFT entry size.")
end = int(file_size / mft_entry_size)
#setup the multiprocessing stuff
queue_size = 10
n_processes = 3
manager = multiprocessing.Manager()
buffer_queue_in = manager.Queue(queue_size)
buffer_queue_out = manager.Queue(queue_size)
entries = manager.dict()
temp_entries = manager.list()
processes = [multiprocessing.Process(target=MFT._load_entry, args=(mft_config, buffer_queue_in, buffer_queue_out, entries, temp_entries)) for i in range(n_processes)]
for p in processes:
p.start()
for i in range(queue_size):
buffer_queue_out.put(bytearray(mft_entry_size))
#start the game
for i in range(0, end):
try:
data_buffer = buffer_queue_out.get(timeout=1)
file_pointer.readinto(data_buffer)
buffer_queue_in.put((i, data_buffer))
#print("adding", i)
            except queue.Empty:
                MOD_LOGGER.error("Timed out waiting for a free buffer while reading the MFT.")
                raise
for i in range(queue_size):
buffer_queue_in.put((-1, None))
for p in processes:
p.join()
print("LOADING DONE")
#process the temporary list and add it to the "model"
for entry in temp_entries:
base_record_ref = entry.header.base_record_ref
if base_record_ref in entries: #if the parent entry has been loaded
if MFT._is_related(entries[base_record_ref], entry):
entries[base_record_ref].copy_attributes(entry)
else: #can happen when you have an orphan entry
entries[i] = entry | 0.005341 |
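The two-queue arrangement above recycles a fixed pool of buffers between the reader and the workers. A simplified, thread-based sketch of the same pattern (the per-entry work is a stand-in for real MFT parsing):
import queue
import threading

def worker(inbox, free_buffers, results):
    while True:
        index, buf = inbox.get()
        if index == -1:              # sentinel: no more work
            break
        results[index] = sum(buf)    # stand-in for real entry parsing
        free_buffers.put(buf)        # hand the buffer back to the reader

inbox, free_buffers, results = queue.Queue(), queue.Queue(), {}
threads = [threading.Thread(target=worker, args=(inbox, free_buffers, results))
           for _ in range(3)]
for t in threads:
    t.start()

for _ in range(4):                   # small fixed pool of reusable buffers
    free_buffers.put(bytearray(8))

chunks = [bytes([i] * 8) for i in range(10)]   # pretend this is the file on disk
for i, chunk in enumerate(chunks):
    buf = free_buffers.get()         # blocks if all buffers are in flight
    buf[:] = chunk                   # the real code uses file_pointer.readinto(buf)
    inbox.put((i, buf))

for _ in threads:
    inbox.put((-1, None))
for t in threads:
    t.join()
print(results[0], results[9])        # 0 72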
def parse_intf_section(interface):
"""Parse a single entry from show interfaces output.
Different cases:
mgmt0 is up
admin state is up
Ethernet2/1 is up
admin state is up, Dedicated Interface
Vlan1 is down (Administratively down), line protocol is down, autostate enabled
Ethernet154/1/48 is up (with no 'admin state')
"""
interface = interface.strip()
re_protocol = r"^(?P<intf_name>\S+?)\s+is\s+(?P<status>.+?)" \
r",\s+line\s+protocol\s+is\s+(?P<protocol>\S+).*$"
re_intf_name_state = r"^(?P<intf_name>\S+) is (?P<intf_state>\S+).*"
re_is_enabled_1 = r"^admin state is (?P<is_enabled>\S+)$"
re_is_enabled_2 = r"^admin state is (?P<is_enabled>\S+), "
re_is_enabled_3 = r"^.* is down.*Administratively down.*$"
re_mac = r"^\s+Hardware.*address:\s+(?P<mac_address>\S+) "
re_speed = r"^\s+MTU .*, BW (?P<speed>\S+) (?P<speed_unit>\S+), "
re_description = r"^\s+Description:\s+(?P<description>.*)$"
# Check for 'protocol is ' lines
match = re.search(re_protocol, interface, flags=re.M)
if match:
intf_name = match.group('intf_name')
status = match.group('status')
protocol = match.group('protocol')
if 'admin' in status.lower():
is_enabled = False
else:
is_enabled = True
is_up = bool('up' in protocol)
else:
# More standard is up, next line admin state is lines
match = re.search(re_intf_name_state, interface)
intf_name = match.group('intf_name')
intf_state = match.group('intf_state').strip()
is_up = True if intf_state == 'up' else False
admin_state_present = re.search("admin state is", interface)
if admin_state_present:
# Parse cases where 'admin state' string exists
for x_pattern in [re_is_enabled_1, re_is_enabled_2]:
match = re.search(x_pattern, interface, flags=re.M)
if match:
is_enabled = match.group('is_enabled').strip()
is_enabled = True if is_enabled == 'up' else False
break
else:
msg = "Error parsing intf, 'admin state' never detected:\n\n{}".format(interface)
raise ValueError(msg)
else:
# No 'admin state' should be 'is up' or 'is down' strings
# If interface is up; it is enabled
is_enabled = True
if not is_up:
match = re.search(re_is_enabled_3, interface, flags=re.M)
if match:
is_enabled = False
match = re.search(re_mac, interface, flags=re.M)
if match:
mac_address = match.group('mac_address')
mac_address = napalm_base.helpers.mac(mac_address)
else:
mac_address = ""
match = re.search(re_speed, interface, flags=re.M)
speed = int(match.group('speed'))
speed_unit = match.group('speed_unit')
    # This was always in Kbit (in the data I saw)
if speed_unit != "Kbit":
msg = "Unexpected speed unit in show interfaces parsing:\n\n{}".format(interface)
raise ValueError(msg)
speed = int(round(speed / 1000.0))
description = ''
match = re.search(re_description, interface, flags=re.M)
if match:
description = match.group('description')
return {
intf_name: {
'description': description,
'is_enabled': is_enabled,
'is_up': is_up,
'last_flapped': -1.0,
'mac_address': mac_address,
'speed': speed}
} | 0.00109 |
def config_amend_(self,config_amend):
""" This will take a YAML or dict configuration and load it into
            the configuration file for further usage. The good part
            about this method is that it doesn't clobber: it only appends
            values where keys are missing.
This should provide a value in dictionary format like:
{
'default': {
'togglsync': {
'dsn': 'sqlite:///zerp-toggl.db',
'default': {
'username': 'abced',
'toggl_api_key': 'arfarfarf',
},
'dev': {
'cache': False
}
}
}
OR at user's preference can also use yaml format:
default:
togglsync:
dsn: 'sqlite:///zerp-toggl.db'
default:
username: 'abced'
toggl_api_key: 'arfarfarf'
dev:
cache: False
Then the code will append the key/values where they may be
missing.
If there is a conflict between a dict key and a value, this
function will throw an exception.
IMPORTANT: after making the change to the configuration,
remember to save the changes with cfg.save_()
"""
if not isinstance(config_amend,dict):
            config_amend = yaml.safe_load(config_amend)
def merge_dicts(source,target,breadcrumbs=None):
"""
Function to update the configuration if required. Returns
True if a change was made.
"""
changed = False
if breadcrumbs is None:
breadcrumbs = []
# Don't descend if we're not a dict
if not isinstance(source,dict):
return source
# Let's start iterating over things
for k,v in source.items():
# New key, simply add.
if k not in target:
target[k] = v
changed = True
continue
# Not new key.... so is it a dict?
elif isinstance(target[k],dict):
trail = breadcrumbs+[k]
if isinstance(v,dict):
if merge_dicts(v,target[k],trail):
changed = True
else:
raise Exception('.'.join(trail) + ' has conflicting dict/scalar types!')
else:
trail = breadcrumbs+[k]
if isinstance(v,dict):
raise Exception('.'.join(trail) + ' has conflicting dict/scalar types!')
return changed
if merge_dicts(config_amend,self._cfg):
self.overlay_load()
return self._cfg | 0.006085 |
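A short illustration of the non-clobbering merge behaviour, using a simplified merge helper without the conflict checks of the method above (the configuration values are invented):
existing = {
    'default': {
        'togglsync': {
            'dsn': 'postgres://localhost/zerp',        # already set: must survive
            'default': {'username': 'abced'},
        }
    }
}
amendment = {
    'default': {
        'togglsync': {
            'dsn': 'sqlite:///zerp-toggl.db',            # ignored, key exists
            'default': {'toggl_api_key': 'arfarfarf'},   # new key, added
            'dev': {'cache': False},                     # new subtree, added
        }
    }
}

def merge_dicts(source, target):
    """Copy keys from source into target only where they are missing."""
    for k, v in source.items():
        if k not in target:
            target[k] = v
        elif isinstance(target[k], dict) and isinstance(v, dict):
            merge_dicts(v, target[k])

merge_dicts(amendment, existing)
print(existing['default']['togglsync']['dsn'])       # postgres://localhost/zerp
print(existing['default']['togglsync']['default'])   # username and toggl_api_key
print(existing['default']['togglsync']['dev'])       # {'cache': False}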
def _get_step_inputs(step, file_vs, std_vs, parallel_ids, wf=None):
"""Retrieve inputs for a step from existing variables.
Potentially nests inputs to deal with merging split variables. If
we split previously and are merging now, then we only nest those
coming from the split process.
"""
inputs = []
skip_inputs = set([])
for orig_input in [_get_variable(x, file_vs) for x in _handle_special_inputs(step.inputs, file_vs)]:
inputs.append(orig_input)
# Only add description and other information for non-record inputs, otherwise batched with records
if not any(is_cwl_record(x) for x in inputs):
inputs += [v for v in std_vs if get_base_id(v["id"]) not in skip_inputs]
nested_inputs = []
if step.parallel in ["single-merge", "batch-merge"]:
if parallel_ids:
inputs = [_nest_variable(x) if x["id"] in parallel_ids else x for x in inputs]
nested_inputs = parallel_ids[:]
parallel_ids = []
elif step.parallel in ["multi-combined"]:
assert len(parallel_ids) == 0
nested_inputs = [x["id"] for x in inputs]
inputs = [_nest_variable(x) for x in inputs]
elif step.parallel in ["multi-batch"]:
assert len(parallel_ids) == 0
nested_inputs = [x["id"] for x in inputs]
        # If we're batching with mixed records/inputs, avoid double nesting records
inputs = [_nest_variable(x, check_records=(len(inputs) > 1)) for x in inputs]
# avoid inputs/outputs with the same name
outputs = [_get_string_vid(x["id"]) for x in step.outputs]
final_inputs = []
for input in inputs:
input["wf_duplicate"] = get_base_id(input["id"]) in outputs
final_inputs.append(input)
return inputs, parallel_ids, nested_inputs | 0.003917 |
def erase(self, message=None):
"""Erase something whose you write before: message"""
if not message:
message = self.last_message
# Move cursor to the beginning of line
super(Animation, self).write("\033[G")
# Erase in line from cursor
super(Animation, self).write("\033[K") | 0.006006 |
def getMaxWidth(self, rows):
        'Return the maximum length of any cell in the column or its header.'
w = 0
if len(rows) > 0:
w = max(max(len(self.getDisplayValue(r)) for r in rows), len(self.name))+2
return max(w, len(self.name)) | 0.011278 |
def scores(self):
"""Return a list of the items with their final scores.
The final score of each item is its average score multiplied by the
square root of its length. This reduces to sum * len^(-1/2).
"""
return map(
lambda x: (x[0], sum(x[1]) * len(x[1]) ** -.5),
iter(self.items.viewitems())
) | 0.005405 |
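In Python 3 terms (viewitems above is Python 2), each final score is sum(scores) * len(scores) ** -0.5; for example:
items = {'article-1': [0.9, 0.8, 0.7], 'article-2': [1.0]}
final = {name: sum(votes) * len(votes) ** -0.5 for name, votes in items.items()}
print(final)  # {'article-1': 1.3856..., 'article-2': 1.0}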
def update_job_libraries(
logger,
job_list,
match,
new_library_path,
token,
host,
):
"""
update libraries on jobs using same major version
Parameters
----------
logger: logging object
configured in cli_commands.py
job_list: list of strings
output of get_job_list
match: FilenameMatch object
match object with suffix
new_library_path: string
path to library in dbfs (including uri)
token: string
Databricks API key with admin permissions
host: string
Databricks account url
(e.g. https://fake-organization.cloud.databricks.com)
Side Effects
------------
jobs now require updated version of library
"""
for job in job_list:
get_res = requests.get(
host + '/api/2.0/jobs/get?job_id={}'.format(job['job_id']),
auth=('token', token),
)
if get_res.status_code == 200:
job_specs = get_res.json() # copy current job specs
settings = job_specs['settings']
job_specs.pop('settings')
new_libraries = []
for lib in settings['libraries']:
if (
match.suffix in lib.keys()
and lib[match.suffix] == job['library_path']
):
# replace entry for old library path with new one
new_libraries.append({match.suffix: new_library_path})
else:
new_libraries.append(lib)
settings['libraries'] = new_libraries
job_specs['new_settings'] = settings
post_res = requests.post(
host + '/api/2.0/jobs/reset',
auth=('token', token),
data=json.dumps(job_specs)
)
if post_res.status_code != 200:
raise APIError(post_res)
else:
raise APIError(get_res) | 0.000511 |
def environment_as(**kwargs):
"""Update the environment to the supplied values, for example:
with environment_as(PYTHONPATH='foo:bar:baz',
PYTHON='/usr/bin/python2.7'):
subprocess.Popen(foo).wait()
"""
new_environment = kwargs
old_environment = {}
def setenv(key, val):
if val is not None:
os.environ[key] = val if PY3 else _os_encode(val)
else:
if key in os.environ:
del os.environ[key]
for key, val in new_environment.items():
old_environment[key] = os.environ.get(key)
setenv(key, val)
try:
yield
finally:
for key, val in old_environment.items():
setenv(key, val) | 0.016591 |