text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def _get_url_parameters(self):
"""
Encode URL parameters
"""
url_parameters = ''
if self._url_parameters is not None:
url_parameters = '?' + urllib.urlencode(self._url_parameters)
return url_parameters | 0.007663 |
def _get_property(device_path: Union[Path, str], property_name: str) -> str:
"""Gets the given property for a device."""
with open(str(Path(device_path, property_name))) as file:
return file.readline().strip() | 0.004444 |
def get_modname_from_modpath(module_fpath):
"""
returns importable name from file path
get_modname_from_modpath
Args:
module_fpath (str): module filepath
Returns:
str: modname
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> module_fpath = ut.util_path.__file__
>>> modname = ut.get_modname_from_modpath(module_fpath)
>>> result = modname
>>> print(result)
utool.util_path
"""
modsubdir_list = get_module_subdir_list(module_fpath)
modname = '.'.join(modsubdir_list)
modname = modname.replace('.__init__', '').strip()
modname = modname.replace('.__main__', '').strip()
return modname | 0.001309 |
def get_cloudflare_records(self, *, account):
"""Return a `list` of `dict`s containing the zones and their records, obtained from the CloudFlare API
    Args:
        account (:obj:`CloudFlareAccount`): A CloudFlare Account object
    Returns:
        :obj:`list` of `dict`
"""
zones = []
for zobj in self.__cloudflare_list_zones(account=account):
try:
self.log.debug('Processing DNS zone CloudFlare/{}'.format(zobj['name']))
zone = {
'zone_id': get_resource_id('cfz', zobj['name']),
'name': zobj['name'],
'source': 'CloudFlare',
'comment': None,
'tags': {},
'records': []
}
for record in self.__cloudflare_list_zone_records(account=account, zoneID=zobj['id']):
zone['records'].append({
'id': get_resource_id('cfr', zobj['id'], ['{}={}'.format(k, v) for k, v in record.items()]),
'zone_id': zone['zone_id'],
'name': record['name'],
'value': record['value'],
'type': record['type']
})
if len(zone['records']) > 0:
zones.append(zone)
except CloudFlareError:
self.log.exception('Failed getting records for CloudFlare zone {}'.format(zobj['name']))
return zones | 0.004593 |
def echo_size(self, transferred=1, status=None):
'''Sample usage:
f=lambda x,y:x+y
ldata = range(10)
toBeTransferred = reduce(f,range(10))
    progress = ProgressBarUtils("refresh", toBeTransferred=toBeTransferred, unit="KB", chunk_size=1.0, run_status="downloading", fin_status="download complete")
import time
for i in ldata:
time.sleep(0.2)
progress.echo_size(transferred=i)
'''
self.transferred += transferred
# if status is not None:
self.status = status or self.status
end_str = "\r"
if self.transferred == self.toBeTransferred:
end_str = '\n'
self.status = status or self.fin_status
print(self.__get_info() + end_str) | 0.007067 |
def consume(self, chars, min=0, max=-1):
"""
        Consume chars until the min/max constraint is satisfied.
"""
return self._src.consume(chars=chars, min=min, max=max) | 0.010695 |
def documentation_404(self, base_url=None):
"""Returns a smart 404 page that contains documentation for the written API"""
base_url = self.base_url if base_url is None else base_url
def handle_404(request, response, *args, **kwargs):
url_prefix = request.forwarded_uri[:-1]
if request.path and request.path != "/":
url_prefix = request.forwarded_uri.split(request.path)[0]
to_return = OrderedDict()
to_return['404'] = ("The API call you tried to make was not defined. "
"Here's a definition of the API to help you get going :)")
to_return['documentation'] = self.documentation(base_url, self.determine_version(request, False),
prefix=url_prefix)
if self.output_format == hug.output_format.json:
response.data = hug.output_format.json(to_return, indent=4, separators=(',', ': '))
response.content_type = 'application/json; charset=utf-8'
else:
response.data = self.output_format(to_return, request=request, response=response)
response.content_type = self.output_format.content_type
response.status = falcon.HTTP_NOT_FOUND
handle_404.interface = True
return handle_404 | 0.005801 |
def getJsonFromApi(view, request):
"""Return json from querying Web Api
Args:
view: django view function.
request: http request object got from django.
Returns: json format dictionary
"""
jsonText = view(request)
jsonText = json.loads(jsonText.content.decode('utf-8'))
return jsonText | 0.039216 |
def cumulative_distribution(self, X):
"""Computes the cumulative distribution function for the copula, :math:`C(u, v)`
Args:
X: `np.ndarray`
Returns:
np.array: cumulative probability
"""
self.check_fit()
U, V = self.split_matrix(X)
if (V == 0).all() or (U == 0).all():
return np.zeros(V.shape[0])
else:
cdfs = [
np.power(
np.power(U[i], -self.theta) + np.power(V[i], -self.theta) - 1,
-1.0 / self.theta
)
if (U[i] > 0 and V[i] > 0) else 0
for i in range(len(U))
]
return np.array([max(x, 0) for x in cdfs]) | 0.005319 |
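The expression evaluated above is a Clayton-style CDF, :math:`C(u, v) = (u^{-\theta} + v^{-\theta} - 1)^{-1/\theta}`, clipped at zero. A minimal standalone sketch of that formula with NumPy (hypothetical `theta` and inputs, independent of the copula class):

```python
import numpy as np

# Standalone sketch of the CDF formula used above (hypothetical theta/inputs):
# C(u, v) = (u**-theta + v**-theta - 1) ** (-1 / theta), clipped at 0.
theta = 2.0
U = np.array([0.2, 0.5, 0.9])
V = np.array([0.3, 0.5, 0.1])

cdf = np.power(np.power(U, -theta) + np.power(V, -theta) - 1, -1.0 / theta)
print(np.maximum(cdf, 0))  # same clipping as the list comprehension above
```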
async def on_raw_422(self, message):
""" MOTD is missing. """
await self._registration_completed(message)
self.motd = None
await self.on_connect() | 0.011236 |
def main():
"""
NAME
dir_redo.py
DESCRIPTION
converts the Cogne DIR format to PmagPy redo file
SYNTAX
dir_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify input file
-F FILE: specify output file, default is 'zeq_redo'
"""
dir_path='.'
zfile='zeq_redo'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
inspec=sys.argv[ind+1]
if '-F' in sys.argv:
ind=sys.argv.index('-F')
zfile=sys.argv[ind+1]
inspec=dir_path+"/"+inspec
zfile=dir_path+"/"+zfile
zredo=open(zfile,"w")
#
# read in DIR file
#
specs=[]
prior_spec_data=open(inspec,'r').readlines()
for line in prior_spec_data:
line=line.replace("Dir"," Dir")
line=line.replace("OKir"," OKir")
line=line.replace("Fish"," Fish")
line=line.replace("Man"," Man")
line=line.replace("GC"," GC")
line=line.replace("-T"," - T")
line=line.replace("-M"," - M")
rec=line.split()
if len(rec)<2:
sys.exit()
if rec[1]=='Dir' or rec[1]=='GC': # skip all the other stuff
spec=rec[0]
specs.append(spec)
            comp_name=string.ascii_uppercase[specs.count(spec)-1] # assign component names
calculation_type="DE-FM"
if rec[1]=='Dir' and rec[2]=="Kir": calculation_type="DE-BFL" # assume default calculation type is best-fit line
if rec[1]=='Dir' and rec[2]=="OKir": calculation_type="DE-BFL-A" # anchored best-fit line
if rec[1]=='Dir' and rec[2]=="Fish": calculation_type="DE-FM" # fisher mean
if rec[1]=='GC' : calculation_type="DE-BFP" # best-fit plane
min,max=rec[3],rec[5]
beg,end="",""
if min=="NRM": beg=0
if min[0]=='M':
beg=float(min[1:])*1e-3 # convert to T from mT
elif min[0]=='T':
                beg=float(min[1:])+273 # convert to kelvin from C
if max[0]=='M':
end=float(max[1:])*1e-3 # convert to T from mT
elif max[0]=='T':
                end=float(max[1:])+273 # convert to kelvin from C
if beg==0:beg=273
outstring='%s %s %s %s %s \n'%(spec,calculation_type,beg,end,comp_name)
zredo.write(outstring) | 0.039417 |
def home(self):
"""
Home the pipette's plunger axis during a protocol run
Notes
-----
`Pipette.home()` homes the `Robot`
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
>>> p300.home() # doctest: +SKIP
"""
def _home(mount):
self.current_volume = 0
self.instrument_actuator.set_active_current(self._plunger_current)
self.robot.poses = self.instrument_actuator.home(
self.robot.poses)
self.robot.poses = self.instrument_mover.home(self.robot.poses)
self.previous_placeable = None # no longer inside a placeable
do_publish(self.broker, commands.home, _home,
'before', None, None, self.mount)
_home(self.mount)
do_publish(self.broker, commands.home, _home,
'after', self, None, self.mount)
return self | 0.001706 |
def create_from_settings(settings):
""" Create a connection with given settings.
Args:
settings (dict): A dictionary of settings
Returns:
:class:`Connection`. The connection
"""
return Connection(
settings["url"],
settings["base_url"],
settings["user"],
settings["password"],
authorizations = settings["authorizations"],
debug = settings["debug"]
) | 0.014141 |
def soaproot(self, node):
"""
Get whether the specified I{node} is a soap encoded root.
This is determined by examining @soapenc:root='1'.
The node is considered to be a root when the attribute
is not specified.
@param node: A node to evaluate.
@type node: L{Element}
@return: True if a soap encoded root.
@rtype: bool
"""
root = node.getAttribute('root', ns=soapenc)
if root is None:
return True
else:
return root.value == '1' | 0.003604 |
def _prepare_for_training(self, records, mini_batch_size=None, job_name=None):
"""Set hyperparameters needed for training.
Args:
* records (:class:`~RecordSet`): The records to train this ``Estimator`` on.
* mini_batch_size (int or None): The size of each mini-batch to use when training. If ``None``, a
default value will be used.
* job_name (str): Name of the training job to be created. If not specified, one is generated,
using the base name given to the constructor if applicable.
"""
super(AmazonAlgorithmEstimatorBase, self)._prepare_for_training(job_name=job_name)
feature_dim = None
if isinstance(records, list):
for record in records:
if record.channel == 'train':
feature_dim = record.feature_dim
break
if feature_dim is None:
raise ValueError('Must provide train channel.')
else:
feature_dim = records.feature_dim
self.feature_dim = feature_dim
self.mini_batch_size = mini_batch_size | 0.005231 |
def next(self):
"""
Get the next line of data.
Returns
-------
tag : str
    data : The line content after the tag, passed through ``_cast``.
"""
line = self.buffer.readline()
while line == '\n': # Skip forward to the next line with content.
line = self.buffer.readline()
if line == '': # End of file.
self.at_eof = True
return None, None
match = re.match('([A-Z]{2}|[C][1])\W(.*)', line)
if match is not None:
self.current_tag, data = match.groups()
else:
self.current_tag = self.last_tag
data = line.strip()
return self.current_tag, _cast(data) | 0.004354 |
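The regular expression above splits a two-letter (or ``C1``) field tag from the rest of the line; a quick standalone check of that pattern:

```python
import re

# The same tag/data split used above, applied to a typical field line.
m = re.match(r'([A-Z]{2}|[C][1])\W(.*)', 'TI Some article title')
print(m.groups())   # -> ('TI', 'Some article title')
```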
def pk_prom(word):
'''Return the number of stressed light syllables.'''
violations = 0
stressed = []
for w in extract_words(word):
stressed += w.split('.')[2:-1:2] # odd syllables, excl. word-initial
# (CVV = light)
for syll in stressed:
if phon.is_vowel(syll[-1]):
violations += 1
# # (CVV = heavy)
# for syll in stressed:
# if re.search(
# ur'^[^ieaouäöy]*[ieaouäöy]{1}$', syll, flags=re.I | re.U):
# violations += 1
return violations | 0.001845 |
def line_column(self) -> Tuple[int, int]:
"""Return line and column coordinates."""
ln = self.input.count("\n", 0, self.offset)
c = (self.offset if ln == 0 else
self.offset - self.input.rfind("\n", 0, self.offset) - 1)
return (ln + 1, c) | 0.007092 |
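The same offset-to-coordinates arithmetic can be tried on a plain string (a standalone sketch, not using the parser's ``self.input``/``self.offset``):

```python
text = "first line\nsecond line\nthird"
offset = text.index("second")           # offset of the 's' in "second"

ln = text.count("\n", 0, offset)        # newlines before the offset
c = offset if ln == 0 else offset - text.rfind("\n", 0, offset) - 1
print(ln + 1, c)                        # -> 2 0  (1-based line, 0-based column)
```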
def _init_relationships(self, relationships_arg):
"""Return a set of relationships found in all subset GO Terms."""
if relationships_arg:
relationships_all = self._get_all_relationships()
if relationships_arg is True:
return relationships_all
else:
return relationships_all.intersection(relationships_arg)
return set() | 0.004878 |
def add_properties(self, names, methods):
"""Returns a view of self with the given methods added as properties.
From: <http://stackoverflow.com/a/2954373/1366472>.
"""
cls = type(self)
cls = type(cls.__name__, (cls,), dict(cls.__dict__))
if isinstance(names, string_types):
names = [names]
methods = [methods]
for name,method in zip(names, methods):
setattr(cls, name, property(method))
return self.view(type=cls) | 0.005837 |
def pospawn(self, pawn):
"""Position a :class:`Pawn` that is my child so as to reflect how far
its :class:`Thing` has gone along my :class:`Portal`.
"""
if self._turn < pawn.thing['arrival_time']:
# It's weird that the pawn is getting placed in me, but
# I'll do my best..
pawn.pos = self.pos_along(0)
return
elif (
pawn.thing['next_arrival_time'] and
self._turn >= pawn.thing['next_arrival_time']
):
pawn.pos = self.pos_along(1)
return
try:
pawn.pos = self.pos_along(
(
self._turn -
pawn.thing['arrival_time']
) / (
pawn.thing['next_arrival_time'] -
pawn.thing['arrival_time']
)
)
except (TypeError, ZeroDivisionError):
pawn.pos = self.pos_along(0) | 0.002037 |
def api_info(self, headers=None):
"""Retrieves information provided by the API root endpoint
``'/api/v1'``.
Args:
headers (dict): Optional headers to pass to the request.
Returns:
dict: Details of the HTTP API provided by the BigchainDB
server.
"""
return self.transport.forward_request(
method='GET',
path=self.api_prefix,
headers=headers,
) | 0.004228 |
def calc_next_run(self):
"""Calculate next run time of this task"""
base_time = self.last_run
if self.last_run == HAS_NOT_RUN:
if self.wait_for_schedule is False:
self.next_run = timezone.now()
self.wait_for_schedule = False # reset so we don't run on every clock tick
self.save()
return
else:
base_time = timezone.now()
self.next_run = croniter(self.schedule, base_time).get_next(datetime)
self.save() | 0.007326 |
def exclude_file(sp, f):
"""
Exclude discovered files if they match the special exclude_
search pattern keys
"""
# Make everything a list if it isn't already
for k in sp:
        if k in ['exclude_fn', 'exclude_fn_re', 'exclude_contents', 'exclude_contents_re']:
if not isinstance(sp[k], list):
sp[k] = [sp[k]]
# Search by file name (glob)
if 'exclude_fn' in sp:
for pat in sp['exclude_fn']:
if fnmatch.fnmatch(f['fn'], pat):
return True
# Search by file name (regex)
if 'exclude_fn_re' in sp:
for pat in sp['exclude_fn_re']:
if re.match( pat, f['fn']):
return True
# Search the contents of the file
if 'exclude_contents' in sp or 'exclude_contents_re' in sp:
# Compile regex patterns if we have any
if 'exclude_contents_re' in sp:
sp['exclude_contents_re'] = [re.compile(pat) for pat in sp['exclude_contents_re']]
with io.open (os.path.join(f['root'],f['fn']), "r", encoding='utf-8') as fh:
for line in fh:
if 'exclude_contents' in sp:
for pat in sp['exclude_contents']:
if pat in line:
return True
if 'exclude_contents_re' in sp:
for pat in sp['exclude_contents_re']:
if re.search(pat, line):
return True
return False | 0.004676 |
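The file-name checks above boil down to ``fnmatch.fnmatch`` for globs and ``re.match`` for regexes; a small standalone illustration with a hypothetical search-pattern dict:

```python
import fnmatch
import re

sp = {'exclude_fn': ['*.tmp'], 'exclude_fn_re': [r'^backup_\d+\.log$']}
for fn in ['data.csv', 'scratch.tmp', 'backup_42.log']:
    by_glob = any(fnmatch.fnmatch(fn, pat) for pat in sp['exclude_fn'])
    by_regex = any(re.match(pat, fn) for pat in sp['exclude_fn_re'])
    print(fn, '->', 'excluded' if by_glob or by_regex else 'kept')
```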
def validate(text, file, schema_type):
"""Validate JSON input using dependencies-schema"""
content = None
if text:
print('Validating text input...')
content = text
if file:
print('Validating file input...')
content = file.read()
if content is None:
click.secho('Please give either text input or a file path. See help for more details.', fg='red')
exit(1)
try:
if schema_type == 'dependencies':
validator = DependenciesSchemaValidator()
elif schema_type == 'actions':
validator = ActionsSchemaValidator()
else:
raise Exception('Unknown type')
validator.validate_json(content)
click.secho('Valid JSON schema!', fg='green')
except Exception as e:
click.secho('Invalid JSON schema!', fg='red')
raise e | 0.002299 |
async def fetchval(self, query, *args, column=0, timeout=None):
"""Run a query and return a value in the first row.
Pool performs this operation using one of its connections. Other than
that, it behaves identically to
:meth:`Connection.fetchval() <connection.Connection.fetchval>`.
.. versionadded:: 0.10.0
"""
async with self.acquire() as con:
return await con.fetchval(
query, *args, column=column, timeout=timeout) | 0.003968 |
def native_to_win32_pathname(name):
"""
@type name: str
@param name: Native (NT) absolute pathname.
@rtype: str
@return: Win32 absolute pathname.
"""
# XXX TODO
# There are probably some native paths that
# won't be converted by this naive approach.
if name.startswith(compat.b("\\")):
if name.startswith(compat.b("\\??\\")):
name = name[4:]
elif name.startswith(compat.b("\\SystemRoot\\")):
system_root_path = os.environ['SYSTEMROOT']
if system_root_path.endswith('\\'):
system_root_path = system_root_path[:-1]
name = system_root_path + name[11:]
else:
for drive_number in compat.xrange(ord('A'), ord('Z') + 1):
drive_letter = '%c:' % drive_number
try:
device_native_path = win32.QueryDosDevice(drive_letter)
except WindowsError:
e = sys.exc_info()[1]
if e.winerror in (win32.ERROR_FILE_NOT_FOUND, \
win32.ERROR_PATH_NOT_FOUND):
continue
raise
if not device_native_path.endswith(compat.b('\\')):
device_native_path += compat.b('\\')
if name.startswith(device_native_path):
name = drive_letter + compat.b('\\') + \
name[ len(device_native_path) : ]
break
return name | 0.004154 |
def objects_to_td64ns(data, unit="ns", errors="raise"):
"""
Convert a object-dtyped or string-dtyped array into an
timedelta64[ns]-dtyped array.
Parameters
----------
data : ndarray or Index
unit : str, default "ns"
The timedelta unit to treat integers as multiples of.
errors : {"raise", "coerce", "ignore"}, default "raise"
How to handle elements that cannot be converted to timedelta64[ns].
See ``pandas.to_timedelta`` for details.
Returns
-------
numpy.ndarray : timedelta64[ns] array converted from data
Raises
------
ValueError : Data cannot be converted to timedelta64[ns].
Notes
-----
    Unlike `pandas.to_timedelta`, setting `errors=ignore` here does not cause
errors to be ignored; they are caught and subsequently ignored at a
higher level.
"""
# coerce Index to np.ndarray, converting string-dtype if necessary
values = np.array(data, dtype=np.object_, copy=False)
result = array_to_timedelta64(values,
unit=unit, errors=errors)
return result.view('timedelta64[ns]') | 0.000883 |
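``objects_to_td64ns`` is an internal pandas helper; the public-API analogue of the same conversion (assuming a recent pandas install) looks like:

```python
import numpy as np
import pandas as pd

data = np.array(["1 days", "2 hours", "30 minutes"], dtype=object)
td = pd.to_timedelta(data)          # object/str array -> TimedeltaIndex
print(td.values.dtype)              # timedelta64[ns]
```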
def expectation_payload(prep_prog, operator_programs, random_seed):
"""REST payload for :py:func:`ForestConnection._expectation`"""
if operator_programs is None:
operator_programs = [Program()]
if not isinstance(prep_prog, Program):
raise TypeError("prep_prog variable must be a Quil program object")
payload = {'type': TYPE_EXPECTATION,
'state-preparation': prep_prog.out(),
'operators': [x.out() for x in operator_programs]}
if random_seed is not None:
payload['rng-seed'] = random_seed
return payload | 0.001709 |
def sync(collector):
"""Sync an environment"""
amazon = collector.configuration['amazon']
aws_syncr = collector.configuration['aws_syncr']
# Convert everything before we try and sync anything
log.info("Converting configuration")
converted = {}
for thing in collector.configuration["__registered__"]:
if thing in collector.configuration:
converted[thing] = collector.configuration[thing]
# Do the sync
for typ in collector.configuration["__registered__"]:
if typ in converted:
thing = converted[typ]
if not aws_syncr.artifact or aws_syncr.artifact == typ:
log.info("Syncing {0}".format(typ))
for name, item in thing.items.items():
thing.sync_one(aws_syncr, amazon, item)
if not amazon.changes:
log.info("No changes were made!!") | 0.001134 |
def list_directories_and_files(self, share_name, directory_name=None, **kwargs):
"""
Return the list of directories and files stored on a Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param kwargs: Optional keyword arguments that
`FileService.list_directories_and_files()` takes.
:type kwargs: object
:return: A list of files and directories
:rtype: list
"""
return self.connection.list_directories_and_files(share_name,
directory_name,
**kwargs) | 0.003866 |
def stopped(self):
"""Return if the stream is stopped."""
if self.tune and self.tune.get('@stopped'):
return True if self.tune.get('@stopped') == 'true' else False
else:
raise PyMediaroomError("No information in <node> about @stopped") | 0.007067 |
def move_all(self, from_dict, to_dict):
'''Move everything from one dictionary to another.
This can be expensive if the source dictionary is large.
This always requires a session lock.
:param str from_dict: source dictionary
:param str to_dict: destination dictionary
'''
if self._session_lock_identifier is None:
raise ProgrammerError('must acquire lock first')
conn = redis.Redis(connection_pool=self.pool)
script = conn.register_script('''
if redis.call("get", KEYS[1]) == ARGV[1]
then
local count = 0
local keys = redis.call('ZRANGE', KEYS[3], 0, -1)
for i, next_key in ipairs(keys) do
-- get the value and priority for this key
local next_val = redis.call("hget", KEYS[2], next_key)
local next_priority = redis.call("zscore", KEYS[3], next_key)
-- remove item of from_dict
redis.call("zrem", KEYS[3], next_key)
-- also remove from hash
redis.call("hdel", KEYS[2], next_key)
-- put it in to_dict
redis.call("hset", KEYS[4], next_key, next_val)
redis.call("zadd", KEYS[5], next_priority, next_key)
count = count + 1
end
return count
else
-- ERROR: No longer own the lock
return 0
end
''')
num_moved = script(keys=[self._lock_name,
self._namespace(from_dict),
self._namespace(from_dict) + 'keys',
self._namespace(to_dict),
self._namespace(to_dict) + 'keys'],
args=[self._session_lock_identifier])
return num_moved | 0.001596 |
def fromXml( parent, xml, actions = None ):
"""
        Generates an XMenu from the input xml data and returns the resulting \
        menu. If no action dictionary is supplied, it will be generated based \
        on the parent's actions.
:param parent | <QWidget>
xml | <xml.etree.ElementTree.Element>
actions | {<str> name: <QAction>, } || None
:return <XMenu> || None
"""
# generate the actions off the parent
if ( actions is None ):
actions = {}
for action in parent.actions():
key = nativestring(action.objectName())
if not key:
key = nativestring(action.text())
if not key:
continue
actions[key] = action
# create a new menu
menu = XMenu(parent)
menu.setIcon(QIcon(resources.find('img/folder.png')))
menu.setTitle(xml.get('title', ''))
for xaction in xml:
if xaction.tag == 'separator':
menu.addSeparator()
elif xaction.tag == 'menu':
menu.addMenu(XMenu.fromXml(menu, xaction, actions))
else:
action = actions.get(xaction.get('name', ''))
if action:
menu.addAction(action)
return menu | 0.012138 |
def format_decimal(decimal):
"""
Formats a decimal number
:param decimal: the decimal value
:return: the formatted string value
"""
# strip trailing fractional zeros
normalized = decimal.normalize()
sign, digits, exponent = normalized.as_tuple()
if exponent >= 1:
normalized = normalized.quantize(1)
return str(normalized) | 0.002695 |
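A standalone run of the normalize/quantize trick above, showing why the extra ``quantize(1)`` step is needed for whole numbers (``normalize`` alone would yield ``1E+2``):

```python
from decimal import Decimal

for raw in ("1.2500", "100", "0.070"):
    d = Decimal(raw).normalize()
    sign, digits, exponent = d.as_tuple()
    if exponent >= 1:               # e.g. Decimal('1E+2') -> Decimal('100')
        d = d.quantize(1)
    print(raw, "->", d)             # 1.2500 -> 1.25, 100 -> 100, 0.070 -> 0.07
```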
def waverange(self):
"""Range of `waveset`."""
if self.waveset is None:
x = [None, None]
else:
x = u.Quantity([self.waveset.min(), self.waveset.max()])
return x | 0.009259 |
def do_init(self, fs_settings, global_quota):
fs_settings = deepcopy(fs_settings) # because we store some of the info, we need a deep copy
'''
If the same restrictions are applied for many destinations, we use the same job to avoid processing
files twice
'''
for sender_spec in fs_settings.sender_specs:
restrictions = sender_spec.restrictions
if restrictions in self.restriction_to_job:
self.restriction_to_job[restrictions].add_destinations(sender_spec.destinations)
else:
compressor = _CompressorJob(
next_task=self.get_next_task(),
sender_spec=sender_spec,
tmp_file_parts_basepath=fs_settings.tmp_file_parts_basepath,
should_split_small_files=fs_settings.should_split_small_files,
global_quota=global_quota)
self.restriction_to_job[restrictions] = compressor
compressor.register(self) | 0.00675 |
def _set_mac_group_entry(self, v, load=False):
"""
Setter method for mac_group_entry, mapped from YANG variable /mac_group/mac_group_entry (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac_group_entry is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac_group_entry() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("entry_address",mac_group_entry.mac_group_entry, yang_name="mac-group-entry", rest_name="mac", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='entry-address', extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}), is_container='list', yang_name="mac-group-entry", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac_group_entry must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("entry_address",mac_group_entry.mac_group_entry, yang_name="mac-group-entry", rest_name="mac", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='entry-address', extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}), is_container='list', yang_name="mac-group-entry", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add mac-address to the mac-group.\nMac mask is optional.', u'cli-no-key-completion': None, u'callpoint': u'mac-group-entry-config', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'alt-name': u'mac'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)""",
})
self.__mac_group_entry = t
if hasattr(self, '_set'):
self._set() | 0.003502 |
def main():
"""Main function for the deprecated 'sl' command."""
print("ERROR: Use the 'slcli' command instead.", file=sys.stderr)
print("> slcli %s" % ' '.join(sys.argv[1:]), file=sys.stderr)
exit(-1) | 0.004608 |
def MergeLines(self, lines, message):
"""Merges a text representation of a protocol message into a message."""
self._allow_multiple_scalars = True
self._ParseOrMerge(lines, message)
return message | 0.004717 |
def _set_ldp_sync_info(self, v, load=False):
"""
Setter method for ldp_sync_info, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_sync_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_sync_info() directly.
YANG Description: ISIS LDP sync info
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ldp_sync_info.ldp_sync_info, is_container='container', presence=False, yang_name="ldp-sync-info", rest_name="ldp-sync-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis-ldp-sync-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ldp_sync_info must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ldp_sync_info.ldp_sync_info, is_container='container', presence=False, yang_name="ldp-sync-info", rest_name="ldp-sync-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis-ldp-sync-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__ldp_sync_info = t
if hasattr(self, '_set'):
self._set() | 0.005423 |
def copy(self, memo):
"""
Make a copy of this SimAbstractMemory object
:return:
"""
am = SimAbstractMemory(
memory_id=self._memory_id,
endness=self.endness,
stack_region_map=self._stack_region_map,
generic_region_map=self._generic_region_map
)
for region_id, region in self._regions.items():
am._regions[region_id] = region.copy(memo)
am._stack_size = self._stack_size
return am | 0.003922 |
def pication(rings, pos_charged, protcharged):
"""Return all pi-Cation interaction between aromatic rings and positively charged groups.
For tertiary and quaternary amines, check also the angle between the ring and the nitrogen.
"""
data = namedtuple(
'pication', 'ring charge distance offset type restype resnr reschain restype_l resnr_l reschain_l protcharged')
pairings = []
if len(rings) == 0 or len(pos_charged) == 0:
return pairings
for ring in rings:
c = ring.center
for p in pos_charged:
d = euclidean3d(c, p.center)
# Project the center of charge into the ring and measure distance to ring center
proj = projection(ring.normal, ring.center, p.center)
offset = euclidean3d(proj, ring.center)
if not config.MIN_DIST < d < config.PICATION_DIST_MAX or not offset < config.PISTACK_OFFSET_MAX:
continue
if type(p).__name__ == 'lcharge' and p.fgroup == 'tertamine':
# Special case here if the ligand has a tertiary amine, check an additional angle
                # Otherwise, we might have a pi-cation interaction 'through' the ligand
n_atoms = [a_neighbor for a_neighbor in OBAtomAtomIter(p.atoms[0].OBAtom)]
n_atoms_coords = [(a.x(), a.y(), a.z()) for a in n_atoms]
amine_normal = np.cross(vector(n_atoms_coords[0], n_atoms_coords[1]),
vector(n_atoms_coords[2], n_atoms_coords[0]))
b = vecangle(ring.normal, amine_normal)
# Smallest of two angles, depending on direction of normal
a = min(b, 180 - b if not 180 - b < 0 else b)
if not a > 30.0:
resnr, restype = whichresnumber(ring.atoms[0]), whichrestype(ring.atoms[0])
reschain = whichchain(ring.atoms[0])
resnr_l, restype_l = whichresnumber(p.orig_atoms[0]), whichrestype(p.orig_atoms[0])
reschain_l = whichchain(p.orig_atoms[0])
contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular',
restype=restype, resnr=resnr, reschain=reschain,
restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l,
protcharged=protcharged)
pairings.append(contact)
break
resnr = whichresnumber(p.atoms[0]) if protcharged else whichresnumber(ring.atoms[0])
resnr_l = whichresnumber(ring.orig_atoms[0]) if protcharged else whichresnumber(p.orig_atoms[0])
restype = whichrestype(p.atoms[0]) if protcharged else whichrestype(ring.atoms[0])
restype_l = whichrestype(ring.orig_atoms[0]) if protcharged else whichrestype(p.orig_atoms[0])
reschain = whichchain(p.atoms[0]) if protcharged else whichchain(ring.atoms[0])
reschain_l = whichchain(ring.orig_atoms[0]) if protcharged else whichchain(p.orig_atoms[0])
contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular', restype=restype,
resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l,
reschain_l=reschain_l, protcharged=protcharged)
pairings.append(contact)
return filter_contacts(pairings) | 0.006955 |
def _api_key_patch_add(conn, apiKey, pvlist):
'''
the add patch operation for a list of (path, value) tuples on an ApiKey resource list path
'''
response = conn.update_api_key(apiKey=apiKey,
patchOperations=_api_key_patchops('add', pvlist))
return response | 0.009646 |
def update_child_calls(self):
"""Replace child nodes on original function call with their partials"""
for node in filter(lambda n: len(n.arg_name), self.child_list):
self.data["bound_args"].arguments[node.arg_name] = node.partial()
self.updated = True | 0.006944 |
def _get_on_trixel_sources_from_database_query(
self):
"""*generate the mysql query before executing it*
"""
self.log.debug(
            'starting the ``_get_on_trixel_sources_from_database_query`` method')
tableName = self.tableName
raCol = self.raCol
decCol = self.decCol
radiusArc = self.radius
radius = self.radius / (60. * 60.)
# GET ALL THE TRIXELS REQUIRED
trixelArray = self._get_trixel_ids_that_overlap_conesearch_circles()
if trixelArray.size > 50000 and self.htmDepth == 16:
self.htmDepth = 13
self.mesh = HTM(
depth=self.htmDepth,
log=self.log
)
trixelArray = self._get_trixel_ids_that_overlap_conesearch_circles()
if trixelArray.size > 50000 and self.htmDepth == 13:
self.htmDepth = 10
self.mesh = HTM(
depth=self.htmDepth,
log=self.log
)
trixelArray = self._get_trixel_ids_that_overlap_conesearch_circles()
htmLevel = "htm%sID" % self.htmDepth
if trixelArray.size > 150000:
self.log.info(
"Your search radius of the `%(tableName)s` table may be too large (%(radiusArc)s arcsec)" % locals())
minID = np.min(trixelArray)
maxID = np.max(trixelArray)
htmWhereClause = "where %(htmLevel)s between %(minID)s and %(maxID)s " % locals(
)
else:
        thesHtmIds = ",".join(map(str, trixelArray))
htmWhereClause = "where %(htmLevel)s in (%(thesHtmIds)s)" % locals(
)
cols = self.columns[:]
if cols != "*" and raCol.lower() not in cols.lower():
cols += ", " + raCol
if cols != "*" and decCol.lower() not in cols.lower():
cols += ", " + decCol
# FINALLY BUILD THE FULL QUERY
if self.distinct:
sqlQuery = """select DISTINCT %(cols)s from %(tableName)s %(htmWhereClause)s""" % locals(
)
else:
sqlQuery = """select %(cols)s from %(tableName)s %(htmWhereClause)s""" % locals(
)
if self.sqlWhere and len(self.sqlWhere):
sqlQuery += " and " + self.sqlWhere
self.log.debug(
'completed the ``_get_on_trixel_sources_from_database_query`` method')
return sqlQuery | 0.004093 |
def _get_ess(sample_array):
"""Compute the effective sample size for a 2D array."""
shape = sample_array.shape
if len(shape) != 2:
raise TypeError("Effective sample size calculation requires 2 dimensional arrays.")
n_chain, n_draws = shape
if n_chain <= 1:
raise TypeError("Effective sample size calculation requires multiple chains.")
acov = np.asarray([_autocov(sample_array[chain]) for chain in range(n_chain)])
chain_mean = sample_array.mean(axis=1)
chain_var = acov[:, 0] * n_draws / (n_draws - 1.0)
acov_t = acov[:, 1] * n_draws / (n_draws - 1.0)
mean_var = np.mean(chain_var)
var_plus = mean_var * (n_draws - 1.0) / n_draws
var_plus += np.var(chain_mean, ddof=1)
rho_hat_t = np.zeros(n_draws)
rho_hat_even = 1.0
rho_hat_t[0] = rho_hat_even
rho_hat_odd = 1.0 - (mean_var - np.mean(acov_t)) / var_plus
rho_hat_t[1] = rho_hat_odd
# Geyer's initial positive sequence
max_t = 1
t = 1
while t < (n_draws - 2) and (rho_hat_even + rho_hat_odd) >= 0.0:
rho_hat_even = 1.0 - (mean_var - np.mean(acov[:, t + 1])) / var_plus
rho_hat_odd = 1.0 - (mean_var - np.mean(acov[:, t + 2])) / var_plus
if (rho_hat_even + rho_hat_odd) >= 0:
rho_hat_t[t + 1] = rho_hat_even
rho_hat_t[t + 2] = rho_hat_odd
max_t = t + 2
t += 2
# Geyer's initial monotone sequence
t = 3
while t <= max_t - 2:
if (rho_hat_t[t + 1] + rho_hat_t[t + 2]) > (rho_hat_t[t - 1] + rho_hat_t[t]):
rho_hat_t[t + 1] = (rho_hat_t[t - 1] + rho_hat_t[t]) / 2.0
rho_hat_t[t + 2] = rho_hat_t[t + 1]
t += 2
ess = (
int((n_chain * n_draws) / (-1.0 + 2.0 * np.sum(rho_hat_t)))
if not np.any(np.isnan(rho_hat_t))
else np.nan
)
return ess | 0.00272 |
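``_autocov`` is not shown above; a minimal stand-in (biased per-chain autocovariance, assuming that is what the real helper returns) plus a call pattern, if placed in the same module as ``_get_ess``, might look like:

```python
import numpy as np

def _autocov(x):
    """Biased autocovariance at lags 0..n-1 (assumed shape of the real helper)."""
    x = np.asarray(x, dtype=float)
    n = len(x)
    x = x - x.mean()
    return np.correlate(x, x, mode="full")[n - 1:] / n

rng = np.random.default_rng(0)
draws = rng.normal(size=(4, 500))   # 4 chains, 500 draws each
print(_get_ess(draws))              # roughly 4 * 500 for independent draws
```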
def _load_nucmer_hits(self, infile):
'''Returns dict ref name => list of nucmer hits from infile'''
hits = {}
file_reader = pymummer.coords_file.reader(infile)
for al in file_reader:
if al.ref_name not in hits:
hits[al.ref_name] = []
hits[al.ref_name].append(al)
return hits | 0.00565 |
def untrim(self, n='all'):
"""
Removes xmin, xmax, ymin, and ymax.
Parameters
----------
n='all'
Which data set to perform this action upon. 'all' means all data
sets, or you can specify a list.
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0:
self._error("No data. Please use set_data() and plot() prior to zooming.")
return
if _s.fun.is_a_number(n): n = [n]
elif isinstance(n,str): n = list(range(len(self._set_xdata)))
# loop over the specified plots
for i in n:
self['xmin'][i] = None
self['xmax'][i] = None
self['ymin'][i] = None
self['ymax'][i] = None
# now show the update.
self.clear_results()
if self['autoplot']: self.plot()
return self | 0.014396 |
def create_user(self, name, password):
"""Create user with hashed password."""
hashed_password = self._password_hasher(password)
return dict(name=name, password=hashed_password) | 0.00995 |
def build(self):
"""
Create the current layer
:return: string of the packet with the payload
"""
p = self.do_build()
p += self.build_padding()
p = self.build_done(p)
return p | 0.008368 |
def popen_xboard(cls, command: Union[str, List[str]], *, timeout: Optional[float] = 10.0, debug: bool = False, setpgrp: bool = False, **popen_args: Any) -> "SimpleEngine":
"""
Spawns and initializes an XBoard engine.
Returns a :class:`~chess.engine.SimpleEngine` instance.
"""
return cls.popen(XBoardProtocol, command, timeout=timeout, debug=debug, setpgrp=setpgrp, **popen_args) | 0.009547 |
def raw_clean(self, datas):
"""
    Apply cleaning to the raw data.
"""
datas = strip_tags(datas) # Remove HTML
datas = STOP_WORDS.rebase(datas, '') # Remove STOP WORDS
datas = PUNCTUATION.sub('', datas) # Remove punctuation
datas = datas.lower()
return [d for d in datas.split() if len(d) > 1] | 0.00542 |
def is_colliding(self, other):
"""Check to see if two AABoundingBoxes are colliding."""
if isinstance(other, AABoundingBox):
if self.rect.colliderect(other.rect):
return True
return False | 0.00823 |
def compact(self, term_doc_matrix):
'''
Parameters
----------
term_doc_matrix : TermDocMatrix
Term document matrix object to compact
Returns
-------
New term doc matrix
'''
domain_mat = CombineDocsIntoDomains(term_doc_matrix).get_new_term_doc_mat(self.doc_domains)
domain_count = (domain_mat > 0).sum(axis=0)
valid_term_mask = (self.max_domain_count >= domain_count) \
& (domain_count >= self.min_domain_count)
indices_to_compact = np.arange(term_doc_matrix.get_num_terms())[~valid_term_mask.A1]
return term_doc_matrix.remove_terms_by_indices(indices_to_compact) | 0.034314 |
def end_container(self, cancel=None):
"""Finishes and registers the currently-active container, unless
'cancel' is True."""
if not self._containers:
return
container = self._containers.pop()
if len(self._containers) >= 1:
parent = self._containers[-1]
else:
parent = self._doc.text
if not cancel:
parent.addElement(container) | 0.004662 |
async def handle_event(self, event: "node.LavalinkEvents", extra):
"""
Handles various Lavalink Events.
If the event is TRACK END, extra will be TrackEndReason.
If the event is TRACK EXCEPTION, extra will be the string reason.
If the event is TRACK STUCK, extra will be the threshold ms.
Parameters
----------
event : node.LavalinkEvents
extra
"""
if event == LavalinkEvents.TRACK_END:
if extra == TrackEndReason.FINISHED:
await self.play()
else:
self._is_playing = False | 0.003231 |
def _to_ndarray(self, a):
"""Casts Python lists and tuples to a numpy array or raises an AssertionError."""
if isinstance(a, (list, tuple)):
a = numpy.array(a)
if not is_ndarray(a):
raise TypeError("Expected an ndarray but got object of type '{}' instead".format(type(a)))
return a | 0.011765 |
def normal_fields(self):
"""fields that aren't magic (eg, aren't _id, _created, _updated)"""
return {f:v for f, v in self.fields.items() if not f.startswith('_')} | 0.016854 |
def _get_path(name, settings, mkdir=True):
"""
Generate a project path.
"""
default_projects_path = settings.config.get("projects_path")
path = None
if default_projects_path:
path = raw_input("\nWhere would you like to create this project? [{0}/{1}] ".format(default_projects_path, name))
if not path:
path = os.path.join(default_projects_path, name)
else:
while not path:
path = raw_input("\nWhere would you like to create this project? (e.g. ~/tarbell/) ")
return os.path.expanduser(path) | 0.005245 |
def random_int(maximum_value):
""" Random generator (PyCrypto getrandbits wrapper). The result is a non-negative value.
:param maximum_value: maximum integer value
:return: int
"""
if maximum_value == 0:
return 0
elif maximum_value == 1:
return random_bits(1)
bits = math.floor(math.log2(maximum_value))
result = random_bits(bits) + random_int(maximum_value - ((2 ** bits) - 1))
return result | 0.031941 |
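If ``random_bits(n)`` behaves like ``secrets.randbits(n)`` (an assumption; the real helper wraps PyCrypto) and this stand-in is pasted alongside the function above, the recursion composes a non-negative value no larger than ``maximum_value``:

```python
import secrets

def random_bits(n):
    """Hypothetical stand-in for the PyCrypto-backed helper."""
    return secrets.randbits(n)

# e.g. random_int(100): floor(log2(100)) = 6 bits -> randbits(6) + random_int(37),
# and the recursion bottoms out so the sum never exceeds 100.
print(random_int(100))
```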
def drug_timelines(
drug_events_df: DataFrame,
event_lasts_for: datetime.timedelta,
patient_colname: str = DEFAULT_PATIENT_COLNAME,
event_datetime_colname: str = DEFAULT_DRUG_EVENT_DATETIME_COLNAME) \
-> Dict[Any, IntervalList]:
"""
Takes a set of drug event start times (one or more per patient), plus a
fixed time that each event is presumed to last for, and returns an
:class:`IntervalList` for each patient representing the set of events
(which may overlap, in which case they will be amalgamated).
Args:
drug_events_df:
pandas :class:`DataFrame` containing the event data
event_lasts_for:
when an event occurs, how long is it assumed to last for? For
example, if a prescription of lithium occurs on 2001-01-01, how
long is the patient presumed to be taking lithium as a consequence
(e.g. 1 day? 28 days? 6 months?)
patient_colname:
name of the column in ``drug_events_df`` containing the patient ID
event_datetime_colname:
name of the column in ``drug_events_df`` containing the date/time
of each event
Returns:
dict: mapping patient ID to a :class:`IntervalList` object indicating
the amalgamated intervals from the events
"""
sourcecolnum_pt = drug_events_df.columns.get_loc(patient_colname)
sourcecolnum_when = drug_events_df.columns.get_loc(event_datetime_colname)
timelines = defaultdict(IntervalList)
nrows = len(drug_events_df)
for rowidx in range(nrows):
patient_id = drug_events_df.iat[rowidx, sourcecolnum_pt]
event_when = drug_events_df.iat[rowidx, sourcecolnum_when]
interval = Interval(event_when, event_when + event_lasts_for)
ivlist = timelines[patient_id] # will create if unknown
ivlist.add(interval)
return timelines | 0.000509 |
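``Interval``/``IntervalList`` are external helpers; the amalgamation behaviour the docstring describes (overlapping per-patient events merged into one span) can be sketched with plain tuples:

```python
from datetime import datetime, timedelta

event_lasts_for = timedelta(days=28)                      # assumed duration
starts = [datetime(2001, 1, 1), datetime(2001, 1, 20), datetime(2001, 6, 1)]
intervals = sorted((s, s + event_lasts_for) for s in starts)

merged = []
for start, end in intervals:
    if merged and start <= merged[-1][1]:                 # overlaps previous span
        merged[-1] = (merged[-1][0], max(merged[-1][1], end))
    else:
        merged.append((start, end))
print(merged)   # the two January events amalgamate; the June event stays separate
```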
def ssim(data, ground_truth, size=11, sigma=1.5, K1=0.01, K2=0.03,
dynamic_range=None, normalized=False, force_lower_is_better=False):
r"""Structural SIMilarity between ``data`` and ``ground_truth``.
The SSIM takes value -1 for maximum dissimilarity and +1 for maximum
similarity.
See also `this Wikipedia article
<https://en.wikipedia.org/wiki/Structural_similarity>`_.
Parameters
----------
data : `array-like`
Input data to compare to the ground truth.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
size : odd int, optional
Size in elements per axis of the Gaussian window that is used
for all smoothing operations.
sigma : positive float, optional
Width of the Gaussian function used for smoothing.
K1, K2 : positive float, optional
Small constants to stabilize the result. See [Wan+2004] for details.
dynamic_range : nonnegative float, optional
Difference between the maximum and minimum value that the pixels
can attain. Use 255 if pixel range is :math:`[0, 255]` and 1 if
it is :math:`[0, 1]`. Default: `None`, obtain maximum and minimum
from the ground truth.
normalized : bool, optional
If ``True``, the output values are mapped to the interval
:math:`[0, 1]` (see `Notes` for details), otherwise return the
original SSIM.
force_lower_is_better : bool, optional
If ``True``, it is ensured that lower values correspond to better
matches by returning the negative of the SSIM, otherwise the (possibly
normalized) SSIM is returned. If both `normalized` and
`force_lower_is_better` are ``True``, then the order is reversed before
mapping the outputs, so that the latter are still in the interval
:math:`[0, 1]`.
Returns
-------
ssim : float
FOM value, where a higher value means a better match
if `force_lower_is_better` is ``False``.
Notes
-----
The SSIM is computed on small windows and then averaged over the whole
image. The SSIM between two windows :math:`x` and :math:`y` of size
:math:`N \times N`
.. math::
SSIM(x,y) = \frac{(2\mu_x\mu_y + c_1)(2\sigma_{xy} + c_2)}
{(\mu_x^2 + \mu_y^2 + c_1)(\sigma_x^2 + \sigma_y^2 + c_2)}
where:
* :math:`\mu_x`, :math:`\mu_y` is the mean of :math:`x` and :math:`y`,
respectively.
* :math:`\sigma_x`, :math:`\sigma_y` is the standard deviation of
:math:`x` and :math:`y`, respectively.
* :math:`\sigma_{xy}` the covariance of :math:`x` and :math:`y`
* :math:`c_1 = (k_1L)^2`, :math:`c_2 = (k_2L)^2` where :math:`L` is the
dynamic range of the image.
The unnormalized values are contained in the interval :math:`[-1, 1]`,
where 1 corresponds to a perfect match. The normalized values are given by
.. math::
SSIM_{normalized}(x, y) = \frac{SSIM(x, y) + 1}{2}
References
----------
[Wan+2004] Wang, Z, Bovik, AC, Sheikh, HR, and Simoncelli, EP.
*Image Quality Assessment: From Error Visibility to Structural Similarity*.
IEEE Transactions on Image Processing, 13.4 (2004), pp 600--612.
"""
from scipy.signal import fftconvolve
data = np.asarray(data)
ground_truth = np.asarray(ground_truth)
# Compute gaussian on a `size`-sized grid in each axis
coords = np.linspace(-(size - 1) / 2, (size - 1) / 2, size)
grid = sparse_meshgrid(*([coords] * data.ndim))
window = np.exp(-(sum(xi ** 2 for xi in grid) / (2.0 * sigma ** 2)))
window /= np.sum(window)
def smoothen(img):
"""Smoothes an image by convolving with a window function."""
return fftconvolve(window, img, mode='valid')
if dynamic_range is None:
dynamic_range = np.max(ground_truth) - np.min(ground_truth)
C1 = (K1 * dynamic_range) ** 2
C2 = (K2 * dynamic_range) ** 2
mu1 = smoothen(data)
mu2 = smoothen(ground_truth)
mu1_sq = mu1 * mu1
mu2_sq = mu2 * mu2
mu1_mu2 = mu1 * mu2
sigma1_sq = smoothen(data * data) - mu1_sq
sigma2_sq = smoothen(ground_truth * ground_truth) - mu2_sq
sigma12 = smoothen(data * ground_truth) - mu1_mu2
num = (2 * mu1_mu2 + C1) * (2 * sigma12 + C2)
denom = (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
pointwise_ssim = num / denom
result = np.mean(pointwise_ssim)
if force_lower_is_better:
result = -result
if normalized:
result = (result + 1.0) / 2.0
return result | 0.000218 |
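As a quick sanity check of the metric (the ``ssim`` above also needs the library's ``sparse_meshgrid`` helper), scikit-image's implementation, assuming it is installed, returns 1.0 for identical images and less for noisy ones:

```python
import numpy as np
from skimage.metrics import structural_similarity

rng = np.random.default_rng(0)
img = rng.random((64, 64))
noisy = img + 0.1 * rng.normal(size=img.shape)

print(structural_similarity(img, img, data_range=1.0))      # 1.0
print(structural_similarity(img, noisy, data_range=1.0))    # < 1.0
```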
def representation_function_compiler(self, func_name):
"""Generic function can be used to compile __repr__ or __unicode__ or __str__"""
def get_col_accessor(col):
return ALCHEMY_TEMPLATES.col_accessor.safe_substitute(col=col)
def get_col_evaluator(col):
return ALCHEMY_TEMPLATES.col_evaluator.safe_substitute(col=col)
col_evaluators = ", ".join([get_col_evaluator(n) for n in self.primary_keys])
col_accessors = ", ".join([get_col_accessor(n) for n in self.primary_keys])
return ALCHEMY_TEMPLATES.representor_function.safe_substitute(func_name=func_name,
col_accessors=col_accessors,
col_evaluators=col_evaluators,
class_name=self.class_name) | 0.009698 |
def verify(xml):
'''Verifies whether the validity of the signature contained in an XML
document following the XMLDSig standard'''
try:
from cStringIO import cStringIO as StringIO
except ImportError:
from StringIO import StringIO
xml = xml.encode('utf-8', 'xmlcharrefreplace')
tree = etree.parse(StringIO(xml))
try:
'''Isolating the Signature element'''
signature_tree = tree.xpath('xmldsig:Signature',
namespaces=NAMESPACES)[0]
'''Removing the Signature element from the main tree to compute a new
digest value with the actual content of the XML.'''
signature_tree.getparent().remove(signature_tree)
signed_info = _generate_signed_info(etree.tostring(tree))
'''Constructing the key from the Modulus and Exponent stored in the
Signature/KeyInfo/KeyValue/RSAKeyValue element'''
from Crypto.PublicKey import RSA
modulus = signature_tree.xpath(KEY_INFO_PATH + '/xmldsig:Modulus',
namespaces=NAMESPACES)[0].text
exponent = signature_tree.xpath(KEY_INFO_PATH + '/xmldsig:Exponent',
namespaces=NAMESPACES)[0].text
public_key = RSA.construct((b64d(modulus), b64d(exponent)))
'''Isolating the signature value to verify'''
signature = signature_tree.xpath('xmldsig:SignatureValue',
namespaces=NAMESPACES)[0].text
except IndexError:
raise RuntimeError('XML does not contain a properly formatted ' \
' Signature element')
verifier = PKCS1_v1_5.PKCS115_SigScheme(public_key)
return verifier.verify(SHA.new(c14n(signed_info)),
binascii.a2b_base64(signature)) | 0.006293 |
def row_absent(name, db, table, where_sql, where_args=None):
'''
Makes sure the specified row is absent in db. If multiple rows
match where_sql, then the state will fail.
name
Only used as the unique ID
db
The database file name
table
The table name to check
where_sql
The sql to select the row to check
where_args
The list parameters to substitute in where_sql
'''
changes = {'name': name,
'changes': {},
'result': None,
'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = _dict_factory
rows = None
if where_args is None:
rows = _query(conn,
"SELECT * FROM `" + table + "` WHERE " + where_sql)
else:
rows = _query(conn,
"SELECT * FROM `" + table + "` WHERE " + where_sql,
where_args)
if len(rows) > 1:
changes['result'] = False
changes['comment'] = "More than one row matched the specified query"
elif len(rows) == 1:
if __opts__['test']:
changes['result'] = True
changes['comment'] = "Row will be removed in " + table
changes['changes']['old'] = rows[0]
else:
if where_args is None:
cursor = conn.execute("DELETE FROM `" +
table + "` WHERE " + where_sql)
else:
cursor = conn.execute("DELETE FROM `" +
table + "` WHERE " + where_sql,
where_args)
conn.commit()
if cursor.rowcount == 1:
changes['result'] = True
changes['comment'] = "Row removed"
changes['changes']['old'] = rows[0]
else:
changes['result'] = False
changes['comment'] = "Unable to remove row"
else:
changes['result'] = True
changes['comment'] = 'Row is absent'
except Exception as e:
changes['result'] = False
changes['comment'] = six.text_type(e)
finally:
if conn:
conn.close()
return changes | 0.000818 |
def coords_from_query(query):
"""Transform a query line into a (lng, lat) pair of coordinates."""
try:
coords = json.loads(query)
except ValueError:
query = query.replace(',', ' ')
vals = query.split()
coords = [float(v) for v in vals]
return tuple(coords[:2]) | 0.003247 |
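Both input styles the docstring implies are accepted, since a ``json.loads`` failure falls through to the whitespace/comma parser (assuming the function above is importable with ``json`` in scope in its module):

```python
print(coords_from_query("[-122.4194, 37.7749]"))   # JSON list
print(coords_from_query("-122.4194, 37.7749"))     # comma/space separated text
# both -> (-122.4194, 37.7749)
```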
def to_csv(self, path, iamc_index=False, **kwargs):
"""Write timeseries data to a csv file
Parameters
----------
path: string
file path
iamc_index: bool, default False
if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;
else, use all `data` columns
"""
self._to_file_format(iamc_index).to_csv(path, index=False, **kwargs) | 0.004651 |
def token_auto_auth(func):
"""Wrap class methods with automatic token re-authentication.
This wrapper will detect authentication failures coming from its wrapped method. When one is
caught, it will request a new token, and simply replay the original request.
The one constraint that this wrapper has is that the wrapped method's class must have the
:py:class:`objectrocket.client.Client` object embedded in it as the property ``_client``. Such
is the design of all current client operations layers.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
response = func(self, *args, **kwargs)
# If auth failure occurs, attempt to re-authenticate and replay once at most.
except errors.AuthFailure:
# Request to have authentication refreshed.
self._client.auth._refresh()
# Replay original request.
response = func(self, *args, **kwargs)
return response
# TODO(TheDodd): match func call signature and docs.
return wrapper | 0.005571 |
def load_cropped_svhn(path='data', include_extra=True):
"""Load Cropped SVHN.
The Cropped Street View House Numbers (SVHN) Dataset contains 32x32x3 RGB images.
Digit '1' has label 1, '9' has label 9 and '0' has label 0 (the original dataset uses 10 to represent '0'), see `ufldl website <http://ufldl.stanford.edu/housenumbers/>`__.
Parameters
----------
path : str
The path that the data is downloaded to.
include_extra : boolean
If True (default), add extra images to the training set.
Returns
-------
X_train, y_train, X_test, y_test: tuple
Return splitted training/test set respectively.
Examples
---------
>>> X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)
>>> tl.vis.save_images(X_train[0:100], [10, 10], 'svhn.png')
"""
start_time = time.time()
path = os.path.join(path, 'cropped_svhn')
logging.info("Load or Download Cropped SVHN > {} | include extra images: {}".format(path, include_extra))
url = "http://ufldl.stanford.edu/housenumbers/"
np_file = os.path.join(path, "train_32x32.npz")
if file_exists(np_file) is False:
filename = "train_32x32.mat"
filepath = maybe_download_and_extract(filename, path, url)
mat = sio.loadmat(filepath)
X_train = mat['X'] / 255.0 # to [0, 1]
X_train = np.transpose(X_train, (3, 0, 1, 2))
y_train = np.squeeze(mat['y'], axis=1)
y_train[y_train == 10] = 0 # replace 10 to 0
np.savez(np_file, X=X_train, y=y_train)
del_file(filepath)
else:
v = np.load(np_file)
X_train = v['X']
y_train = v['y']
logging.info(" n_train: {}".format(len(y_train)))
np_file = os.path.join(path, "test_32x32.npz")
if file_exists(np_file) is False:
filename = "test_32x32.mat"
filepath = maybe_download_and_extract(filename, path, url)
mat = sio.loadmat(filepath)
X_test = mat['X'] / 255.0
X_test = np.transpose(X_test, (3, 0, 1, 2))
y_test = np.squeeze(mat['y'], axis=1)
y_test[y_test == 10] = 0
np.savez(np_file, X=X_test, y=y_test)
del_file(filepath)
else:
v = np.load(np_file)
X_test = v['X']
y_test = v['y']
logging.info(" n_test: {}".format(len(y_test)))
if include_extra:
logging.info(" getting extra 531131 images, please wait ...")
np_file = os.path.join(path, "extra_32x32.npz")
if file_exists(np_file) is False:
logging.info(" the first time to load extra images will take long time to convert the file format ...")
filename = "extra_32x32.mat"
filepath = maybe_download_and_extract(filename, path, url)
mat = sio.loadmat(filepath)
X_extra = mat['X'] / 255.0
X_extra = np.transpose(X_extra, (3, 0, 1, 2))
y_extra = np.squeeze(mat['y'], axis=1)
y_extra[y_extra == 10] = 0
np.savez(np_file, X=X_extra, y=y_extra)
del_file(filepath)
else:
v = np.load(np_file)
X_extra = v['X']
y_extra = v['y']
# print(X_train.shape, X_extra.shape)
logging.info(" adding n_extra {} to n_train {}".format(len(y_extra), len(y_train)))
t = time.time()
X_train = np.concatenate((X_train, X_extra), 0)
y_train = np.concatenate((y_train, y_extra), 0)
# X_train = np.append(X_train, X_extra, axis=0)
# y_train = np.append(y_train, y_extra, axis=0)
logging.info(" added n_extra {} to n_train {} took {}s".format(len(y_extra), len(y_train), time.time() - t))
else:
logging.info(" no extra images are included")
logging.info(" image size: %s n_train: %d n_test: %d" % (str(X_train.shape[1:4]), len(y_train), len(y_test)))
logging.info(" took: {}s".format(int(time.time() - start_time)))
return X_train, y_train, X_test, y_test | 0.002259 |
def set_filter(self, props_filter):
"""
Changes the current filter for the given one
:param props_filter: The new requirement filter on service properties
:raise TypeError: Unknown filter type
"""
if props_filter is not None and not (
is_string(props_filter)
or isinstance(
props_filter, (ldapfilter.LDAPFilter, ldapfilter.LDAPCriteria)
)
):
# Unknown type
raise TypeError(
"Invalid filter type {0}".format(type(props_filter).__name__)
)
if props_filter is not None:
# Filter given, keep its string form
self.__original_filter = str(props_filter)
else:
# No filter
self.__original_filter = None
# Parse the filter
self.filter = ldapfilter.get_ldap_filter(props_filter)
# Prepare the full filter
spec_filter = "({0}={1})".format(OBJECTCLASS, self.specification)
self.__full_filter = ldapfilter.combine_filters(
(spec_filter, self.filter)
) | 0.001776 |
def to_utc(a_datetime, keep_utc_tzinfo=False):
"""
Convert a time awared datetime to utc datetime.
:param a_datetime: a timezone awared datetime. (If not, then just returns)
:param keep_utc_tzinfo: whether to retain the utc time zone information.
    **Notes**
    Convert a timezone-aware datetime to UTC. For a UTC time it does not
    matter whether the timezone info is kept.
"""
if a_datetime.tzinfo:
utc_datetime = a_datetime.astimezone(utc) # convert to utc time
if keep_utc_tzinfo is False:
utc_datetime = utc_datetime.replace(tzinfo=None)
return utc_datetime
else:
return a_datetime | 0.001681 |
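The same conversion with only the standard library (the function above expects a pytz-style ``utc`` object in scope):

```python
from datetime import datetime, timedelta, timezone

local = datetime(2020, 1, 1, 9, 0, tzinfo=timezone(timedelta(hours=9)))
utc_aware = local.astimezone(timezone.utc)
utc_naive = utc_aware.replace(tzinfo=None)     # keep_utc_tzinfo=False behaviour
print(utc_naive)                               # 2020-01-01 00:00:00
```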
def __put_buttons_in_buttonframe(choices, default_choice, cancel_choice):
"""Put the buttons in the buttons frame
"""
global buttonsFrame, cancel_invoke
# TODO: I'm using a dict to hold buttons, but this could all be cleaned up if I subclass Button to hold
# all the event bindings, etc
# TODO: Break __buttonEvent out into three: regular keyboard, default
# select, and cancel select.
unique_choices = ut.uniquify_list_of_strings(choices)
# Create buttons dictionary and Tkinter widgets
buttons = dict()
for button_text, unique_button_text in zip(choices, unique_choices):
this_button = dict()
this_button['original_text'] = button_text
this_button['clean_text'], this_button[
'hotkey'], hotkey_position = ut.parse_hotkey(button_text)
this_button['widget'] = Button(buttonsFrame,
takefocus=1,
text=this_button['clean_text'],
underline=hotkey_position)
this_button['widget'].pack(
expand=YES, side=LEFT, padx='1m', pady='1m', ipadx='2m', ipady='1m')
buttons[unique_button_text] = this_button
# Bind arrows, Enter, Escape
for this_button in list(buttons.values()):
bindArrows(this_button['widget'])
for selectionEvent in st.STANDARD_SELECTION_EVENTS:
this_button['widget'].bind("<{}>".format(selectionEvent),
lambda e: __buttonEvent(
e, buttons, virtual_event='select'),
add=True)
# Assign default and cancel buttons
if cancel_choice in buttons:
buttons[cancel_choice]['cancel_choice'] = True
boxRoot.bind_all('<Escape>', lambda e: __buttonEvent(
e, buttons, virtual_event='cancel'), add=True)
boxRoot.protocol('WM_DELETE_WINDOW', lambda: __buttonEvent(
None, buttons, virtual_event='cancel'))
if default_choice in buttons:
buttons[default_choice]['default_choice'] = True
buttons[default_choice]['widget'].focus_force()
# Bind hotkeys
for hk in [button['hotkey'] for button in list(buttons.values()) if button['hotkey']]:
boxRoot.bind_all(hk, lambda e: __buttonEvent(e, buttons), add=True)
return | 0.001685 |
def treynor_ratio(self, benchmark, rf=0.02):
"""Return over `rf` per unit of systematic risk.
A measure of risk-adjusted performance that relates a
portfolio's excess returns to the portfolio's beta.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
rf : {float, TSeries, pd.Series}, default 0.02
            If float, this represents a *compounded annualized*
risk-free rate; 2.0% is the default.
If a TSeries or pd.Series, this represents a time series
of periodic returns to a risk-free security.
To download a risk-free rate return series using
            3-month US T-bill yields, see `pyfinance.datasets.load_rf`.
Returns
-------
float
"""
benchmark = _try_to_squeeze(benchmark)
if benchmark.ndim > 1:
raise ValueError("Treynor ratio requires a single benchmark")
rf = self._validate_rf(rf)
beta = self.beta(benchmark)
return (self.anlzd_ret() - rf) / beta | 0.001705 |
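A hedged numeric sketch of the computation above with hypothetical figures, showing that the ratio is simply excess annualized return per unit of beta:

anlzd_ret, rf, beta = 0.10, 0.02, 1.25    # hypothetical portfolio return, risk-free rate, beta
treynor = (anlzd_ret - rf) / beta
print(round(treynor, 3))                  # 0.064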
def find_analysis_interims(ar_or_sample):
""" This function is used to find keywords that are not on the analysis
but keywords that are on the interim fields.
    This function and the is_keyword function should probably be in
resultsimport.py or somewhere central where it can be used by other
instrument interfaces.
"""
interim_fields = list()
for analysis in find_analyses(ar_or_sample):
keywords = get_interims_keywords(analysis)
interim_fields.extend(keywords)
return list(set(interim_fields)) | 0.001779 |
def _extend_breaks(self, major):
"""
Append 2 extra breaks at either end of major
If breaks of transform space are non-equidistant,
:func:`minor_breaks` add minor breaks beyond the first
        and last major breaks. The solution is to extend those
        breaks (in transformed space) before the minor break call
        is made. How the breaks are extended depends on the type of transform.
"""
trans = self.trans
trans = trans if isinstance(trans, type) else trans.__class__
# so far we are only certain about this extending stuff
# making sense for log transform
is_log = trans.__name__.startswith('log')
diff = np.diff(major)
step = diff[0]
if is_log and all(diff == step):
major = np.hstack([major[0]-step, major, major[-1]+step])
return major | 0.002315 |
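A standalone sketch of the extension step above, using a hypothetical equidistant array of log10-transformed major breaks:

import numpy as np

major = np.array([1.0, 2.0, 3.0])     # log10 of 10, 100, 1000
step = np.diff(major)[0]
extended = np.hstack([major[0] - step, major, major[-1] + step])
print(extended)                        # [0. 1. 2. 3. 4.]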
def list_device_subscriptions(self, device_id, **kwargs):
"""Lists all subscribed resources from a single device
:param device_id: ID of the device (Required)
:returns: a list of subscribed resources
:rtype: list of str
"""
api = self._get_api(mds.SubscriptionsApi)
resp = api.get_endpoint_subscriptions(device_id, **kwargs)
return resp.split("\n") | 0.004843 |
def translations(self):
"""
Yield all six translations of a nucleotide sequence.
@return: A generator that produces six L{TranslatedRead} instances.
"""
rc = self.reverseComplement().sequence
for reverseComplemented in False, True:
for frame in 0, 1, 2:
seq = rc if reverseComplemented else self.sequence
# Get the suffix of the sequence for translation. I.e.,
# skip 0, 1, or 2 initial bases, depending on the frame.
# Note that this makes a copy of the sequence, which we can
                # then safely append 'N' bases to in order to adjust its length to
# be zero mod 3.
suffix = seq[frame:]
lengthMod3 = len(suffix) % 3
if lengthMod3:
suffix += ('NN' if lengthMod3 == 1 else 'N')
yield TranslatedRead(self, translate(suffix), frame,
reverseComplemented) | 0.001972 |
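A minimal sketch of the frame-offset and 'N'-padding logic used above, on a hypothetical forward-strand sequence (the reverse-complement pass is omitted):

seq = 'ATGGCCATTG'                     # length 10
for frame in (0, 1, 2):
    suffix = seq[frame:]
    pad = (-len(suffix)) % 3           # same effect as the 'NN'/'N' branch above
    print(frame, suffix + 'N' * pad)   # each padded suffix is now a multiple of 3 long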
def add_constraints(self, *args, **kwargs):
"""
Add some constraints to the state.
You may pass in any number of symbolic booleans as variadic positional arguments.
"""
if len(args) > 0 and isinstance(args[0], (list, tuple)):
raise Exception("Tuple or list passed to add_constraints!")
if o.TRACK_CONSTRAINTS in self.options and len(args) > 0:
if o.SIMPLIFY_CONSTRAINTS in self.options:
constraints = [ self.simplify(a) for a in args ]
else:
constraints = args
self._inspect('constraints', BP_BEFORE, added_constraints=constraints)
constraints = self._inspect_getattr("added_constraints", constraints)
added = self.solver.add(*constraints)
self._inspect('constraints', BP_AFTER)
# add actions for the added constraints
if o.TRACK_CONSTRAINT_ACTIONS in self.options:
for c in added:
sac = SimActionConstraint(self, c)
self.history.add_action(sac)
else:
# preserve the old action logic for when we don't track constraints (why?)
if (
'action' in kwargs and kwargs['action'] and
o.TRACK_CONSTRAINT_ACTIONS in self.options and len(args) > 0
):
for arg in args:
if self.solver.symbolic(arg):
sac = SimActionConstraint(self, arg)
self.history.add_action(sac)
if o.ABSTRACT_SOLVER in self.options and len(args) > 0:
for arg in args:
if self.solver.is_false(arg):
self._satisfiable = False
return
if self.solver.is_true(arg):
continue
                # `is_true` and `is_false` do not use VSABackend currently (see commits 97a75366 and 2dfba73e in
# claripy). There is a chance that VSA backend can in fact handle it.
# Therefore we try to resolve it with VSABackend again
if claripy.backends.vsa.is_false(arg):
self._satisfiable = False
return
if claripy.backends.vsa.is_true(arg):
continue
                # It's neither True nor False. Let's try to apply the condition.
                # We take the argument, extract a list of constrained SIs out of it (if we could, of course), and
                # then replace each original SI with the intersection of the original SI and the constrained one.
_, converted = self.solver.constraint_to_si(arg)
for original_expr, constrained_si in converted:
if not original_expr.variables:
l.error('Incorrect original_expression to replace in add_constraints(). '
'This is due to defects in VSA logics inside claripy. Please report '
'to Fish and he will fix it if he\'s free.')
continue
new_expr = constrained_si
self.registers.replace_all(original_expr, new_expr)
for _, region in self.memory.regions.items():
region.memory.replace_all(original_expr, new_expr)
l.debug("SimState.add_constraints: Applied to final state.")
elif o.SYMBOLIC not in self.options and len(args) > 0:
for arg in args:
if self.solver.is_false(arg):
self._satisfiable = False
return | 0.004077 |
def variants(ctx, snpeff):
"""Print the variants in a vcf"""
head = ctx.parent.head
vcf_handle = ctx.parent.handle
outfile = ctx.parent.outfile
silent = ctx.parent.silent
print_headers(head, outfile=outfile, silent=silent)
for line in vcf_handle:
print_variant(variant_line=line, outfile=outfile, silent=silent)
if snpeff:
variant_dict = get_variant_dict(
variant_line = line,
header_line = head.header
)
            # Create an info dict:
info_dict = get_info_dict(
info_line = variant_dict['INFO']
)
snpeff_string = info_dict.get('ANN')
if snpeff_string:
                # Get the snpeff annotations
snpeff_info = get_snpeff_info(
snpeff_string = snpeff_string,
snpeff_header = head.snpeff_columns
) | 0.016913 |
def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols):
"""Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop.
"""
bbox = denormalize_bbox(bbox, rows, cols)
x_min, y_min, x_max, y_max = bbox
x1, y1, x2, y2 = crop_coords
cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1]
return normalize_bbox(cropped_bbox, crop_height, crop_width) | 0.006073 |
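A worked sketch of the pixel-space shift performed above (the de-/normalization helpers are assumed to come from the same module; all numbers are hypothetical):

x_min, y_min, x_max, y_max = 120, 80, 220, 180    # bbox in the full image, pixels
x1, y1, x2, y2 = 100, 50, 300, 250                # crop window, pixels
cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1]
print(cropped_bbox)                               # [20, 30, 120, 130]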
def terminal_width():
"""Returns the terminal's width (number of character columns)."""
try:
if os.name == 'nt':
_WindowsCSBI.initialize()
return _WindowsCSBI.get_info(_WindowsCSBI.HANDLE_STDOUT)['terminal_width']
return struct.unpack('hhhh', fcntl.ioctl(0, termios.TIOCGWINSZ, '\000' * 8))[1]
except IOError:
return 80 | 0.007916 |
def shape(self):
"""
Returns (rowCount, valueCount)
"""
bf = self.copy()
content = requests.get(bf.dataset_url).json()
rowCount = content['status']['rowCount']
valueCount = content['status']['valueCount']
return (rowCount, valueCount) | 0.006689 |
def dyndns_add(nameserver, name, rdata, type="A", ttl=10):
"""Send a DNS add message to a nameserver for "name" to have a new "rdata"
dyndns_add(nameserver, name, rdata, type="A", ttl=10) -> result code (0=ok)
example: dyndns_add("ns1.toto.com", "dyn.toto.com", "127.0.0.1")
RFC2136
"""
zone = name[name.find(".") + 1:]
r = sr1(IP(dst=nameserver) / UDP() / DNS(opcode=5,
qd=[DNSQR(qname=zone, qtype="SOA")], # noqa: E501
ns=[DNSRR(rrname=name, type="A",
ttl=ttl, rdata=rdata)]),
verbose=0, timeout=5)
if r and r.haslayer(DNS):
return r.getlayer(DNS).rcode
else:
return -1 | 0.001305 |
def filter(self, node, condition):
"""
This method accepts a node and the condition function; a
generator will be returned to yield the nodes that got matched
by the condition.
"""
if not isinstance(node, Node):
raise TypeError('not a node')
for child in node:
if condition(child):
yield child
for subchild in self.filter(child, condition):
yield subchild | 0.004158 |
def download_file(self, project_name, remote_path, local_path=None):
"""
Download a file from a project
When local_path is None the file will be downloaded to the base filename
:param project_name: str: name of the project to download a file from
:param remote_path: str: remote path specifying which file to download
:param local_path: str: optional argument to customize where the file will be downloaded to
"""
project = self._get_project_for_name(project_name)
file = project.get_child_for_path(remote_path)
file.download_to_path(local_path) | 0.0064 |
def get_targets(self, predicate=None):
"""Returns the candidate targets this task should act on.
This method is a convenience for processing optional transitivity. Tasks may bypass it
and make their own decisions on which targets to act on.
NOTE: This method was introduced in 2018, so at the time of writing few tasks consult it.
Instead, they query self.context.targets directly.
TODO: Fix up existing targets to consult this method, for uniformity.
Note that returned targets have not been checked for invalidation. The caller should do
so as needed, typically by calling self.invalidated().
:API: public
"""
initial_targets = (self.context.targets(predicate) if self.act_transitively
else list(filter(predicate, self.context.target_roots)))
if not self.target_filtering_enabled:
return initial_targets
else:
return self._filter_targets(initial_targets) | 0.00733 |
def capture_url_missing_namespace(self, node):
"""
Capture missing namespace in url include.
"""
for arg in node.args:
if not(isinstance(arg, ast.Call) and isinstance(arg.func, ast.Name)):
continue
if arg.func.id != 'include':
continue
for keyword in arg.keywords:
if keyword.arg == 'namespace':
return
return DJ05(
lineno=node.lineno,
col=node.col_offset,
) | 0.005445 |
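A hedged illustration of what the check above flags, written against the legacy ``django.conf.urls`` API (the module paths are hypothetical):

from django.conf.urls import include, url

urlpatterns = [
    url(r'^api/', include('api.urls')),                       # flagged: include() without a namespace (DJ05)
    url(r'^shop/', include('shop.urls', namespace='shop')),   # not flagged
]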
def cfset_to_set(cfset):
"""Convert CFSet to python set."""
count = cf.CFSetGetCount(cfset)
buffer = (c_void_p * count)()
cf.CFSetGetValues(cfset, byref(buffer))
return set([cftype_to_value(c_void_p(buffer[i])) for i in range(count)]) | 0.003937 |
def pageassert(func):
'''
    Decorator that asserts the page number is within the valid range (1-40)
'''
@wraps(func)
def wrapper(*args, **kwargs):
if args[0] < 1 or args[0] > 40:
raise ValueError('Page Number not found')
return func(*args, **kwargs)
return wrapper | 0.003623 |
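A hypothetical usage sketch for the decorator above (the 1-40 page range comes from the check in the wrapper):

@pageassert
def fetch_page(page, per_page=20):
    return {"page": page, "per_page": per_page}

print(fetch_page(3))    # {'page': 3, 'per_page': 20}
fetch_page(0)           # raises ValueError('Page Number not found')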
def vel_disp(self, kwargs_profile, kwargs_aperture, kwargs_light, kwargs_anisotropy, num=1000):
"""
        Computes the averaged LOS velocity dispersion in the slit (convolved).
        :param kwargs_profile: keyword arguments of the mass profile
        :param kwargs_aperture: keyword arguments of the aperture (slit)
        :param kwargs_light: keyword arguments of the light profile
        :param kwargs_anisotropy: keyword arguments of the anisotropy model
        :param num: number of Monte Carlo draws to average over
        :return: square root of the averaged LOS velocity dispersion squared
"""
sigma_s2_sum = 0
for i in range(0, num):
sigma_s2_draw = self._vel_disp_one(kwargs_profile, kwargs_aperture, kwargs_light, kwargs_anisotropy)
sigma_s2_sum += sigma_s2_draw
sigma_s2_average = sigma_s2_sum/num
return np.sqrt(sigma_s2_average) | 0.006221 |
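A standalone sketch of the Monte-Carlo averaging pattern above: draw ``num`` samples of sigma^2, average them, then take the square root (the draw function here is a toy stand-in for ``_vel_disp_one``):

import numpy as np

def draw_sigma2():
    return np.random.normal(loc=4.0e4, scale=1.0e3)    # hypothetical (km/s)^2 draws

num = 1000
sigma2_average = sum(draw_sigma2() for _ in range(num)) / num
print(np.sqrt(sigma2_average))                          # roughly 200 km/s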
def ResidualFeedForward(feature_depth,
feedforward_depth,
dropout,
mode):
"""Residual feed-forward layer with normalization at start."""
return layers.Residual(
layers.LayerNorm(),
layers.Dense(feedforward_depth),
layers.Relu(),
layers.Dropout(rate=dropout, mode=mode),
layers.Dense(feature_depth),
layers.Dropout(rate=dropout, mode=mode)
) | 0.006608 |
def _xdate_setter(self, xdate_format='%Y-%m-%d'):
"""Makes x axis a date axis with auto format
Parameters
----------
xdate_format: String
\tSets date formatting
"""
if xdate_format:
# We have to validate xdate_format. If wrong then bail out.
try:
self.autofmt_xdate()
datetime.date(2000, 1, 1).strftime(xdate_format)
except ValueError:
self.autofmt_xdate()
return
self.__axes.xaxis_date()
formatter = dates.DateFormatter(xdate_format)
self.__axes.xaxis.set_major_formatter(formatter) | 0.002937 |
def warp_image_by_corner_points_projection(corner_points, image):
"""Given corner points of a Sudoku, warps original selection to a square image.
    :param corner_points: the four corner points of the Sudoku selection
    :type corner_points: list
    :param image: the original image to warp
    :type image: numpy.ndarray
    :return: the warped selection, resized to 500x500 pixels
    :rtype: numpy.ndarray
"""
# Clarify by storing in named variables.
top_left, top_right, bottom_left, bottom_right = np.array(corner_points)
top_edge = np.linalg.norm(top_right - top_left)
bottom_edge = np.linalg.norm(bottom_right - bottom_left)
left_edge = np.linalg.norm(top_left - bottom_left)
right_edge = np.linalg.norm(top_right - bottom_right)
L = int(np.ceil(max([top_edge, bottom_edge, left_edge, right_edge])))
src = np.array([top_left, top_right, bottom_left, bottom_right])
dst = np.array([[0, 0], [L - 1, 0], [0, L - 1], [L - 1, L - 1]])
tr = ProjectiveTransform()
tr.estimate(dst, src)
warped_image = warp(image, tr, output_shape=(L, L))
out = resize(warped_image, (500, 500))
return out | 0.001978 |
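A sketch of how the intermediate output size ``L`` is chosen above: it is the ceiling of the longest edge of the corner quadrilateral (the corner coordinates below are hypothetical):

import numpy as np

top_left, top_right = np.array([10, 12]), np.array([210, 8])
bottom_left, bottom_right = np.array([14, 215]), np.array([205, 220])
edges = [np.linalg.norm(top_right - top_left),
         np.linalg.norm(bottom_right - bottom_left),
         np.linalg.norm(top_left - bottom_left),
         np.linalg.norm(top_right - bottom_right)]
L = int(np.ceil(max(edges)))
print(L)   # side length, in pixels, of the square the selection is warped onto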
def _dot_product(self, imgs_to_decode):
""" Decoding using the dot product.
"""
return np.dot(imgs_to_decode.T, self.feature_images).T | 0.012658 |
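A shape sketch of the dot-product decoding above: with voxels on the first axis of both arrays, the result has one row per feature and one column per decoded image (the sizes are hypothetical):

import numpy as np

imgs_to_decode = np.random.rand(1000, 3)     # 1000 voxels, 3 images to decode
feature_images = np.random.rand(1000, 5)     # 1000 voxels, 5 feature maps
out = np.dot(imgs_to_decode.T, feature_images).T
print(out.shape)                             # (5, 3)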
def main():
"""
Run autosub as a command-line program.
"""
parser = argparse.ArgumentParser()
parser.add_argument('source_path', help="Path to the video or audio file to subtitle",
nargs='?')
parser.add_argument('-C', '--concurrency', help="Number of concurrent API requests to make",
type=int, default=DEFAULT_CONCURRENCY)
parser.add_argument('-o', '--output',
help="Output path for subtitles (by default, subtitles are saved in \
the same directory and name as the source path)")
parser.add_argument('-F', '--format', help="Destination subtitle format",
default=DEFAULT_SUBTITLE_FORMAT)
parser.add_argument('-S', '--src-language', help="Language spoken in source file",
default=DEFAULT_SRC_LANGUAGE)
parser.add_argument('-D', '--dst-language', help="Desired language for the subtitles",
default=DEFAULT_DST_LANGUAGE)
parser.add_argument('-K', '--api-key',
help="The Google Translate API key to be used. \
(Required for subtitle translation)")
parser.add_argument('--list-formats', help="List all available subtitle formats",
action='store_true')
parser.add_argument('--list-languages', help="List all available source/destination languages",
action='store_true')
args = parser.parse_args()
if args.list_formats:
print("List of formats:")
for subtitle_format in FORMATTERS:
print("{format}".format(format=subtitle_format))
return 0
if args.list_languages:
print("List of all languages:")
for code, language in sorted(LANGUAGE_CODES.items()):
print("{code}\t{language}".format(code=code, language=language))
return 0
if not validate(args):
return 1
try:
subtitle_file_path = generate_subtitles(
source_path=args.source_path,
concurrency=args.concurrency,
src_language=args.src_language,
dst_language=args.dst_language,
api_key=args.api_key,
subtitle_file_format=args.format,
output=args.output,
)
print("Subtitles file created at {}".format(subtitle_file_path))
except KeyboardInterrupt:
return 1
return 0 | 0.003264 |
def network_from_edgelist(self, edgelist):
"""
Defines a network from an array.
Parameters
----------
edgelist : list of lists.
            A list of lists of length 3 or 4. For binary networks each sublist should be [i, j, t] where i and j are node indices and t is the temporal index.
For weighted networks each sublist should be [i, j, t, weight].
"""
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if len(edgelist[0]) == 4:
colnames = ['i', 'j', 't', 'weight']
elif len(edgelist[0]) == 3:
colnames = ['i', 'j', 't']
self.network = pd.DataFrame(edgelist, columns=colnames)
self._update_network() | 0.004 |
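A small example of the accepted edgelist format (the owning ``TemporalNetwork`` class is assumed, so the call is shown commented out):

weighted_edgelist = [
    [0, 1, 0, 0.5],     # edge 0-1 at time 0 with weight 0.5
    [0, 2, 0, 1.0],
    [1, 2, 1, 0.25],
]
# tnet = teneto.TemporalNetwork()
# tnet.network_from_edgelist(weighted_edgelist)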
def config_files():
'''
Get list of currently used config files.
'''
sensu_loaded_tempfile = os.environ.get('SENSU_LOADED_TEMPFILE')
sensu_config_files = os.environ.get('SENSU_CONFIG_FILES')
sensu_v1_config = '/etc/sensu/config.json'
sensu_v1_confd = '/etc/sensu/conf.d'
if sensu_loaded_tempfile and os.path.isfile(sensu_loaded_tempfile):
with open(sensu_loaded_tempfile, 'r') as tempfile:
contents = tempfile.read()
return contents.split(':')
elif sensu_config_files:
return sensu_config_files.split(':')
else:
files = []
filenames = []
if os.path.isfile(sensu_v1_config):
files = [sensu_v1_config]
if os.path.isdir(sensu_v1_confd):
filenames = [f for f in os.listdir(sensu_v1_confd)
if os.path.splitext(f)[1] == '.json']
for filename in filenames:
files.append('{}/{}'.format(sensu_v1_confd, filename))
return files | 0.000986 |
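A quick sketch of the environment-variable path above (hypothetical paths), which short-circuits the /etc/sensu discovery:

import os

# assumes SENSU_LOADED_TEMPFILE is not set, so the SENSU_CONFIG_FILES branch is taken
os.environ['SENSU_CONFIG_FILES'] = '/etc/sensu/config.json:/etc/sensu/conf.d/checks.json'
print(config_files())   # ['/etc/sensu/config.json', '/etc/sensu/conf.d/checks.json']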
def get_events_by_tripI_and_dsut(self, trip_I, day_start_ut,
start_ut=None, end_ut=None):
"""
Get trip data as a list of events (i.e. dicts).
Parameters
----------
trip_I : int
shorthand index of the trip.
day_start_ut : int
the start time of the day in unix time (seconds)
start_ut : int, optional
consider only events that start after this time
If not specified, this filtering is not applied.
end_ut : int, optional
Consider only events that end before this time
If not specified, this filtering is not applied.
Returns
-------
events: list of dicts
each element contains the following data:
from_stop: int (stop_I)
to_stop: int (stop_I)
dep_time_ut: int (in unix time)
arr_time_ut: int (in unix time)
"""
# for checking input:
        if start_ut is not None:
            assert day_start_ut <= start_ut
        if end_ut is not None:
            assert day_start_ut <= end_ut
        if start_ut is not None and end_ut is not None:
            assert start_ut <= end_ut
events = []
# check that trip takes place on that day:
if not self.tripI_takes_place_on_dsut(trip_I, day_start_ut):
return events
query = """SELECT stop_I, arr_time_ds+?, dep_time_ds+?
FROM stop_times JOIN stops USING(stop_I)
WHERE
(trip_I = ?)
"""
params = [day_start_ut, day_start_ut,
trip_I]
if start_ut:
query += "AND (dep_time_ds > ?-?)"
params += [start_ut, day_start_ut]
if end_ut:
query += "AND (arr_time_ds < ?-?)"
params += [end_ut, day_start_ut]
query += "ORDER BY arr_time_ds"
cur = self.conn.cursor()
rows = cur.execute(query, params)
stop_data = list(rows)
for i in range(len(stop_data) - 1):
event = {
"from_stop": stop_data[i][0],
"to_stop": stop_data[i + 1][0],
"dep_time_ut": stop_data[i][2],
"arr_time_ut": stop_data[i + 1][1]
}
events.append(event)
return events | 0.001316 |
def setDirection(self, outputLocation, inputLocation):
"""
Sets the output-to-input direction by setting both the locations \
at the same time.
:param outputLocation | <XConnectionLocation>
:param inputLocation | <XConnectionLocation>
"""
self.setOutputLocation(outputLocation)
self.setInputLocation(inputLocation) | 0.007353 |