text (string, 78 to 104k characters) | score (float64, 0 to 0.18)
---|---|
def humanize_speed(c_per_sec):
"""convert a speed in counts per second to counts per [s, min, h, d], choosing the smallest value greater zero.
"""
scales = [60, 60, 24]
units = ['c/s', 'c/min', 'c/h', 'c/d']
speed = c_per_sec
i = 0
if speed > 0:
while (speed < 1) and (i < len(scales)):
speed *= scales[i]
i += 1
return "{:.1f}{}".format(speed, units[i]) | 0.007026 |
def get_instance(self, payload):
"""
Build an instance of MonthlyInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
"""
return MonthlyInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | 0.011628 |
def _get_argv(index, default=None):
''' get the argv input argument defined by index. Return the default
attribute if that argument does not exist
'''
return _sys.argv[index] if len(_sys.argv) > index else default | 0.004292 |
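A minimal usage sketch under an assumed command line of `python script.py input.txt`:
_get_argv(1)              # -> 'input.txt'
_get_argv(2, 'out.txt')   # -> 'out.txt' (index 2 does not exist, so the default is returned)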
def datetime(self, to_timezone=None, naive=False):
"""Returns a timezone-aware datetime...
Defaulting to UTC (as it should).
Keyword Arguments:
to_timezone {str} -- timezone to convert to (default: None/UTC)
naive {bool} -- if True,
the tzinfo is simply dropped (default: False)
"""
if to_timezone:
dt = self.datetime().astimezone(pytz.timezone(to_timezone))
else:
dt = Datetime.utcfromtimestamp(self._epoch)
dt = dt.replace(tzinfo=self._tz)
# Strip the timezone info if requested to do so.
if naive:
return dt.replace(tzinfo=None)
else:
if dt.tzinfo is None:
dt = dt.replace(tzinfo=self._tz)
return dt | 0.002478 |
def stop_capture(self, cap_file_name=None, cap_file_format=IxeCapFileFormat.mem, *ports):
""" Stop capture on ports.
:param cap_file_name: prefix for the capture file name.
Capture files for each port are saved as individual pcap files named 'prefix' + 'URI' + '.pcap'.
:param cap_file_format: exported file format
:param ports: list of ports to stop capture on; if empty, stop capture on all ports.
:return: dictionary (port, nPackets)
"""
port_list = self.set_ports_list(*ports)
self.api.call_rc('ixStopCapture {}'.format(port_list))
nPackets = {}
for port in (ports if ports else self.ports.values()):
nPackets[port] = port.capture.nPackets
if nPackets[port]:
if cap_file_format is not IxeCapFileFormat.mem:
port.cap_file_name = cap_file_name + '-' + port.uri.replace(' ', '_') + '.' + cap_file_format.name
port.captureBuffer.export(port.cap_file_name)
return nPackets | 0.005792 |
def _get_qvm_with_topology(name: str, topology: nx.Graph,
noisy: bool = False,
requires_executable: bool = True,
connection: ForestConnection = None,
qvm_type: str = 'qvm') -> QuantumComputer:
"""Construct a QVM with the provided topology.
:param name: A name for your quantum computer. This field does not affect behavior of the
constructed QuantumComputer.
:param topology: A graph representing the desired qubit connectivity.
:param noisy: Whether to include a generic noise model. If you want more control over
the noise model, please construct your own :py:class:`NoiseModel` and use
:py:func:`_get_qvm_qc` instead of this function.
:param requires_executable: Whether this QVM will refuse to run a :py:class:`Program` and
only accept the result of :py:func:`compiler.native_quil_to_executable`. Setting this
to True better emulates the behavior of a QPU.
:param connection: An optional :py:class:`ForestConnection` object. If not specified,
the default values for URL endpoints will be used.
:param qvm_type: The type of QVM. Either 'qvm' or 'pyqvm'.
:return: A pre-configured QuantumComputer
"""
# Note to developers: consider making this function public and advertising it.
device = NxDevice(topology=topology)
if noisy:
noise_model = decoherence_noise_with_asymmetric_ro(gates=gates_in_isa(device.get_isa()))
else:
noise_model = None
return _get_qvm_qc(name=name, qvm_type=qvm_type, connection=connection, device=device,
noise_model=noise_model, requires_executable=requires_executable) | 0.006297 |
def read_frames(self, n, channels=None):
"""Read ``n`` frames from the track, starting
with the current frame
:param integer n: Number of frames to read
:param integer channels: Number of channels to return (default
is number of channels in track)
:returns: Next ``n`` frames from the track, starting with ``current_frame``
:rtype: numpy array
"""
if channels is None:
channels = self.channels
if channels == 1:
out = np.zeros(n)
elif channels == 2:
out = np.zeros((n, 2))
else:
print "Input needs to be 1 or 2 channels"
return
if n > self.remaining_frames():
print "Trying to retrieve too many frames!"
print "Asked for", n
n = self.remaining_frames()
print "Returning", n
if self.channels == 1 and channels == 1:
out = self.sound.read_frames(n)
elif self.channels == 1 and channels == 2:
frames = self.sound.read_frames(n)
out = np.vstack((frames.copy(), frames.copy())).T
elif self.channels == 2 and channels == 1:
frames = self.sound.read_frames(n)
out = np.mean(frames, axis=1)
elif self.channels == 2 and channels == 2:
out[:n, :] = self.sound.read_frames(n)
self.current_frame += n
return out | 0.002083 |
def upgrade():
"""Upgrade database."""
op.drop_constraint(u'fk_access_actionsusers_user_id_accounts_user',
'access_actionsusers', type_='foreignkey')
op.drop_index(op.f('ix_access_actionsusers_user_id'),
table_name='access_actionsusers')
op.alter_column('access_actionsusers', 'user_id', nullable=False,
existing_type=sa.Integer())
op.create_index(op.f('ix_access_actionsusers_user_id'),
'access_actionsusers', ['user_id'], unique=False)
op.create_foreign_key(op.f('fk_access_actionsusers_user_id_accounts_user'),
'access_actionsusers', 'accounts_user', ['user_id'],
['id'], ondelete='CASCADE') | 0.001328 |
def step(self, substeps=2):
'''Step the world forward by one frame.
Parameters
----------
substeps : int, optional
Split the step into this many sub-steps. This helps to prevent the
time delta for an update from being too large.
'''
self.frame_no += 1
dt = self.dt / substeps
for _ in range(substeps):
self.ode_contactgroup.empty()
self.ode_space.collide(None, self.on_collision)
self.ode_world.step(dt) | 0.003788 |
def with_aad_device_authentication(cls, connection_string, authority_id="common"):
"""Creates a KustoConnection string builder that will authenticate with AAD application and
password.
:param str connection_string: Kusto connection string should by of the format: https://<clusterName>.kusto.windows.net
:param str authority_id: optional param. defaults to "common"
"""
kcsb = cls(connection_string)
kcsb[kcsb.ValidKeywords.aad_federated_security] = True
kcsb[kcsb.ValidKeywords.authority_id] = authority_id
return kcsb | 0.008306 |
def savecands(d, cands, domock=False):
""" Save all candidates in pkl file for later aggregation and filtering.
domock is an option to save a simulated cands file.
"""
with open(getcandsfile(d, domock=domock), 'w') as pkl:
pickle.dump(d, pkl)
pickle.dump(cands, pkl) | 0.003413 |
def lcumsum (inlist):
"""
Returns a list consisting of the cumulative sum of the items in the
passed list.
Usage: lcumsum(inlist)
"""
newlist = copy.deepcopy(inlist)
for i in range(1,len(newlist)):
newlist[i] = newlist[i] + newlist[i-1]
return newlist | 0.010791 |
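An illustrative call (input values assumed for the example); note the deep copy leaves the input list unchanged:
lcumsum([1, 2, 3, 4])   # -> [1, 3, 6, 10]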
def singlerun(job, task_id=0, job_id=0):
"""This task is for an example."""
import ecell4_base
import ecell4
import ecell4.util.simulation
import ecell4.util.decorator
print('ecell4_base.__version__ = {:s}'.format(ecell4_base.__version__))
print('ecell4.__version__ = {:s}'.format(ecell4.__version__))
print('job={}, task_id={}, job_id={}'.format(str(job), task_id, job_id))
with ecell4.util.decorator.reaction_rules():
A + B == C | (0.01, 0.3)
res = ecell4.util.simulation.run_simulation(
1.0,
y0={'A': job[0], 'B': job[1], 'C': job[2]},
rndseed=job_id,
solver='gillespie',
return_type='array')
print('A simulation was successfully done.')
return res | 0.001332 |
def DragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = 1, waitTime: float = OPERATION_WAIT_TIME) -> None:
"""
Simulate mouse left button drag from point x1, y1 drop to point x2, y2.
x1: int.
y1: int.
x2: int.
y2: int.
moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.
waitTime: float.
"""
PressMouse(x1, y1, 0.05)
MoveTo(x2, y2, moveSpeed, 0.05)
ReleaseMouse(waitTime) | 0.004474 |
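A hedged usage sketch of DragDrop (coordinates and speed are illustrative):
DragDrop(100, 200, 400, 250, moveSpeed=0.5)   # drag at half speed, then wait the default time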
def _get_spot_history(ctx, instance_type):
"""
Returns list of 1,000 most recent spot market data points represented as SpotPriceHistory
objects. Note: The most recent object/data point will be first in the list.
:rtype: list[SpotPriceHistory]
"""
one_week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
spot_data = ctx.ec2.get_spot_price_history(start_time=one_week_ago.isoformat(),
instance_type=instance_type,
product_description="Linux/UNIX")
spot_data.sort(key=attrgetter("timestamp"), reverse=True)
return spot_data | 0.006015 |
def _ParseApplicationPasswordRecord(self, parser_mediator, record):
"""Extracts the information from an application password record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
record (dict[str, object]): database record.
Raises:
ParseError: if the application password record cannot be parsed.
"""
key = record.get('_key_', None)
if not key or not key.startswith(b'ssgp'):
raise errors.ParseError((
'Unsupported application password record key value does not start '
'with: "ssgp".'))
event_data = KeychainApplicationRecordEventData()
event_data.account_name = self._ParseBinaryDataAsString(
parser_mediator, record['acct'])
event_data.comments = self._ParseBinaryDataAsString(
parser_mediator, record['crtr'])
event_data.entry_name = self._ParseBinaryDataAsString(
parser_mediator, record['PrintName'])
ssgp_hash = codecs.encode(key[4:], 'hex')
event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')
event_data.text_description = self._ParseBinaryDataAsString(
parser_mediator, record['desc'])
date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data) | 0.003425 |
def ifposition(parser, token):
"""
Syntax::
{% ifposition POSITION_NAME ... for CATEGORY [nofallback] %}
{% else %}
{% endifposition %}
"""
bits = list(token.split_contents())
end_tag = 'end' + bits[0]
nofallback = False
if bits[-1] == 'nofallback':
nofallback = True
bits.pop()
if len(bits) >= 4 and bits[-2] == 'for':
category = template.Variable(bits.pop())
pos_names = bits[1:-1]
else:
raise TemplateSyntaxError('Invalid syntax: {% ifposition POSITION_NAME ... for CATEGORY [nofallback] %}')
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = template.NodeList()
return IfPositionNode(category, pos_names, nofallback, nodelist_true, nodelist_false) | 0.003158 |
def print_stats(self):
"""
Print a series of relevant stats about a full execution. This function
is meant to be called at the end of the program.
"""
stats = self.calculate()
total_time = '%d:%02d:%02d' % (stats['total_time'] // 3600,
(stats['total_time'] % 3600) // 60,
stats['total_time'] % 60)
output = """\
Total runtime: {total_time}
Lyrics found: {found}
Lyrics not found:{notfound}
Most useful source:\
{best} ({best_count} lyrics found) ({best_rate:.2f}% success rate)
Least useful source:\
{worst} ({worst_count} lyrics found) ({worst_rate:.2f}% success rate)
Fastest website to scrape: {fastest} (Avg: {fastest_time:.2f}s per search)
Slowest website to scrape: {slowest} (Avg: {slowest_time:.2f}s per search)
Average time per website: {avg_time:.2f}s
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxx PER WEBSITE STATS: xxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
"""
output = output.format(total_time=total_time,
found=stats['found'],
notfound=stats['notfound'],
best=stats['best'][0].capitalize(),
best_count=stats['best'][1],
best_rate=stats['best'][2],
worst=stats['worst'][0].capitalize(),
worst_count=stats['worst'][1],
worst_rate=stats['worst'][2],
fastest=stats['fastest'][0].capitalize(),
fastest_time=stats['fastest'][1],
slowest=stats['slowest'][0].capitalize(),
slowest_time=stats['slowest'][1],
avg_time=self.avg_time())
for source in sources:
stat = str(self.source_stats[source.__name__])
output += f'\n{source.__name__.upper()}\n{stat}\n'
print(output) | 0.000956 |
def tileBounds(self, zoom, tileCol, tileRow):
"Returns the bounds of a tile in LV03 (EPSG:21781)"
assert zoom in range(0, len(self.RESOLUTIONS))
# 0,0 at top left: y axis down and x axis right
tileSize = self.tileSize(zoom)
minX = self.MINX + tileCol * tileSize
maxX = self.MINX + (tileCol + 1) * tileSize
if self.originCorner == 'bottom-left':
minY = self.MINY + tileRow * tileSize
maxY = self.MINY + (tileRow + 1) * tileSize
elif self.originCorner == 'top-left':
minY = self.MAXY - (tileRow + 1) * tileSize
maxY = self.MAXY - tileRow * tileSize
return [minX, minY, maxX, maxY] | 0.002861 |
def runGetResults(cmd, stdout=True, stderr=True, encoding=sys.getdefaultencoding()):
'''
runGetResults - Simple method to run a command and return the results of the execution as a dict.
@param cmd <str/list> - String of command and arguments, or list of command and arguments
If cmd is a string, the command will be executed as if ran exactly as written in a shell. This mode supports shell-isms like '&&' and '|'
If cmd is a list, the first element will be the executable, and further elements are arguments that will be passed to that executable.
@param stdout <True/False> - Default True, Whether to gather and include program's stdout data in results.
If False, that data the program prints to stdout will just be output to the current tty and not recorded.
If True, it will NOT be output to the tty, and will be recorded under the key "stdout" in the return dict.
@param stderr <True/False or "stdout"/subprocess.STDOUT> - Default True, Whether to gather and include program's stderr data in results, or to combine with "stdout" data.
If False, the data the program prints to stderr will just be output to the current tty and not recorded
If True, it will NOT be output to the tty, and will be recorded under the key "stderr" in the return dict.
If "stdout" or subprocess.STDOUT - stderr data will be blended with stdout data. This requires that stdout=True.
@param encoding <None/str> - Default sys.getdefaultencoding(), the program's output will automatically be decoded using the provided codec (e.x. "utf-8" or "ascii").
If None or False-ish, data will not be decoded (i.e. in python3 will be "bytes" type)
If unsure, leave this as it's default value, or provide "utf-8"
@return <dict> - Dict of results. Has following keys:
'returnCode' - <int> - Always present, included the integer return-code from the command.
'stdout' <unciode/str/bytes (depending on #encoding)> - Present if stdout=True, contains data output by program to stdout, or stdout+stderr if stderr param is "stdout"/subprocess.STDOUT
'stderr' <unicode/str/bytes (depending on #encoding)> - Present if stderr=True, contains data output by program to stderr.
@raises - SimpleCommandFailure if it cannot launch the given command, for reasons such as: cannot find the executable, or no permission to execute, etc
'''
if stderr in ('stdout', subprocess.STDOUT):
stderr = subprocess.STDOUT
elif stderr == True or stderr == subprocess.PIPE:
stderr = subprocess.PIPE
else:
stderr = None
if stdout == True or stdout == subprocess.STDOUT:
stdout = subprocess.PIPE
else:
stdout = None
if stderr == subprocess.STDOUT and stdout is None:
raise ValueError('Cannot redirect stderr to stdout if stdout is not captured.')
if issubclass(cmd.__class__, (list, tuple)):
shell = False
else:
shell = True
try:
pipe = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)
except Exception as e:
try:
if shell is True:
cmdStr = ' '.join(cmd)
else:
cmdStr = cmd
except:
cmdStr = repr(cmd)
raise SimpleCommandFailure('Failed to execute "%s": %s' %(cmdStr, str(e)), returnCode=255)
streams = []
fileNoToKey = {}
ret = {}
if stdout == subprocess.PIPE:
streams.append(pipe.stdout)
fileNoToKey[pipe.stdout.fileno()] = 'stdout'
ret['stdout'] = []
if stderr == subprocess.PIPE:
streams.append(pipe.stderr)
fileNoToKey[pipe.stderr.fileno()] = 'stderr'
ret['stderr'] = []
returnCode = None
time.sleep(.02)
while returnCode is None or streams:
returnCode = pipe.poll()
while True:
(readyToRead, junk1, junk2) = select.select(streams, [], [], .005)
if not readyToRead:
# Don't strangle CPU
time.sleep(.01)
break
for readyStream in readyToRead:
retKey = fileNoToKey[readyStream.fileno()]
curRead = readyStream.read()
if curRead in (b'', ''):
streams.remove(readyStream)
continue
ret[retKey].append(curRead)
for key in list(ret.keys()):
ret[key] = b''.join(ret[key])
if encoding:
ret[key] = ret[key].decode(encoding)
ret['returnCode'] = returnCode
return ret | 0.006583 |
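A brief usage sketch of runGetResults, assuming a POSIX shell is available; the string form of cmd is used so shell-isms like '&&' work:
results = runGetResults('echo hello && echo oops 1>&2')
results['returnCode']   # -> 0
results['stdout']       # -> 'hello\n'
results['stderr']       # -> 'oops\n'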
def coverage(self):
"""
If there is a .coverage file available, this will attempt to form a DataFrame with that information in it, which
will contain the columns:
* filename
* lines_covered
* total_lines
* coverage
If it can't be found or parsed, an empty DataFrame of that form will be returned.
:return: DataFrame
"""
if not self.has_coverage():
return DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage'])
with open(self.git_dir + os.sep + '.coverage', 'r') as f:
blob = f.read()
blob = blob.split('!')[2]
cov = json.loads(blob)
ds = []
for filename in cov['lines'].keys():
idx = 0
try:
with open(filename, 'r') as f:
for idx, _ in enumerate(f):
pass
except FileNotFoundError as e:
if self.verbose:
warnings.warn('Could not find file %s for coverage' % (filename, ))
num_lines = idx + 1
try:
short_filename = filename.split(self.git_dir + os.sep)[1]
ds.append([short_filename, len(cov['lines'][filename]), num_lines])
except IndexError as e:
if self.verbose:
warnings.warn('Could not find file %s for coverage' % (filename, ))
df = DataFrame(ds, columns=['filename', 'lines_covered', 'total_lines'])
df['coverage'] = df['lines_covered'] / df['total_lines']
return df | 0.005538 |
def to_dict(self):
"""Prepare a minimal dictionary with keys mapping to attributes for
the current instance.
"""
o_copy = copy.copy(self)
# Remove some stuff that is not likely related to AD attributes
for attribute in dir(self):
if attribute == 'logger' or attribute == 'adq':
try:
delattr(o_copy, attribute)
except AttributeError:
pass
return o_copy.__dict__ | 0.003984 |
def shlex_process_stdin(process_command, helptext):
"""
Use shlex to process stdin line-by-line.
Also prints help text.
Requires that @process_command be a Click command object, used for
processing single lines of input. helptext is prepended to the standard
message printed to interactive sessions.
"""
# if input is interactive, print help to stderr
if sys.stdin.isatty():
safeprint(
(
"{}\n".format(helptext) + "Lines are split with shlex in POSIX mode: "
"https://docs.python.org/library/shlex.html#parsing-rules\n"
"Terminate input with Ctrl+D or <EOF>\n"
),
write_to_stderr=True,
)
# use readlines() rather than implicit file read line looping to force
# python to properly capture EOF (otherwise, EOF acts as a flush and
# things get weird)
for line in sys.stdin.readlines():
# get the argument vector:
# do a shlex split to handle quoted paths with spaces in them
# also lets us have comments with #
argv = shlex.split(line, comments=True)
if argv:
try:
process_command.main(args=argv)
except SystemExit as e:
if e.code != 0:
raise | 0.001528 |
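A minimal sketch of how shlex_process_stdin might be wired up, assuming a hypothetical Click command named show_path (not part of the original code):
import click

@click.command()
@click.argument('path')
def show_path(path):
    # echo the shlex-split argument back, quotes and inline comments already handled
    click.echo(path)

# echo "'My Folder/notes.txt'  # trailing comment is ignored" | python driver.py
# would invoke show_path once per input line via:
shlex_process_stdin(show_path, 'Enter one path per line.')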
def close(self):
"""
Destructor for this audio interface. Waits for the threads to finish their
streams, if desired.
"""
with self.halting: # Avoid simultaneous "close" threads
if not self.finished: # Ignore all "close" calls, but the first,
self.finished = True # and any call to play would raise ThreadError
# Closes all playing AudioThread instances
while True:
with self.lock: # Ensure there's no other thread messing around
try:
thread = self._threads[0] # Needless to say: pop = deadlock
except IndexError: # Empty list
break # No more threads
if not self.wait:
thread.stop()
thread.join()
# Closes all recording RecStream instances
while self._recordings:
recst = self._recordings[-1]
recst.stop()
recst.take(inf) # Ensure it'll be closed
# Finishes
assert not self._pa._streams # No stream should survive
self._pa.terminate() | 0.017159 |
def get_parameter_dict(self, include_frozen=False):
"""
Get an ordered dictionary of the parameters
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``)
"""
return OrderedDict(zip(
self.get_parameter_names(include_frozen=include_frozen),
self.get_parameter_vector(include_frozen=include_frozen),
)) | 0.004246 |
def create_grade_system(self, grade_system_form):
"""Creates a new ``GradeSystem``.
arg: grade_system_form (osid.grading.GradeSystemForm): the
form for this ``GradeSystem``
return: (osid.grading.GradeSystem) - the new ``GradeSystem``
raise: IllegalState - ``grade_system_form`` already used in a
create transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``grade_system_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``grade_system_form`` did not originate
from ``get_grade_system_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.create_resource_template
collection = JSONClientValidated('grading',
collection='GradeSystem',
runtime=self._runtime)
if not isinstance(grade_system_form, ABCGradeSystemForm):
raise errors.InvalidArgument('argument type is not an GradeSystemForm')
if grade_system_form.is_for_update():
raise errors.InvalidArgument('the GradeSystemForm is for update only, not create')
try:
if self._forms[grade_system_form.get_id().get_identifier()] == CREATED:
raise errors.IllegalState('grade_system_form already used in a create transaction')
except KeyError:
raise errors.Unsupported('grade_system_form did not originate from this session')
if not grade_system_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
insert_result = collection.insert_one(grade_system_form._my_map)
self._forms[grade_system_form.get_id().get_identifier()] = CREATED
result = objects.GradeSystem(
osid_object_map=collection.find_one({'_id': insert_result.inserted_id}),
runtime=self._runtime,
proxy=self._proxy)
return result | 0.003974 |
def freeze(self) -> dict:
"""
Returns a dictionary of all settings set for this object, including
any values of its parents or hardcoded defaults.
"""
settings = {}
for key, v in self._h.defaults.items():
settings[key] = self._unserialize(v.value, v.type)
if self._parent:
settings.update(getattr(self._parent, self._h.attribute_name).freeze())
for key in self._cache():
settings[key] = self.get(key)
return settings | 0.005725 |
def create_examples_all():
"""create arduino/examples/all directory.
:rtype: None
"""
remove_examples_all()
examples_all_dir().mkdir()
for lib in libraries():
maindir = examples_all_dir() / lib.upper()[0:1] / lib
# libraries_dir() /
maindir.makedirs_p()
for ex in lib_examples(lib):
d = lib_example_dir(lib, ex)
if hasattr(os, 'symlink'):
d.symlink(maindir / ex)
else:
d.copytree(maindir / ex) | 0.001923 |
def _extract_header(time_series):
"""Return a copy of time_series with the points removed."""
return TimeSeries(
metric=time_series.metric,
resource=time_series.resource,
metric_kind=time_series.metric_kind,
value_type=time_series.value_type,
) | 0.003472 |
def depth_december_average_ground_temperature(self, value=None):
"""Corresponds to IDD Field `depth_december_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_december_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_december_average_ground_temperature`'.format(value))
self._depth_december_average_ground_temperature = value | 0.004657 |
def plot_covariance_ellipse(
mean, cov=None, variance=1.0, std=None,
ellipse=None, title=None, axis_equal=True, show_semiaxis=False,
facecolor=None, edgecolor=None,
fc='none', ec='#004080',
alpha=1.0, xlim=None, ylim=None,
ls='solid'):
"""
Deprecated function to plot a covariance ellipse. Use plot_covariance
instead.
See Also
--------
plot_covariance
"""
warnings.warn("deprecated, use plot_covariance instead", DeprecationWarning)
plot_covariance(mean=mean, cov=cov, variance=variance, std=std,
ellipse=ellipse, title=title, axis_equal=axis_equal,
show_semiaxis=show_semiaxis, facecolor=facecolor,
edgecolor=edgecolor, fc=fc, ec=ec, alpha=alpha,
xlim=xlim, ylim=ylim, ls=ls) | 0.00237 |
def search_payload(self, fields=None, query=None):
"""Reset ``errata_id`` from DB ID to ``errata_id``."""
payload = super(ContentViewFilterRule, self).search_payload(
fields, query)
if 'errata_id' in payload:
if not hasattr(self.errata, 'errata_id'):
self.errata = self.errata.read()
payload['errata_id'] = self.errata.errata_id
return payload | 0.004684 |
def avail_locations():
'''
Available locations
'''
response = list_common_lookups(kwargs={'lookup': 'ip.datacenter'})
ret = {}
for item in response['list']:
name = item['name']
ret[name] = item
return ret | 0.004 |
def root(reference_labels, estimated_labels):
"""Compare chords according to roots.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.root(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut.
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots = encode_many(estimated_labels, False)[0]
comparison_scores = (ref_roots == est_roots).astype(np.float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | 0.000608 |
def remover(self, id_brand):
"""Remove Brand from by the identifier.
:param id_brand: Identifier of the Brand. Integer value and greater than zero.
:return: None
:raise InvalidParameterError: The identifier of Brand is null and invalid.
:raise MarcaNaoExisteError: Brand not registered.
:raise MarcaError: The brand is associated with a model.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_brand):
raise InvalidParameterError(
u'The identifier of Brand is invalid or was not informed.')
url = 'brand/' + str(id_brand) + '/'
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml) | 0.004678 |
def calculate_P(self, T, P, method):
r'''Method to calculate pressure-dependent gas thermal conductivity
at temperature `T` and pressure `P` with a given method.
This method has no exception handling; see `TP_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate gas thermal conductivity, [K]
P : float
Pressure at which to calculate gas thermal conductivity, [Pa]
method : str
Name of the method to use
Returns
-------
kg : float
Thermal conductivity of the gas at T and P, [W/m/K]
'''
if method == ELI_HANLEY_DENSE:
Vmg = self.Vmg(T, P) if hasattr(self.Vmg, '__call__') else self.Vmg
Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
kg = eli_hanley_dense(T, self.MW, self.Tc, self.Vc, self.Zc, self.omega, Cvgm, Vmg)
elif method == CHUNG_DENSE:
Vmg = self.Vmg(T, P) if hasattr(self.Vmg, '__call__') else self.Vmg
Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
mug = self.mug(T, P) if hasattr(self.mug, '__call__') else self.mug
kg = chung_dense(T, self.MW, self.Tc, self.Vc, self.omega, Cvgm, Vmg, mug, self.dipole)
elif method == STIEL_THODOS_DENSE:
kg = self.T_dependent_property(T)
Vmg = self.Vmg(T, P) if hasattr(self.Vmg, '__call__') else self.Vmg
kg = stiel_thodos_dense(T, self.MW, self.Tc, self.Pc, self.Vc, self.Zc, Vmg, kg)
elif method == COOLPROP:
kg = PropsSI('L', 'T', T, 'P', P, self.CASRN)
elif method in self.tabular_data:
kg = self.interpolate_P(T, P, method)
return kg | 0.00385 |
def bits_from_str(cls, s):
""" Converts the output of __str__ into an integer. """
try:
if len(s) <= len(cls.__name__) or not s.startswith(cls.__name__):
return cls.bits_from_simple_str(s)
c = s[len(cls.__name__)]
if c == '(':
if not s.endswith(')'):
raise ValueError
return cls.bits_from_simple_str(s[len(cls.__name__)+1:-1])
elif c == '.':
member_name = s[len(cls.__name__)+1:]
return int(cls.__all_members__[member_name])
else:
raise ValueError
except ValueError as ex:
if ex.args:
raise
raise ValueError("%s.%s: invalid input: %r" % (cls.__name__, cls.bits_from_str.__name__, s))
except KeyError as ex:
raise ValueError("%s.%s: Invalid flag name '%s' in input: %r" % (cls.__name__, cls.bits_from_str.__name__,
ex.args[0], s)) | 0.00469 |
def ssn(self):
"""
Returns an 11-digit Belgian SSN called "rijksregisternummer" as a string.
The first 6 digits represent the birthdate with (in order) year, month and day.
The second group of 3 digits represents a sequence number (order of birth).
It is even for women and odd for men.
For men the range starts at 1 and ends at 997, for women 2 until 998.
The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97).
Divide those 9 digits by 97, subtract the remainder from 97 and that's the result.
For persons born in or after 2000, the 9 digit number needs to be proceeded by a 2
(add 2000000000) before the division by 97.
"""
# see http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch)
def _checksum(digits):
res = 97 - (digits % 97)
return res
# Generate a date (random)
mydate = self.generator.date()
# Convert it to an int
elms = mydate.split("-")
# Adjust for year 2000 if necessary
if elms[0][0] == '2':
above = True
else:
above = False
# Only keep the last 2 digits of the year
elms[0] = elms[0][2:4]
# Simulate the gender/sequence - should be 3 digits
seq = self.generator.random_int(1, 998)
# Right justify sequence and append to list
seq_str = "{:0>3}".format(seq)
elms.append(seq_str)
# Now convert list to an integer so the checksum can be calculated
date_as_int = int("".join(elms))
if above:
date_as_int += 2000000000
# Generate checksum
s = _checksum(date_as_int)
s_rjust = "{:0>2}".format(s)
# return result as a string
elms.append(s_rjust)
return "".join(elms) | 0.004294 |
def from_raw(self, rval: RawScalar, jptr: JSONPointer = "") -> ScalarValue:
"""Override the superclass method."""
res = self.type.from_raw(rval)
if res is None:
raise RawTypeError(jptr, self.type.yang_type() + " value")
return res | 0.007299 |
def update_collection_by_id(cls, collection_id, collection, **kwargs):
"""Update Collection
Update attributes of Collection
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_collection_by_id(collection_id, collection, async=True)
>>> result = thread.get()
:param async bool
:param str collection_id: ID of collection to update. (required)
:param Collection collection: Attributes of collection to update. (required)
:return: Collection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_collection_by_id_with_http_info(collection_id, collection, **kwargs)
else:
(data) = cls._update_collection_by_id_with_http_info(collection_id, collection, **kwargs)
return data | 0.005753 |
def getpaths(self):
'''
If we have children, use a list comprehension to instantiate new paths
objects to traverse.
'''
self.children = self.getchildren()
if self.children is None:
return
if self.paths is None:
self.paths = [Paths(self.screen,
os.path.join(self.name, child),
self.hidden,
self.picked,
self.expanded,
self.sized)
for child in self.children]
return self.paths | 0.003086 |
def update_pricing(kwargs=None, call=None):
'''
Download most recent pricing information from GCE and save locally
CLI Examples:
.. code-block:: bash
salt-cloud -f update_pricing my-gce-config
.. versionadded:: 2015.8.0
'''
url = 'https://cloudpricingcalculator.appspot.com/static/data/pricelist.json'
price_json = salt.utils.http.query(url, decode=True, decode_type='json')
outfile = os.path.join(
__opts__['cachedir'], 'gce-pricing.p'
)
with salt.utils.files.fopen(outfile, 'w') as fho:
salt.utils.msgpack.dump(price_json['dict'], fho)
return True | 0.00319 |
def delete(self):
r"""Delete this node from the parse tree.
Where applicable, this will remove all descendants of this node from
the parse tree.
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textit{\color{blue}{Silly}}\textit{keep me!}''')
>>> soup.textit.color.delete()
>>> soup
\textit{}\textit{keep me!}
>>> soup.textit.delete()
>>> soup
\textit{keep me!}
"""
# TODO: needs better abstraction for supports contents
parent = self.parent
if parent.expr._supports_contents():
parent.remove(self)
return
# TODO: needs abstraction for removing from arg
for arg in parent.args:
if self.expr in arg.contents:
arg.contents.remove(self.expr) | 0.003584 |
async def append(self, reply: Reply) \
-> None:
"""
Add the given Reply to this transaction store's list of responses.
Also add to processedRequests if not added previously.
"""
result = reply.result
identifier = result.get(f.IDENTIFIER.nm)
txnId = result.get(TXN_ID)
logger.debug("Reply being sent {}".format(reply))
if self._isNewTxn(identifier, reply, txnId):
self.addToProcessedTxns(identifier, txnId, reply)
if identifier not in self.responses:
self.responses[identifier] = asyncio.Queue()
await self.responses[identifier].put(reply) | 0.003021 |
def _flush(self):
"""
Flush all caches
Might be used after classes, methods or fields are added.
"""
self.classes_names = None
self.__cache_methods = None
self.__cached_methods_idx = None
self.__cache_fields = None
# cache methods and fields as well, otherwise the decompiler is quite slow
self.__cache_all_methods = None
self.__cache_all_fields = None | 0.006803 |
def overlay_images(self, canvas, data, whence=0.0):
"""Overlay data from any canvas image objects.
Parameters
----------
canvas : `~ginga.canvas.types.layer.DrawingCanvas`
Canvas containing possible images to overlay.
data : ndarray
Output array on which to overlay image data.
whence
See :meth:`get_rgb_object`.
"""
#if not canvas.is_compound():
if not hasattr(canvas, 'objects'):
return
for obj in canvas.get_objects():
if hasattr(obj, 'draw_image'):
obj.draw_image(self, data, whence=whence)
elif obj.is_compound() and (obj != canvas):
self.overlay_images(obj, data, whence=whence) | 0.003876 |
def summary_err_table(df, qvalues=[0, 0.01, 0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5]):
""" Summary error table for some typical q-values """
qvalues = to_one_dim_array(qvalues)
# find best matching fows in df for given qvalues:
ix = find_nearest_matches(np.float32(df.qvalue.values), qvalues)
# extract sub table
df_sub = df.iloc[ix].copy()
# remove duplicate hits, mark them with None / NAN:
for i_sub, (i0, i1) in enumerate(zip(ix, ix[1:])):
if i1 == i0:
df_sub.iloc[i_sub + 1, :] = None
# attach q values column
df_sub.qvalue = qvalues
# remove old index from original df:
df_sub.reset_index(inplace=True, drop=True)
return df_sub[['qvalue','pvalue','svalue','pep','fdr','fnr','fpr','tp','tn','fp','fn','cutoff']] | 0.017834 |
def forward_word_end_extend_selection(self, e): #
u"""Move forward to the end of the next word. Words are composed of
letters and digits."""
self.l_buffer.forward_word_end_extend_selection(self.argument_reset)
self.finalize() | 0.015267 |
def import_lv_grid_districts(self, session, lv_stations):
"""Imports all lv grid districts within given load area
Parameters
----------
session : sqlalchemy.orm.session.Session
Database session
Returns
-------
lv_grid_districts: :pandas:`pandas.DataFrame<dataframe>`
Table of lv_grid_districts
"""
# get ding0s' standard CRS (SRID)
srid = str(int(cfg_ding0.get('geo', 'srid')))
# SET SRID 3035 to achieve correct area calculation of lv_grid_district
# srid = '3035'
gw2kw = 10 ** 6 # load in database is in GW -> scale to kW
# 1. filter grid districts of relevant load area
lv_grid_districs_sqla = session.query(
self.orm['orm_lv_grid_district'].mvlv_subst_id,
self.orm['orm_lv_grid_district'].la_id,
self.orm['orm_lv_grid_district'].zensus_sum.label('population'),
(self.orm[
'orm_lv_grid_district'].sector_peakload_residential * gw2kw).
label('peak_load_residential'),
(self.orm['orm_lv_grid_district'].sector_peakload_retail * gw2kw).
label('peak_load_retail'),
(self.orm[
'orm_lv_grid_district'].sector_peakload_industrial * gw2kw).
label('peak_load_industrial'),
(self.orm[
'orm_lv_grid_district'].sector_peakload_agricultural * gw2kw).
label('peak_load_agricultural'),
((self.orm['orm_lv_grid_district'].sector_peakload_residential
+ self.orm['orm_lv_grid_district'].sector_peakload_retail
+ self.orm['orm_lv_grid_district'].sector_peakload_industrial
+ self.orm['orm_lv_grid_district'].sector_peakload_agricultural)
* gw2kw).label('peak_load'),
func.ST_AsText(func.ST_Transform(
self.orm['orm_lv_grid_district'].geom, srid)).label('geom'),
self.orm['orm_lv_grid_district'].sector_count_residential,
self.orm['orm_lv_grid_district'].sector_count_retail,
self.orm['orm_lv_grid_district'].sector_count_industrial,
self.orm['orm_lv_grid_district'].sector_count_agricultural,
(self.orm[
'orm_lv_grid_district'].sector_consumption_residential * gw2kw). \
label('sector_consumption_residential'),
(self.orm['orm_lv_grid_district'].sector_consumption_retail * gw2kw). \
label('sector_consumption_retail'),
(self.orm[
'orm_lv_grid_district'].sector_consumption_industrial * gw2kw). \
label('sector_consumption_industrial'),
(self.orm[
'orm_lv_grid_district'].sector_consumption_agricultural * gw2kw). \
label('sector_consumption_agricultural'),
self.orm['orm_lv_grid_district'].mvlv_subst_id). \
filter(self.orm['orm_lv_grid_district'].mvlv_subst_id.in_(
lv_stations.index.tolist())). \
filter(self.orm['version_condition_lvgd'])
# read data from db
lv_grid_districts = pd.read_sql_query(lv_grid_districs_sqla.statement,
session.bind,
index_col='mvlv_subst_id')
lv_grid_districts[
['sector_count_residential',
'sector_count_retail',
'sector_count_industrial',
'sector_count_agricultural']] = lv_grid_districts[
['sector_count_residential',
'sector_count_retail',
'sector_count_industrial',
'sector_count_agricultural']].fillna(0)
return lv_grid_districts | 0.005024 |
def forward(self, x):
"""
Arguments:
x (list[Tensor]): feature maps for each feature level.
Returns:
results (tuple[Tensor]): feature maps after FPN layers.
They are ordered from highest resolution first.
"""
last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
results = []
results.append(getattr(self, self.layer_blocks[-1])(last_inner))
for feature, inner_block, layer_block in zip(
x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]
):
if not inner_block:
continue
inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest")
inner_lateral = getattr(self, inner_block)(feature)
# TODO use size instead of scale to make it robust to different sizes
# inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:],
# mode='bilinear', align_corners=False)
last_inner = inner_lateral + inner_top_down
results.insert(0, getattr(self, layer_block)(last_inner))
if isinstance(self.top_blocks, LastLevelP6P7):
last_results = self.top_blocks(x[-1], results[-1])
results.extend(last_results)
elif isinstance(self.top_blocks, LastLevelMaxPool):
last_results = self.top_blocks(results[-1])
results.extend(last_results)
return tuple(results) | 0.004027 |
def blockwise_inner_join(data, left, foreign_key, right,
force_repeat=None,
foreign_key_name=None):
"""Perform a blockwise inner join.
Perform a blockwise inner join from names specified in ``left`` to
``right`` via ``foreign_key``: left->foreign_key->right.
Parameters
----------
data : array
A structured NumPy array.
left : array
Array of left side column names.
foreign_key : array or string
NumPy array or string ``foreign_key`` column name. This column can be
either an integer or an array of ints. If ``foreign_key`` is an array
of int column, left column will be treated according to left column
type:
* Scalar columns or columns in ``force_repeat`` will be repeated
* Array columns not in ``force_repeat`` will be assumed to the
same length as ``foreign_key`` and will be stretched by index
right : array
Array of right side column names. These are array columns that each
index ``foreign_key`` points to. These columns are assumed to have the
same length.
force_repeat : array, optional (default=None)
Array of left column names that will be forced to stretch even if it's
an array (useful when you want to emulate a multiple join).
foreign_key_name : str, optional (default=None)
The name of foreign key column in the output array.
Examples
--------
>>> import numpy as np
>>> from root_numpy import blockwise_inner_join
>>> test_data = np.array([
(1.0, np.array([11, 12, 13]), np.array([1, 0, 1]), 0, np.array([1, 2, 3])),
(2.0, np.array([21, 22, 23]), np.array([-1, 2, -1]), 1, np.array([31, 32, 33]))],
dtype=[('sl', np.float), ('al', 'O'), ('fk', 'O'), ('s_fk', np.int), ('ar', 'O')])
>>> blockwise_inner_join(test_data, ['sl', 'al'], test_data['fk'], ['ar'])
array([(1.0, 11, 2, 1), (1.0, 12, 1, 0), (1.0, 13, 2, 1), (2.0, 22, 33, 2)],
dtype=[('sl', '<f8'), ('al', '<i8'), ('ar', '<i8'), ('fk', '<i8')])
>>> blockwise_inner_join(test_data, ['sl', 'al'], test_data['fk'], ['ar'], force_repeat=['al'])
array([(1.0, [11, 12, 13], 2, 1), (1.0, [11, 12, 13], 1, 0),
(1.0, [11, 12, 13], 2, 1), (2.0, [21, 22, 23], 33, 2)],
dtype=[('sl', '<f8'), ('al', '|O8'), ('ar', '<i8'), ('fk', '<i8')])
"""
if isinstance(foreign_key, string_types):
foreign_key = data[foreign_key]
return _blockwise_inner_join(data, left, foreign_key, right,
force_repeat, foreign_key_name) | 0.001918 |
def invoke_step(self, context):
"""Invoke 'run_step' in the dynamically loaded step module.
Don't invoke this from outside the Step class. Use
pypyr.dsl.Step.run_step instead.
invoke_step just does the bare module step invocation, it does not
evaluate any of the decorator logic surrounding the step. So unless
you really know what you're doing, use run_step if you intend on
executing the step the same way pypyr does.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
logger.debug(f"running step {self.module}")
self.run_step_function(context)
logger.debug(f"step {self.module} done") | 0.002541 |
def dimensions(self):
"""(row_count, col_count) pair describing size of range."""
_, _, width, height = self._extents
return height, width | 0.012346 |
def get_conv_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper,
lambda1=None, lambda2=None, quadparam1=None,
quadparam2=None):
"""
Function to convert between masses and spins and locations in the mu
parameter space. Mu = Cartesian metric, but not principal components.
Parameters
-----------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of body 1.
spin2z : float
Spin of body 2.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper to use when getting the mu coordinates from the
lambda coordinates. This must be a key in metricParams.evals and
metricParams.evecs (ie. we must know how to do the transformation for
the given value of fUpper)
Returns
--------
mus : list of floats or numpy.arrays
Position of the system(s) in the mu coordinate system
"""
# Do this by masses -> lambdas
lambdas = get_chirp_params(mass1, mass2, spin1z, spin2z,
metricParams.f0, metricParams.pnOrder,
lambda1=lambda1, lambda2=lambda2,
quadparam1=quadparam1, quadparam2=quadparam2)
# and lambdas -> mus
mus = get_mu_params(lambdas, metricParams, fUpper)
return mus | 0.00063 |
async def unixconnect(path):
'''
Connect to a PF_UNIX server listening on the given path.
'''
reader, writer = await asyncio.open_unix_connection(path=path)
info = {'path': path, 'unix': True}
return await Link.anit(reader, writer, info=info) | 0.003759 |
def format_xpaths(xpath_map, *args, **kwargs):
""" :return: a copy of xpath_map, but with XPATHs formatted with ordered or keyword values """
formatted = {}.fromkeys(xpath_map)
for key, xpath in iteritems(xpath_map):
formatted[key] = xpath.format(*args, **kwargs)
return formatted | 0.006515 |
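A small usage sketch of format_xpaths with a hypothetical xpath_map (keys and the 'root' placeholder are made up for illustration):
xpath_map = {'title': '{root}/identification/title', 'abstract': '{root}/identification/abstract'}
formatted = format_xpaths(xpath_map, root='metadata')
formatted['title']   # -> 'metadata/identification/title'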
async def _dump_message_field(self, writer, msg, field, fvalue=None):
"""
Dumps a message field to the writer. Field is defined by the message field specification.
:param writer:
:param msg:
:param field:
:param fvalue:
:return:
"""
fname, ftype, params = field[0], field[1], field[2:]
fvalue = getattr(msg, fname, None) if fvalue is None else fvalue
await self.dump_field(writer, fvalue, ftype, params) | 0.006098 |
def init_argparser(self, argparser):
"""
Other runtimes (or users of ArgumentParser) can pass their
subparser in here to collect the arguments for a subcommand.
"""
super(SourcePackageToolchainRuntime, self).init_argparser(argparser)
self.init_argparser_source_registry(argparser)
self.init_argparser_package_names(argparser) | 0.005025 |
def register_actions(self, shortcut_manager):
"""Register callback methods for triggered actions
:param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager:
"""
shortcut_manager.add_callback_for_action("undo", self.undo)
shortcut_manager.add_callback_for_action("redo", self.redo) | 0.006061 |
def factory(cfg, login, pswd, request_type):
"""
Instantiate ExportRequest
:param cfg: request configuration, should consist of request description (url and optional parameters)
:param login:
:param pswd:
:param request_type: TYPE_SET_FIELD_VALUE || TYPE_CREATE_ENTITY || TYPE_DELETE_ENTITY || TYPE_CREATE_RELATION
:return: ExportRequest instance
"""
if request_type == ExportRequest.TYPE_SET_FIELD_VALUE:
return SetFieldValueRequest(cfg, login, pswd)
elif request_type == ExportRequest.TYPE_CREATE_ENTITY:
return CreateEntityRequest(cfg, login, pswd)
elif request_type == ExportRequest.TYPE_DELETE_ENTITY:
return DeleteEntityRequest(cfg, login, pswd)
elif request_type == ExportRequest.TYPE_CREATE_RELATION:
return CreateRelationRequest(cfg, login, pswd)
else:
raise NotImplementedError('Not supported request type - {}'.format(request_type)) | 0.004965 |
def varchar(self, field=None):
"""
Returns a chunk of text, of maximum length 'max_length'
"""
assert field is not None, "The field parameter must be passed to the 'varchar' method."
max_length = field.max_length
def source():
length = random.choice(range(1, max_length + 1))
return "".join(random.choice(general_chars) for i in xrange(length))
return self.get_allowed_value(source, field) | 0.008511 |
def get_authorizations_by_genus_type(self, authorization_genus_type):
"""Gets an ``AuthorizationList`` corresponding to the given authorization genus ``Type`` which does not include authorizations of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: authorization_genus_type (osid.type.Type): an
authorization genus type
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: NullArgument - ``authorization_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(authorization_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.AuthorizationList(result, runtime=self._runtime, proxy=self._proxy) | 0.002488 |
def _post_processing(kwargs, skip_translate, invalid):
'''
Additional container-specific post-translation processing
'''
# Don't allow conflicting options to be set
if kwargs.get('port_bindings') is not None \
and kwargs.get('publish_all_ports'):
kwargs.pop('port_bindings')
invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True'
if kwargs.get('hostname') is not None \
and kwargs.get('network_mode') == 'host':
kwargs.pop('hostname')
invalid['hostname'] = 'Cannot be used when network_mode=True'
# Make sure volumes and ports are defined to match the binds and port_bindings
if kwargs.get('binds') is not None \
and (skip_translate is True or
all(x not in skip_translate
for x in ('binds', 'volume', 'volumes'))):
# Make sure that all volumes defined in "binds" are included in the
# "volumes" param.
auto_volumes = []
if isinstance(kwargs['binds'], dict):
for val in six.itervalues(kwargs['binds']):
try:
if 'bind' in val:
auto_volumes.append(val['bind'])
except TypeError:
continue
else:
if isinstance(kwargs['binds'], list):
auto_volume_defs = kwargs['binds']
else:
try:
auto_volume_defs = helpers.split(kwargs['binds'])
except AttributeError:
auto_volume_defs = []
for val in auto_volume_defs:
try:
auto_volumes.append(helpers.split(val, ':')[1])
except IndexError:
continue
if auto_volumes:
actual_volumes = kwargs.setdefault('volumes', [])
actual_volumes.extend([x for x in auto_volumes
if x not in actual_volumes])
# Sort list to make unit tests more reliable
actual_volumes.sort()
if kwargs.get('port_bindings') is not None \
and all(x not in skip_translate
for x in ('port_bindings', 'expose', 'ports')):
# Make sure that all ports defined in "port_bindings" are included in
# the "ports" param.
ports_to_bind = list(kwargs['port_bindings'])
if ports_to_bind:
ports_to_open = set(kwargs.get('ports', []))
ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind])
kwargs['ports'] = list(ports_to_open)
if 'ports' in kwargs \
and all(x not in skip_translate for x in ('expose', 'ports')):
# TCP ports should only be passed as the port number. Normalize the
# input so a port definition of 80/tcp becomes just 80 instead of
# (80, 'tcp').
for index, _ in enumerate(kwargs['ports']):
try:
if kwargs['ports'][index][1] == 'tcp':
kwargs['ports'][index] = kwargs['ports'][index][0]
except TypeError:
continue | 0.000955 |
def dot(poly1, poly2):
"""
Dot product of polynomial vectors.
Args:
poly1 (Poly) : left part of product.
poly2 (Poly) : right part of product.
Returns:
(Poly) : product of poly1 and poly2.
Examples:
>>> poly = cp.prange(3, 1)
>>> print(poly)
[1, q0, q0^2]
>>> print(cp.dot(poly, numpy.arange(3)))
2q0^2+q0
>>> print(cp.dot(poly, poly))
q0^4+q0^2+1
"""
if not isinstance(poly1, Poly) and not isinstance(poly2, Poly):
return numpy.dot(poly1, poly2)
poly1 = Poly(poly1)
poly2 = Poly(poly2)
poly = poly1*poly2
if numpy.prod(poly1.shape) <= 1 or numpy.prod(poly2.shape) <= 1:
return poly
return chaospy.poly.sum(poly, 0) | 0.001309 |
def rbac_policy_create(request, **kwargs):
"""Create a RBAC Policy.
:param request: request context
:param target_tenant: target tenant of the policy
:param tenant_id: owner tenant of the policy(Not recommended)
:param object_type: network or qos_policy
:param object_id: object id of policy
:param action: access_as_shared or access_as_external
:return: RBACPolicy object
"""
body = {'rbac_policy': kwargs}
rbac_policy = neutronclient(request).create_rbac_policy(
body=body).get('rbac_policy')
return RBACPolicy(rbac_policy) | 0.001718 |
def run(self):
"""Run directive."""
try:
language = self.arguments[0]
except IndexError:
language = ''
code = '\n'.join(self.content)
literal = docutils.nodes.literal_block(code, code)
literal['classes'].append('code-block')
literal['language'] = language
return [literal] | 0.005556 |
def is_none(node):
"Determine if a node is the `None` keyword."
return isinstance(node, parso.python.tree.Keyword) and node.value == 'None' | 0.006803 |
def get_response(self, action, params, page=0, itemSet=None):
"""
Utility method to handle calls to ECS and parsing of responses.
"""
params['Service'] = "AWSECommerceService"
params['Operation'] = action
if page:
params['ItemPage'] = page
response = self.make_request(None, params, "/onca/xml")
body = response.read()
boto.log.debug(body)
if response.status != 200:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
if itemSet == None:
rs = ItemSet(self, action, params, page)
else:
rs = itemSet
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs | 0.003464 |
def files(self):
"""Extract files to download from GitHub payload."""
tag_name = self.release['tag_name']
repo_name = self.repository['full_name']
zipball_url = self.release['zipball_url']
filename = u'{name}-{tag}.zip'.format(name=repo_name, tag=tag_name)
response = self.gh.api.session.head(zipball_url)
assert response.status_code == 302, \
u'Could not retrieve archive from GitHub: {0}'.format(zipball_url)
yield filename, zipball_url | 0.003868 |
def compose(self):
"""
Get the CGR of the reaction.
Reagents will be presented as unchanged molecules.
:return: CGRContainer
"""
rr = self.__reagents + self.__reactants
if rr:
if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in rr):
raise TypeError('Queries not composable')
r = reduce(or_, rr)
else:
r = MoleculeContainer()
if self.__products:
if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in self.__products):
raise TypeError('Queries not composable')
p = reduce(or_, self.__products)
else:
p = MoleculeContainer()
return r ^ p | 0.00534 |
def _getf(self, id):
"""Open a cached file with the given id for reading."""
try:
filename = self.__filename(id)
self.__remove_if_expired(filename)
return self.__open(filename, "rb")
except Exception:
pass | 0.00722 |
def raise_routing_exception(self, request):
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request) | 0.002972 |
def block_transfer(self, buffer, x, y):
"""
Copy a buffer to the screen double buffer at a specified location.
:param buffer: The double buffer to copy
:param x: The X origin for where to place it in the Screen
:param y: The Y origin for where to place it in the Screen
"""
self._buffer.block_transfer(buffer, x, y) | 0.005376 |
def to_txt(self, verbose=False):
"""Write the program information to text,
which can be printed in a terminal.
Parameters
----------
verbose
If True, more information is shown.
Returns
-------
string
Program as text.
"""
# Get information related to formatting
exercises = list(self._yield_exercises())
max_ex_name = 0
if len(exercises) != 0:
max_ex_name = max(len(ex.name) for ex in exercises)
# If rendered, find the length of the longest '6 x 75kg'-type string
max_ex_scheme = 0
if self._rendered:
for (week, day, dynamic_ex) in self._yield_week_day_dynamic():
lengths = [len(s) for s in
self._rendered[week][day][dynamic_ex]['strings']]
max_ex_scheme = max(max_ex_scheme, max(lengths))
env = self.jinja2_environment
template = env.get_template(self.TEMPLATE_NAMES['txt'])
return template.render(program=self, max_ex_name=max_ex_name,
max_ex_scheme=max_ex_scheme, verbose=verbose) | 0.002547 |
def modify_request(self, http_request=None):
"""Sets HTTP request components based on the URI."""
if http_request is None:
http_request = HttpRequest()
if http_request.uri is None:
http_request.uri = Uri()
# Determine the correct scheme.
if self.scheme:
http_request.uri.scheme = self.scheme
if self.port:
http_request.uri.port = self.port
if self.host:
http_request.uri.host = self.host
# Set the relative uri path
if self.path:
http_request.uri.path = self.path
if self.query:
http_request.uri.query = self.query.copy()
return http_request | 0.012759 |
def enable_disable_on_bot_select_deselect(self):
"""
Disables the botconfig groupbox and minus buttons when no bot is selected
:return:
"""
if not self.blue_listwidget.selectedItems() and not self.orange_listwidget.selectedItems():
self.bot_config_groupbox.setDisabled(True)
self.blue_minus_toolbutton.setDisabled(True)
self.orange_minus_toolbutton.setDisabled(True)
else:
self.bot_config_groupbox.setDisabled(False) | 0.007813 |
def add_page_break(self):
"""Return newly |Paragraph| object containing only a page break."""
paragraph = self.add_paragraph()
paragraph.add_run().add_break(WD_BREAK.PAGE)
return paragraph | 0.009091 |
def mtf_transformer_paper_tr(size):
"""Config for translation experiments.
Train these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs)
The size parameter is an integer that controls the number of heads and the
  size of the feedforward hidden layers.  Increasing size by 1
doubles each of these.
Args:
size: an integer
Returns:
a hparams object
"""
n = 2 ** size
hparams = mtf_transformer_base()
hparams.label_smoothing = 0.1
hparams.batch_size = 128
hparams.d_model = 1024
hparams.d_ff = int(4096 * n)
hparams.num_heads = int(8 * n)
hparams.shared_embedding_and_softmax_weights = False
# one epoch for translate_enfr_wmt32k_packed = 51400 steps
hparams.learning_rate_decay_steps = 51400
return hparams | 0.016818 |
def _resolve_command(self, command, target):
"""Get the correct event for the command
Only for 'privmsg' and 'notice' commands.
:param command: The command string
:type command: :class:`str`
:param target: either a user or a channel
:type target: :class:`str`
:returns: the correct event type
:rtype: :class:`str`
:raises: None
"""
if command == "privmsg":
if irc.client.is_channel(target):
command = "pubmsg"
else:
if irc.client.is_channel(target):
command = "pubnotice"
else:
command = "privnotice"
return command | 0.002837 |
def copy(self):
"""
Returns a copy of the factor.
Returns
-------
DiscreteFactor: copy of the factor
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 3], np.arange(18))
>>> phi_copy = phi.copy()
>>> phi_copy.variables
['x1', 'x2', 'x3']
>>> phi_copy.cardinality
array([2, 3, 3])
>>> phi_copy.values
array([[[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8]],
[[ 9, 10, 11],
[12, 13, 14],
[15, 16, 17]]])
"""
# not creating a new copy of self.values and self.cardinality
# because __init__ methods does that.
return DiscreteFactor(self.scope(), self.cardinality, self.values, state_names=self.state_names) | 0.003181 |
def derivative(n, coef, derivative=2, periodic=False):
"""
Builds a penalty matrix for P-Splines with continuous features.
Penalizes the squared differences between basis coefficients.
Parameters
----------
n : int
number of splines
coef : unused
for compatibility with constraints
derivative: int, default: 2
        which derivative we penalize.
        If derivative is 1, we penalize 1st order derivatives;
        if derivative is 2, we penalize 2nd order derivatives, etc.
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n)
"""
if n == 1:
# no derivative for constant functions
return sp.sparse.csc_matrix(0.)
D = sparse_diff(sp.sparse.identity(n + 2*derivative*periodic).tocsc(), n=derivative).tolil()
if periodic:
# wrap penalty
cols = D[:, :derivative]
D[:, -2 * derivative:-derivative] += cols * (-1) ** derivative
# do symmetric operation on lower half of matrix
n_rows = int((n + 2 * derivative)/2)
D[-n_rows:] = D[:n_rows][::-1, ::-1]
# keep only the center of the augmented matrix
D = D[derivative:-derivative, derivative:-derivative]
return D.dot(D.T).tocsc() | 0.001597 |
def permission_check(apikey, endpoint):
"""
    Return (user, seckey) if the URL endpoint is in the allowed entry point list.
"""
try:
ak = APIKeys.objects.get(apikey=apikey)
apitree = cPickle.loads(ak.apitree.encode("ascii"))
if apitree.match(endpoint):
return ak.user if ak.user else AnonymousUser(), ak.seckey
except APIKeys.DoesNotExist:
pass
return None, None | 0.00431 |
def import_class(class_path):
'''
    Imports and returns the class for the given dotted class path.
'''
module_name, class_name = class_path.rsplit(".", 1)
module = import_module(module_name)
claz = getattr(module, class_name)
return claz | 0.004032 |
def update(collection_name, upsert, multi, spec, doc, safe, last_error_args):
"""Get an **update** message.
"""
options = 0
if upsert:
options += 1
if multi:
options += 2
data = __ZERO
data += bson._make_c_string(collection_name)
data += struct.pack("<i", options)
data += bson.BSON.encode(spec)
data += bson.BSON.encode(doc)
if safe:
(_, update_message) = __pack_message(2001, data)
(request_id, error_message) = __last_error(last_error_args)
return (request_id, update_message + error_message)
else:
return __pack_message(2001, data) | 0.001582 |
def argument(self) -> bool:
"""Parse statement argument.
        Return ``True`` if the argument is followed by a block of substatements.
"""
next = self.peek()
if next == "'":
quoted = True
self.sq_argument()
elif next == '"':
quoted = True
self.dq_argument()
elif self._arg == "":
quoted = False
self.unq_argument()
else:
raise UnexpectedInput(self, "single or double quote")
self.opt_separator()
next = self.peek()
if next == ";":
return False
if next == "{":
return True
elif quoted and next == "+":
self.offset += 1
self.opt_separator()
return self.argument()
else:
raise UnexpectedInput(self, "';', '{'" +
(" or '+'" if quoted else "")) | 0.002139 |
def _if_not_closed(f):
"""Run the method iff. the memory view hasn't been closed and the parent
object has not been freed."""
@add_signature_to_docstring(f)
@functools.wraps(f)
def f_(self, *args, **kwargs):
if self.closed or self._parent._freed:
raise OSError
return f(self, *args, **kwargs)
return f_ | 0.002817 |
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
"""
attribs = self._get_attributes_dict()
attribs['name'] = name
# do not pass tz to set because tzlocal cannot be hashed
if len({str(x.dtype) for x in to_concat}) != 1:
raise ValueError('to_concat must have the same tz')
new_data = type(self._values)._concat_same_type(to_concat).asi8
# GH 3232: If the concat result is evenly spaced, we can retain the
# original frequency
is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
if not is_period_dtype(self) and not is_diff_evenly_spaced:
# reset freq
attribs['freq'] = None
return self._simple_new(new_data, **attribs) | 0.002454 |
def rollback(self):
"""Ignore all changes made in the latest session (terminate the session)."""
if self.session is not None:
logger.info("rolling back transaction in %s" % self)
self.session.close()
self.session = None
self.lock_update.release()
else:
logger.warning("rollback called but there's no open session in %s" % self) | 0.009732 |
def _treat_star_format(self,arg_format_list,args):
"""
Deal with "*" format if specified.
"""
num_stars = len([a for a in arg_format_list if a == "*"])
if num_stars > 0:
# Make sure the repeated format argument only occurs once, is last,
# and that there is at least one format in addition to it.
if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1:
# Trim * from end
arg_format_list = arg_format_list[:-1]
# If we need extra arguments...
if len(arg_format_list) < len(args):
f = arg_format_list[-1]
len_diff = len(args) - len(arg_format_list)
tmp = list(arg_format_list)
tmp.extend([f for i in range(len_diff)])
arg_format_list = "".join(tmp)
else:
err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format."
raise ValueError(err)
return arg_format_list | 0.005333 |
def _arith_method_SPARSE_ARRAY(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
def wrapper(self, other):
from pandas.core.arrays.sparse.array import (
SparseArray, _sparse_array_op, _wrap_result, _get_fill)
if isinstance(other, np.ndarray):
if len(self) != len(other):
raise AssertionError("length mismatch: {self} vs. {other}"
.format(self=len(self), other=len(other)))
if not isinstance(other, SparseArray):
dtype = getattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, op_name)
elif is_scalar(other):
with np.errstate(all='ignore'):
fill = op(_get_fill(self), np.asarray(other))
result = op(self.sp_values, other)
return _wrap_result(op_name, result, self.sp_index, fill)
else: # pragma: no cover
raise TypeError('operation with {other} not supported'
.format(other=type(other)))
wrapper.__name__ = op_name
return wrapper | 0.000749 |
def search(self, **kwargs):
"""
Method to search asns based on extends search.
:param search: Dict containing QuerySets to find asns.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing asns
"""
return super(ApiV4As, self).get(self.prepare_url(
'api/v4/as/', kwargs)) | 0.00491 |
def get_artist_location(self, cache=True):
"""Get the location of a song's artist.
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
An artist location object.
Example:
>>> s = song.Song('SOQKVPH12A58A7AF4D')
>>> s.artist_location
{u'latitude': 34.053489999999996, u'location': u'Los Angeles, CA', u'longitude': -118.24532000000001}
>>>
"""
if not (cache and ('artist_location' in self.cache)):
response = self.get_attribute('profile', bucket='artist_location')
self.cache['artist_location'] = response['songs'][0]['artist_location']
return self.cache['artist_location'] | 0.010817 |
def xstep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`.
"""
self.X = np.asarray(sl.lu_solve_AATI(self.Z, self.rho, self.SZT +
self.rho*(self.Y - self.U), self.lu, self.piv,),
dtype=self.dtype) | 0.00641 |
def read_modis_response(filename, scale=1.0):
"""Read the Terra/Aqua MODIS relative spectral responses. Be aware that
    MODIS has several detectors (more than one), unlike e.g. AVHRR, which
    always has only one.
"""
with open(filename, "r") as fid:
lines = fid.readlines()
nodata = -99.0
# The IR channels seem to be in microns, whereas the short wave channels are
# in nanometers! For VIS/NIR scale should be 0.001
detectors = {}
for line in lines:
if line.find("#") == 0:
continue
dummy, det_num, s_1, s_2 = line.split()
detector_name = 'det-{0:d}'.format(int(det_num))
if detector_name not in detectors:
detectors[detector_name] = {'wavelength': [], 'response': []}
detectors[detector_name]['wavelength'].append(float(s_1) * scale)
detectors[detector_name]['response'].append(float(s_2))
for key in detectors:
mask = np.array(detectors[key]['response']) == nodata
detectors[key]['response'] = np.ma.masked_array(
detectors[key]['response'], mask=mask).compressed()
detectors[key]['wavelength'] = np.ma.masked_array(
detectors[key]['wavelength'], mask=mask).compressed()
return detectors | 0.001576 |
def set_host(self, host):
"""
Set the host for a lightning server.
Host can be local (e.g. http://localhost:3000), a heroku
instance (e.g. http://lightning-test.herokuapp.com), or
a independently hosted lightning server.
"""
if host[-1] == '/':
host = host[:-1]
self.host = host
return self | 0.005319 |
def _get_account(self, address):
"""Get account by address.
:param address:
:return:
"""
state = self._get_head_state()
account_address = binascii.a2b_hex(utils.remove_0x_head(address))
return state.get_and_cache_account(account_address) | 0.006803 |
def _proxy(self, url, urlparams=None):
"""Do the actual action of proxying the call.
"""
        if urlparams is None:
            urlparams = {}
        for k, v in request.params.iteritems():
            urlparams[k] = v
query = urlencode(urlparams)
full_url = url
if query:
if not full_url.endswith("?"):
full_url += "?"
full_url += query
# build the request with its headers
req = urllib2.Request(url=full_url)
for header in request.headers:
if header.lower() == "host":
req.add_header(header, urlparse.urlparse(url)[1])
else:
req.add_header(header, request.headers[header])
res = urllib2.urlopen(req)
# add response headers
i = res.info()
response.status = res.code
got_content_length = False
for header in i:
# We don't support serving the result as chunked
if header.lower() == "transfer-encoding":
continue
if header.lower() == "content-length":
got_content_length = True
response.headers[header] = i[header]
# return the result
result = res.read()
res.close()
#if not got_content_length:
# response.headers['content-length'] = str(len(result))
return result | 0.003706 |
def range_span(ranges):
"""
Returns the total span between the left most range to the right most range.
>>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)]
>>> range_span(ranges)
41
>>> ranges = [("1", 30, 45), ("2", 40, 50)]
>>> range_span(ranges)
27
>>> ranges = [("1", 30, 45), ("1", 45, 50)]
>>> range_span(ranges)
21
>>> range_span([])
0
"""
if not ranges:
return 0
ranges.sort()
ans = 0
for seq, lt in groupby(ranges, key=lambda x: x[0]):
lt = list(lt)
ans += max(max(lt)[1:]) - min(min(lt)[1:]) + 1
return ans | 0.0016 |
def convert_tensor_to_probability_map(scope, operator, container):
'''
This converter tries to convert a special operator 'TensorToProbabilityMap' into a sequence of some ONNX operators.
Those operators are used to create a dictionary in which keys are class labels and values are the associated
probabilities. We assume that the elements in the given probability tensor are aligned with the class labels
specified in the CoreML model.
Notice that ONNX<1.2 doesn't support a CoreML classifier with a batch size larger than one because old ONNX ZipMap
is not able to produce a sequence of dictionaries. This issue has been fixed in ONNX-1.2.
'''
attrs = {'name': scope.get_unique_operator_name('ZipMap')}
model_type = operator.raw_operator.WhichOneof('Type')
if model_type == 'neuralNetworkClassifier':
model = operator.raw_operator.neuralNetworkClassifier
if model.WhichOneof('ClassLabels') == 'stringClassLabels':
attrs['classlabels_strings'] = list(s.encode('utf-8') for s in model.stringClassLabels.vector)
elif model.WhichOneof('ClassLabels') == 'int64ClassLabels':
attrs['classlabels_int64s'] = list(int(i) for i in model.int64ClassLabels.vector)
else:
raise ValueError('Unknown label type found')
elif model_type == 'pipelineClassifier':
model = operator.raw_operator.pipelineClassifier
if model.WhichOneof('ClassLabels') == 'stringClassLabels':
attrs['classlabels_strings'] = list(s.encode('utf-8') for s in model.stringClassLabels.vector)
elif model.WhichOneof('ClassLabels') == 'int64ClassLabels':
attrs['classlabels_int64s'] = list(int(i) for i in model.int64ClassLabels.vector)
else:
raise ValueError('Unknown label type found')
else:
raise TypeError('Only neural network classifiers and pipeline classifiers are supported')
input_shape = operator.inputs[0].type.shape
if len(operator.inputs[0].type.shape) != 2:
# Calculate the shape attribute of ONNX Reshape
if input_shape[0] != 'None':
N = input_shape[0]
else:
N = -1 # -1 means that this dimension is automatically determined in runtime and unknown in conversion time
if all(isinstance(i, numbers.Integral) for i in input_shape[1:]):
C = 1
for i in input_shape[1:]:
C *= int(i)
else:
C = -1 # -1 means that this dimension is automatically determined in runtime and unknown in conversion time
# ZipMap in ONNX only accepts [C] and [N, C] inputs. In cases of [N, C, 1, 1], we reshape the probability tensor
# into [N, C] before feeding it into ZipMap.
buffer_name = scope.get_unique_variable_name('buffer')
apply_reshape(scope, operator.inputs[0].full_name, buffer_name, container, desired_shape=[N, C])
else:
buffer_name = operator.inputs[0].full_name
container.add_node('ZipMap', buffer_name, operator.outputs[0].full_name,
op_domain='ai.onnx.ml', **attrs) | 0.004806 |
def accepts(**schemas):
"""Create a decorator for validating function parameters.
Example::
@accepts(a="number", body={"+field_ids": [int], "is_ok": bool})
def f(a, body):
print (a, body["field_ids"], body.get("is_ok"))
:param schemas: The schema for validating a given parameter.
"""
validate = parse(schemas).validate
@decorator
def validating(func, *args, **kwargs):
validate(inspect.getcallargs(func, *args, **kwargs), adapt=False)
return func(*args, **kwargs)
return validating | 0.001779 |