text (stringlengths 78–104k) | score (float64, 0–0.18)
---|---
def _normalize_utf8_keys(kwargs):
"""When kwargs are passed literally in a source file, their keys are ascii: normalize."""
if any(type(key) is binary_type for key in kwargs.keys()):
# This is to preserve the original dict type for kwargs.
dict_type = type(kwargs)
return dict_type([(text_type(k), v) for k, v in kwargs.items()])
return kwargs | 0.01385 |
async def membership(client: Client, membership_signed_raw: str) -> ClientResponse:
"""
POST a Membership document
:param client: Client to connect to the api
:param membership_signed_raw: Membership signed raw document
    :return: aiohttp client response
"""
return await client.post(MODULE + '/membership', {'membership': membership_signed_raw}, rtype=RESPONSE_AIOHTTP) | 0.008043 |
def set_widgets(self):
"""Set widgets on the Hazard Layer From TOC tab."""
# The list is already populated in the previous step, but now we
# need to do it again in case we're back from the Keyword Wizard.
# First, preserve self.parent.layer before clearing the list
last_layer = self.parent.layer and self.parent.layer.id() or None
self.lblDescribeCanvasHazLayer.clear()
self.list_compatible_canvas_layers()
self.auto_select_one_item(self.lstCanvasHazLayers)
# Try to select the last_layer, if found:
if last_layer:
layers = []
for index in range(self.lstCanvasHazLayers.count()):
item = self.lstCanvasHazLayers.item(index)
layers += [item.data(Qt.UserRole)]
if last_layer in layers:
self.lstCanvasHazLayers.setCurrentRow(layers.index(last_layer))
# Set icon
hazard = self.parent.step_fc_functions1.selected_value(
layer_purpose_hazard['key'])
icon_path = get_image_path(hazard)
self.lblIconIFCWHazardFromCanvas.setPixmap(QPixmap(icon_path)) | 0.001738 |
def _get_method_wrappers(cls):
"""
Find the appropriate operation-wrappers to use when defining flex/special
arithmetic, boolean, and comparison operations with the given class.
Parameters
----------
cls : class
Returns
-------
arith_flex : function or None
comp_flex : function or None
arith_special : function
comp_special : function
bool_special : function
Notes
-----
None is only returned for SparseArray
"""
if issubclass(cls, ABCSparseSeries):
# Be sure to catch this before ABCSeries and ABCSparseArray,
        # as both will see SparseSeries as a subclass
arith_flex = _flex_method_SERIES
comp_flex = _flex_method_SERIES
arith_special = _arith_method_SPARSE_SERIES
comp_special = _arith_method_SPARSE_SERIES
bool_special = _bool_method_SERIES
# TODO: I don't think the functions defined by bool_method are tested
elif issubclass(cls, ABCSeries):
# Just Series; SparseSeries is caught above
arith_flex = _flex_method_SERIES
comp_flex = _flex_method_SERIES
arith_special = _arith_method_SERIES
comp_special = _comp_method_SERIES
bool_special = _bool_method_SERIES
elif issubclass(cls, ABCSparseArray):
arith_flex = None
comp_flex = None
arith_special = _arith_method_SPARSE_ARRAY
comp_special = _arith_method_SPARSE_ARRAY
bool_special = _arith_method_SPARSE_ARRAY
elif issubclass(cls, ABCPanel):
arith_flex = _flex_method_PANEL
comp_flex = _comp_method_PANEL
arith_special = _arith_method_PANEL
comp_special = _comp_method_PANEL
bool_special = _arith_method_PANEL
elif issubclass(cls, ABCDataFrame):
# Same for DataFrame and SparseDataFrame
arith_flex = _arith_method_FRAME
comp_flex = _flex_comp_method_FRAME
arith_special = _arith_method_FRAME
comp_special = _comp_method_FRAME
bool_special = _arith_method_FRAME
return arith_flex, comp_flex, arith_special, comp_special, bool_special | 0.000469 |
def update(connection, force_download):
"""Update the database"""
manager.database.update(
connection=connection,
force_download=force_download
) | 0.00578 |
def one(self, command, params=None):
"""
    Returns the first row of the response obtained via query
    > db.query('SELECT * FROM users WHERE id=:id', {"id":MY_USER_ID})
    :param command: SQL query
    :param params: Parameters for prepared statements
:rtype: dict
"""
dr = self.query(command, params)
if dr['rows']:
return dr['rows'][0]
else:
return None | 0.004454 |
def check_declared(self, node):
"""update the state of this Identifiers with the undeclared
and declared identifiers of the given node."""
for ident in node.undeclared_identifiers():
if ident != 'context' and\
ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.locally_declared.add(ident) | 0.004301 |
def get_characteristic_subpattern(subpatterns):
"""Picks the most characteristic from a list of linear patterns
Current order used is:
names > common_names > common_chars
"""
if not isinstance(subpatterns, list):
return subpatterns
if len(subpatterns)==1:
return subpatterns[0]
# first pick out the ones containing variable names
subpatterns_with_names = []
subpatterns_with_common_names = []
common_names = ['in', 'for', 'if' , 'not', 'None']
subpatterns_with_common_chars = []
common_chars = "[]().,:"
for subpattern in subpatterns:
if any(rec_test(subpattern, lambda x: type(x) is str)):
if any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_chars)):
subpatterns_with_common_chars.append(subpattern)
elif any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_names)):
subpatterns_with_common_names.append(subpattern)
else:
subpatterns_with_names.append(subpattern)
if subpatterns_with_names:
subpatterns = subpatterns_with_names
elif subpatterns_with_common_names:
subpatterns = subpatterns_with_common_names
elif subpatterns_with_common_chars:
subpatterns = subpatterns_with_common_chars
# of the remaining subpatterns pick out the longest one
return max(subpatterns, key=len) | 0.003378 |
def get_product_string(self):
""" Get the Product String from the HID device.
:return: The Product String
:rtype: unicode
"""
self._check_device_status()
str_p = ffi.new("wchar_t[]", 255)
rv = hidapi.hid_get_product_string(self._device, str_p, 255)
if rv == -1:
raise IOError("Failed to read product string from HID device: {0}"
.format(self._get_last_error_string()))
return ffi.string(str_p) | 0.003906 |
def timestamp(stamp, tolerance=150):
"""Validate timestamp specified by request.
See `validate.request` for additional info.
Args:
stamp: str. Time request was made as ISO 8601 timestamp.
tolerance: int. Number of seconds request remains valid from timestamp.
    Returns:
bool: True if valid, False otherwise.
"""
try:
tolerance = datetime.timedelta(0, tolerance)
timestamp_low = dateutil.parser.parse(stamp)
timestamp_high = timestamp_low + tolerance
now = datetime.datetime.now(timestamp_low.tzinfo)
except ValueError:
return False
return now >= timestamp_low and now <= timestamp_high | 0.001464 |
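A minimal usage sketch for the validator above (illustrative stamp and tolerance; the datetime/dateutil imports used by `timestamp` are assumed):
import datetime
fresh_stamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
print(timestamp(fresh_stamp, tolerance=300))  # expected: True, the stamp is recent
print(timestamp("not-a-timestamp"))           # expected: False, parsing raises ValueError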
def to_dict(self, include_null=True):
"""
Convert to dict.
"""
if include_null:
return dict(self.items())
else:
return {
attr: value
for attr, value in self.__dict__.items()
if not attr.startswith("_sa_")
} | 0.006061 |
def limit_inference(iterator, size):
"""Limit inference amount.
Limit inference amount to help with performance issues with
exponentially exploding possible results.
:param iterator: Inference generator to limit
:type iterator: Iterator(NodeNG)
    :param size: Maximum amount of nodes yielded plus an
Uninferable at the end if limit reached
:type size: int
:yields: A possibly modified generator
:rtype param: Iterable
"""
yield from islice(iterator, size)
has_more = next(iterator, False)
if has_more is not False:
yield Uninferable
return | 0.001623 |
def meld(*values):
"""Return the repeated value, or the first value if there's only one.
This is a convenience function, equivalent to calling
getvalue(repeated(x)) to get x.
This function skips over instances of None in values (None is not allowed
in repeated variables).
Examples:
meld("foo", "bar") # => ListRepetition("foo", "bar")
meld("foo", "foo") # => ListRepetition("foo", "foo")
meld("foo", None) # => "foo"
meld(None) # => None
"""
values = [x for x in values if x is not None]
if not values:
return None
result = repeated(*values)
if isrepeating(result):
return result
return getvalue(result) | 0.001418 |
def decimal_to_dms(value, precision):
'''
    Convert decimal position to degrees, minutes, seconds in a format supported by EXIF
'''
deg = math.floor(value)
min = math.floor((value - deg) * 60)
sec = math.floor((value - deg - min / 60) * 3600 * precision)
return ((deg, 1), (min, 1), (sec, precision)) | 0.006116 |
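A minimal usage sketch for the conversion above, using 30.25 degrees (an illustrative coordinate that is exact in binary floating point):
deg, minutes, sec = decimal_to_dms(30.25, precision=100)
print(deg, minutes, sec)  # expected (Python 3): (30, 1) (15, 1) (0, 100), i.e. 30 deg 15 min 0.00 sec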
def stop_charge(self):
"""Stop charging the Tesla Vehicle."""
if self.__charger_state:
data = self._controller.command(self._id, 'charge_stop',
wake_if_asleep=True)
if data and data['response']['result']:
self.__charger_state = False
self.__manual_update_time = time.time() | 0.005195 |
def _compute_frequencies(self, word_sent):
"""
Compute the frequency of each of word.
Input:
word_sent, a list of sentences already tokenized.
Output:
freq, a dictionary where freq[w] is the frequency of w.
"""
freq = defaultdict(int)
for s in word_sent:
for word in s:
if word not in self._stopwords:
freq[word] += 1
        # frequencies normalization and filtering
        m = float(max(freq.values()))
        # iterate over a copy of the keys so entries can be deleted safely (Python 3)
        for w in list(freq.keys()):
            freq[w] = freq[w]/m
            if freq[w] >= self._max_cut or freq[w] <= self._min_cut:
                del freq[w]
return freq | 0.012862 |
def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None):
"""
Search for series that belongs to a category id. Returns information about matching series in a DataFrame.
Parameters
----------
category_id : int
category id, e.g., 32145
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/category/series?category_id=%d&" % (self.root_url,
category_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for category id: ' + str(category_id))
return info | 0.007412 |
def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform symmetric EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple xs, ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0)
_, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0)
ys2 = ys2[::-1]
count_ys2 = count_ys2[::-1]
count_ys = count_ys1 + count_ys2
ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xs, ys, count_ys | 0.006083 |
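A minimal usage sketch (assumes numpy as np and that one_sided_ema from the same module is available; the sine data and grid size are illustrative):
import numpy as np
x = np.linspace(0.0, 10.0, 50)
y = np.sin(x) + 0.1 * np.random.randn(50)
xs, ys, count_ys = symmetric_ema(x, y, low=0.0, high=10.0, n=5, decay_steps=2.0)
print(xs)  # 5 evenly spaced grid points between 0 and 10
print(ys)  # EMA-smoothed estimates of y on that grid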
def readdir(path):
'''
.. versionadded:: 2014.1.0
Return a list containing the contents of a directory
CLI Example:
.. code-block:: bash
salt '*' file.readdir /path/to/dir/
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('Dir path must be absolute.')
if not os.path.isdir(path):
raise SaltInvocationError('A valid directory was not specified.')
dirents = ['.', '..']
dirents.extend(os.listdir(path))
return dirents | 0.001869 |
def to_bytes(value):
""" str to bytes (py3k) """
vtype = type(value)
if vtype == bytes or vtype == type(None):
return value
try:
return vtype.encode(value)
except UnicodeEncodeError:
pass
return value | 0.008 |
def validate_port_or_colon_separated_port_range(port_range):
"""Accepts a port number or a single-colon separated range."""
if port_range.count(':') > 1:
raise ValidationError(_("One colon allowed in port range"))
ports = port_range.split(':')
for port in ports:
validate_port_range(port) | 0.003125 |
def get_settings():
'''
Get all currently loaded settings.
'''
settings = {}
for config_file in config_files():
config_contents = load_config(config_file)
if config_contents is not None:
settings = deep_merge(settings, config_contents)
return settings | 0.0033 |
def set_text(self, point, text):
"""Set a text value in the screen canvas."""
if not self.option.legend:
return
if not isinstance(point, Point):
point = Point(point)
for offset, char in enumerate(str(text)):
self.screen.canvas[point.y][point.x + offset] = char | 0.006061 |
def daemon_factory(path):
"""Create a closure which creates a running daemon.
We need to create a closure that contains the correct path the daemon should
be started with. This is needed as the `Daemonize` library
requires a callable function for daemonization and doesn't accept any arguments.
This function cleans up sockets and output files in case we encounter any exceptions.
"""
def start_daemon():
root_dir = path
config_dir = os.path.join(root_dir, '.config/pueue')
try:
daemon = Daemon(root_dir=root_dir)
daemon.main()
except KeyboardInterrupt:
print('Keyboard interrupt. Shutting down')
daemon.stop_daemon()
except Exception:
try:
daemon.stop_daemon()
except Exception:
pass
cleanup(config_dir)
raise
return start_daemon | 0.004296 |
def send(*args, **kwargs):
"""
A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior.
"""
queue_flag = kwargs.pop("queue", False)
now_flag = kwargs.pop("now", False)
assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True."
if queue_flag:
return queue(*args, **kwargs)
elif now_flag:
return send_now(*args, **kwargs)
else:
if QUEUE_ALL:
return queue(*args, **kwargs)
else:
return send_now(*args, **kwargs) | 0.002677 |
def _read_compound_from_factsage_file_(file_name):
"""
Build a dictionary containing the factsage thermochemical data of a
compound by reading the data from a file.
:param file_name: Name of file to read the data from.
:returns: Dictionary containing compound data.
"""
with open(file_name) as f:
lines = f.readlines()
compound = {'Formula': lines[0].split(' ')[1]}
# FIXME: replace with logging
print(compound['Formula'])
compound['Phases'] = phs = {}
started = False
phaseold = 'zz'
recordold = '0'
for line in lines:
if started:
if line.startswith('_'): # line indicating end of data
break
line = line.replace(' 298 ', ' 298.15 ')
line = line.replace(' - ', ' ')
while ' ' in line:
line = line.replace(' ', ' ')
line = line.replace(' \n', '')
line = line.replace('\n', '')
strings = line.split(' ')
if len(strings) < 2: # empty line
continue
phase = strings[0]
if phase != phaseold: # new phase detected
phaseold = phase
ph = phs[phase] = {}
ph['Symbol'] = phase
ph['DHref'] = float(strings[2])
ph['Sref'] = float(strings[3])
cprecs = ph['Cp_records'] = {}
record = strings[1]
if record != recordold: # new record detected
recordold = record
Tmax = float(strings[len(strings) - 1])
cprecs[Tmax] = {}
cprecs[Tmax]['Tmin'] = float(strings[len(strings) - 2])
cprecs[Tmax]['Tmax'] = float(strings[len(strings) - 1])
cprecs[Tmax]['Terms'] = []
t = {'Coefficient': float(strings[4]),
'Exponent': float(strings[5])}
cprecs[Tmax]['Terms'].append(t)
if len(strings) == 10:
t = {'Coefficient': float(strings[6]),
'Exponent': float(strings[7])}
cprecs[Tmax]['Terms'].append(t)
else: # old record detected
t = {'Coefficient': float(strings[2]),
'Exponent': float(strings[3])}
cprecs[Tmax]['Terms'].append(t)
if len(strings) == 8:
t = {'Coefficient': float(strings[4]),
'Exponent': float(strings[5])}
cprecs[Tmax]['Terms'].append(t)
else: # old phase detected
ph = phs[phase]
record = strings[1]
if record != recordold: # new record detected
recordold = record
Tmax = float(strings[len(strings) - 1])
cprecs = ph['Cp_records']
cprecs[Tmax] = {}
cprecs[Tmax]['Tmin'] = float(strings[len(strings) - 2])
cprecs[Tmax]['Tmax'] = float(strings[len(strings) - 1])
cprecs[Tmax]['Terms'] = []
t = {'Coefficient': float(strings[2]),
'Exponent': float(strings[3])}
cprecs[Tmax]['Terms'].append(t)
if len(strings) == 8:
t = {'Coefficient': float(strings[4]),
'Exponent': float(strings[5])}
cprecs[Tmax]['Terms'].append(t)
else: # old record detected
t = {'Coefficient': float(strings[2]),
'Exponent': float(strings[3])}
cprecs[Tmax]['Terms'].append(t)
if len(strings) == 8:
t = {'Coefficient': float(strings[4]),
'Exponent': float(strings[5])}
cprecs[Tmax]['Terms'].append(t)
if line.startswith('_'): # line indicating the start of the data
started = True
for name, ph in phs.items():
cprecs = ph['Cp_records']
first = cprecs[min(cprecs.keys())]
first['Tmin'] = 298.15
return compound | 0.000232 |
def get_cachedir_bsig(self):
"""
Return the signature for a cached file, including
its children.
It adds the path of the cached file to the cache signature,
because multiple targets built by the same action will all
have the same build signature, and we have to differentiate
them somehow.
"""
try:
return self.cachesig
except AttributeError:
pass
# Collect signatures for all children
children = self.children()
sigs = [n.get_cachedir_csig() for n in children]
# Append this node's signature...
sigs.append(self.get_contents_sig())
        # ...and its path
sigs.append(self.get_internal_path())
# Merge this all into a single signature
result = self.cachesig = SCons.Util.MD5collect(sigs)
return result | 0.002265 |
def draw(self, **kwargs):
"""
Called from the fit method, this method creates the canvas and
draws the distribution plot on it.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Prepare the data
bins = np.arange(self.N)
words = [self.features[i] for i in self.sorted_[:self.N]]
freqs = {}
# Set up the bar plots
if self.conditional_freqdist_:
for label, values in sorted(self.conditional_freqdist_.items(), key=itemgetter(0)):
freqs[label] = [
values[i] for i in self.sorted_[:self.N]
]
else:
freqs['corpus'] = [
self.freqdist_[i] for i in self.sorted_[:self.N]
]
# Draw a horizontal barplot
if self.orient == 'h':
# Add the barchart, stacking if necessary
for label, freq in freqs.items():
self.ax.barh(bins, freq, label=label, align='center')
# Set the y ticks to the words
self.ax.set_yticks(bins)
self.ax.set_yticklabels(words)
# Order the features from top to bottom on the y axis
self.ax.invert_yaxis()
# Turn off y grid lines and turn on x grid lines
self.ax.yaxis.grid(False)
self.ax.xaxis.grid(True)
# Draw a vertical barplot
elif self.orient == 'v':
# Add the barchart, stacking if necessary
for label, freq in freqs.items():
self.ax.bar(bins, freq, label=label, align='edge')
# Set the y ticks to the words
self.ax.set_xticks(bins)
self.ax.set_xticklabels(words, rotation=90)
# Turn off x grid lines and turn on y grid lines
self.ax.yaxis.grid(True)
self.ax.xaxis.grid(False)
# Unknown state
else:
raise YellowbrickValueError(
"Orientation must be 'h' or 'v'"
) | 0.001952 |
def write_tsv(output_stream, *tup, **kwargs):
"""
    Write argument list in `tup` out as a tab-separated row to the stream.
"""
encoding = kwargs.get('encoding') or 'utf-8'
value = '\t'.join([s for s in tup]) + '\n'
output_stream.write(value.encode(encoding)) | 0.003559 |
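A minimal usage sketch, writing one row to an in-memory byte stream (the column values are illustrative):
import io
buf = io.BytesIO()
write_tsv(buf, 'gene', 'chr1', '12345')
print(buf.getvalue())  # expected: b'gene\tchr1\t12345\n'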
def _render_before(self, element):
'''Render opening tag and inline content'''
start = ["%s<%s" % (self.spaces, element.tag)]
if element.id:
start.append(" id=%s" % self.element.attr_wrap(self.replace_inline_variables(element.id)))
if element.classes:
start.append(" class=%s" % self.element.attr_wrap(self.replace_inline_variables(element.classes)))
if element.attributes:
start.append(' ' + self.replace_inline_variables(element.attributes))
content = self._render_inline_content(self.element.inline_content)
if element.nuke_inner_whitespace and content:
content = content.strip()
if element.self_close and not content:
start.append(" />")
elif content:
start.append(">%s" % (content))
elif self.children:
start.append(">%s" % (self.render_newlines()))
else:
start.append(">")
return ''.join(start) | 0.005025 |
def encoder_data(self, data):
"""
This method handles the incoming encoder data message and stores
the data in the digital response table.
:param data: Message data from Firmata
:return: No return value.
"""
prev_val = self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]
val = int((data[self.MSB] << 7) + data[self.LSB])
# set value so that it shows positive and negative values
if val > 8192:
val -= 16384
pin = data[0]
with self.pymata.data_lock:
self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = val
if prev_val != val:
callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]
if callback is not None:
callback([self.pymata.ENCODER, pin,
self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]]) | 0.005792 |
def describe_db_subnet_groups(name=None, filters=None, jmespath='DBSubnetGroups',
region=None, key=None, keyid=None, profile=None):
'''
Return a detailed listing of some, or all, DB Subnet Groups visible in the
current scope. Arbitrary subelements or subsections of the returned dataset
    can be selected by passing in a valid JMESPath filter as well.
CLI example::
salt myminion boto_rds.describe_db_subnet_groups
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
pag = conn.get_paginator('describe_db_subnet_groups')
args = {}
args.update({'DBSubnetGroupName': name}) if name else None
args.update({'Filters': filters}) if filters else None
pit = pag.paginate(**args)
pit = pit.search(jmespath) if jmespath else pit
return [p for p in pit] | 0.00348 |
def bot_item_drop_event(self, dropped_listwidget, event):
"""
Switches the team for the dropped agent to the other team
:param dropped_listwidget: The listwidget belonging to the new team for the agent
:param event: The QDropEvent containing the source
:return:
"""
dragged_listwidget = event.source()
if dragged_listwidget is dropped_listwidget: # drops in the same widget
return
self.current_bot.set_team(0 if dropped_listwidget == self.blue_listwidget else 1)
self.update_teams_listwidgets() | 0.008518 |
def _copy_with_changed_callback(self, new_callback):
''' Dev API used to wrap the callback with decorators. '''
return TimeoutCallback(self._document, new_callback, self._timeout, self._id) | 0.014634 |
def consume(self, key, amount=1, rate=None, capacity=None, **kwargs):
"""Consume an amount for a given key.
Non-default rate/capacity can be given to override Throttler defaults.
Returns:
bool: whether the units could be consumed
"""
bucket = self.get_bucket(key, rate, capacity, **kwargs)
return bucket.consume(amount) | 0.005249 |
def cross(*sequences):
"""
From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html
"""
# visualize an odometer, with "wheels" displaying "digits"...:
    # build a concrete list of iterators so they can be re-indexed and reset
    wheels = [iter(sequence) for sequence in sequences]
    digits = [next(it) for it in wheels]
    while True:
        yield tuple(digits)
        for i in range(len(digits)-1, -1, -1):
            try:
                digits[i] = next(wheels[i])
                break
            except StopIteration:
                wheels[i] = iter(sequences[i])
                digits[i] = next(wheels[i])
else:
break | 0.023009 |
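A minimal usage sketch of the generator above, with the Python 3 fixes applied (the two input sequences are illustrative):
print(list(cross([1, 2], ['a', 'b'])))
# expected: [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]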
def setLoggerLevel(self, logger, level):
"""
Sets the level to log the inputed logger at.
:param logger | <str>
level | <int>
"""
if logger == 'root':
_log = logging.getLogger()
else:
_log = logging.getLogger(logger)
_log.setLevel(level)
if level == logging.NOTSET:
self._loggerLevels.pop(logger, None)
else:
self._loggerLevels[logger] = level | 0.009381 |
def list_tables(self):
"""
Returns the existing tables.
Tables are returned in lexicographical order.
:rtype: ~collections.Iterable[.Table]
"""
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
path = '/archive/{}/tables'.format(self._instance)
response = self._client.get_proto(path=path)
message = rest_pb2.ListTablesResponse()
message.ParseFromString(response.content)
tables = getattr(message, 'table')
return iter([Table(table) for table in tables]) | 0.003135 |
def add_packages(packages):
"""Add external packages to the pyspark interpreter.
Set the PYSPARK_SUBMIT_ARGS properly.
Parameters
----------
packages: list of package names in string format
"""
#if the parameter is a string, convert to a single element list
if isinstance(packages,str):
packages = [packages]
_add_to_submit_args("--packages "+ ",".join(packages) +" pyspark-shell") | 0.013953 |
def polynet(num_classes=1000, pretrained='imagenet'):
"""PolyNet architecture from the paper
'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks'
https://arxiv.org/abs/1611.05725
"""
if pretrained:
settings = pretrained_settings['polynet'][pretrained]
assert num_classes == settings['num_classes'], \
'num_classes should be {}, but is {}'.format(
settings['num_classes'], num_classes)
model = PolyNet(num_classes=num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = PolyNet(num_classes=num_classes)
return model | 0.001134 |
def prob_lnm(m1, m2, s1z, s2z, **kwargs):
''' Return probability density for uniform in log
Parameters
----------
m1: array
Component masses 1
m2: array
Component masses 2
s1z: array
Aligned spin 1(Not in use currently)
s2z:
Aligned spin 2(Not in use currently)
**kwargs: string
Keyword arguments as model parameters
Returns
-------
p_m1_m2: array
The probability density for m1, m2 pair
'''
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
max_mtotal = min_mass + max_mass
m1, m2 = np.array(m1), np.array(m2)
C_lnm = integrate.quad(lambda x: (log(max_mtotal - x) - log(min_mass))/x, min_mass, max_mass)[0]
xx = np.minimum(m1, m2)
m1 = np.maximum(m1, m2)
m2 = xx
bound = np.sign(max_mtotal - m1 - m2)
bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)
idx = np.where(bound != 2)
p_m1_m2 = (1/C_lnm)*(1./m1)*(1./m2)
p_m1_m2[idx] = 0
return p_m1_m2 | 0.001815 |
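A minimal usage sketch (assumes numpy as np and scipy's integrate as in the snippet; the mass pairs are illustrative):
import numpy as np
m1 = np.array([10.0, 40.0, 90.0])
m2 = np.array([10.0, 30.0, 80.0])
print(prob_lnm(m1, m2, min_mass=5., max_mass=95.))
# the last pair has m1 + m2 > min_mass + max_mass, so its density is set to 0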
def filter_convolve(data, filters, filter_rot=False, method='scipy'):
r"""Filter convolve
This method convolves the input image with the wavelet filters
Parameters
----------
data : np.ndarray
Input data, 2D array
filters : np.ndarray
Wavelet filters, 3D array
filter_rot : bool, optional
Option to rotate wavelet filters (default is 'False')
method : str {'astropy', 'scipy'}, optional
Convolution method (default is 'scipy')
Returns
-------
np.ndarray convolved data
Examples
--------
>>> from modopt.signal.wavelet import filter_convolve
>>> x = np.arange(9).reshape(3, 3).astype(float)
>>> y = np.arange(36).reshape(4, 3, 3).astype(float)
>>> filter_convolve(x, y)
array([[[ 174., 165., 174.],
[ 93., 84., 93.],
[ 174., 165., 174.]],
[[ 498., 489., 498.],
[ 417., 408., 417.],
[ 498., 489., 498.]],
[[ 822., 813., 822.],
[ 741., 732., 741.],
[ 822., 813., 822.]],
[[ 1146., 1137., 1146.],
[ 1065., 1056., 1065.],
[ 1146., 1137., 1146.]]])
>>> filter_convolve(y, y, filter_rot=True)
array([[ 14550., 14586., 14550.],
[ 14874., 14910., 14874.],
[ 14550., 14586., 14550.]])
"""
if filter_rot:
return np.sum([convolve(coef, f, method=method) for coef, f in
zip(data, rotate_stack(filters))], axis=0)
else:
return np.array([convolve(data, f, method=method) for f in filters]) | 0.000603 |
def set_conn(self, **kwargs):
""" takes a connection and creates the connection """
# log = logging.getLogger("%s.%s" % (self.log, inspect.stack()[0][3]))
log.setLevel(kwargs.get('log_level',self.log_level))
conn_name = kwargs.get("name")
if not conn_name:
raise NameError("a connection requires a 'name': %s" % kwargs)
elif self.conns.get(conn_name):
raise KeyError("connection '%s' has already been set" % conn_name)
if not kwargs.get("active", True):
log.warning("Connection '%s' is set as inactive" % conn_name)
return
conn_type = kwargs.get("conn_type")
if not conn_type or conn_type not in self.conn_mapping.nested:
err_msg = ["a connection requires a valid 'conn_type':\n",
"%s"]
raise NameError("".join(err_msg) % (list(self.conn_mapping.nested)))
log.info("Setting '%s' connection", conn_name)
if conn_type == "triplestore":
conn = make_tstore_conn(kwargs)
else:
conn = RdfwConnections[conn_type][kwargs['vendor']](**kwargs)
self.conns[conn_name] = conn
self.__is_initialized__ = True | 0.00326 |
def get_steam_id(vanityurl, **kwargs):
"""
    Get a player's Steam ID from their Steam name/vanity URL
"""
params = {"vanityurl": vanityurl}
return make_request("ResolveVanityURL", params, version="v0001",
base="http://api.steampowered.com/ISteamUser/", **kwargs) | 0.006969 |
def create(self, list_id, data):
"""
Add a new member to the list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"status": string*, (Must be one of 'subscribed', 'unsubscribed', 'cleaned',
'pending', or 'transactional')
"email_address": string*
}
"""
self.list_id = list_id
if 'status' not in data:
raise KeyError('The list member must have a status')
if data['status'] not in ['subscribed', 'unsubscribed', 'cleaned', 'pending', 'transactional']:
raise ValueError('The list member status must be one of "subscribed", "unsubscribed", "cleaned", '
'"pending", or "transactional"')
if 'email_address' not in data:
raise KeyError('The list member must have an email_address')
check_email(data['email_address'])
response = self._mc_client._post(url=self._build_path(list_id, 'members'), data=data)
if response is not None:
self.subscriber_hash = response['id']
else:
self.subscriber_hash = None
return response | 0.004658 |
def errorFunction(self, t, a):
"""
Using a hyperbolic arctan on the error slightly exaggerates
the actual error non-linearly. Return t - a to just use the difference.
t - target vector
a - activation vector
"""
def difference(v):
if not self.hyperbolicError:
#if -0.1 < v < 0.1: return 0.0
#else:
return v
else:
if v < -0.9999999: return -17.0
elif v > 0.9999999: return 17.0
else: return math.log( (1.0 + v) / (1.0 - v) )
#else: return Numeric.arctanh(v) # half that above
return list(map(difference, t - a)) | 0.018233 |
def get_config_tuple_from_egrc(egrc_path):
"""
Create a Config named tuple from the values specified in the .egrc. Expands
any paths as necessary.
    egrc_path must exist and point to a file.
If not present in the .egrc, properties of the Config are returned as None.
"""
with open(egrc_path, 'r') as egrc:
try:
config = ConfigParser.RawConfigParser()
except AttributeError:
config = ConfigParser()
config.readfp(egrc)
# default to None
examples_dir = None
custom_dir = None
use_color = None
pager_cmd = None
squeeze = None
subs = None
editor_cmd = None
if config.has_option(DEFAULT_SECTION, EG_EXAMPLES_DIR):
examples_dir = config.get(DEFAULT_SECTION, EG_EXAMPLES_DIR)
examples_dir = get_expanded_path(examples_dir)
if config.has_option(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR):
custom_dir = config.get(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR)
custom_dir = get_expanded_path(custom_dir)
if config.has_option(DEFAULT_SECTION, USE_COLOR):
use_color_raw = config.get(DEFAULT_SECTION, USE_COLOR)
use_color = _parse_bool_from_raw_egrc_value(use_color_raw)
if config.has_option(DEFAULT_SECTION, PAGER_CMD):
pager_cmd_raw = config.get(DEFAULT_SECTION, PAGER_CMD)
pager_cmd = ast.literal_eval(pager_cmd_raw)
if config.has_option(DEFAULT_SECTION, EDITOR_CMD):
editor_cmd_raw = config.get(DEFAULT_SECTION, EDITOR_CMD)
editor_cmd = ast.literal_eval(editor_cmd_raw)
color_config = get_custom_color_config_from_egrc(config)
if config.has_option(DEFAULT_SECTION, SQUEEZE):
squeeze_raw = config.get(DEFAULT_SECTION, SQUEEZE)
squeeze = _parse_bool_from_raw_egrc_value(squeeze_raw)
if config.has_section(SUBSTITUTION_SECTION):
subs = get_substitutions_from_config(config)
return Config(
examples_dir=examples_dir,
custom_dir=custom_dir,
color_config=color_config,
use_color=use_color,
pager_cmd=pager_cmd,
editor_cmd=editor_cmd,
squeeze=squeeze,
subs=subs,
) | 0.000432 |
def compare_verbs(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as verbs
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_verb) | 0.004065 |
def katex_rendering_delimiters(app):
"""Delimiters for rendering KaTeX math.
If no delimiters are specified in katex_options, add the
katex_inline and katex_display delimiters. See also
https://khan.github.io/KaTeX/docs/autorender.html
"""
# Return if we have user defined rendering delimiters
if 'delimiters' in app.config.katex_options:
return ''
katex_inline = [d.replace('\\', '\\\\') for d in app.config.katex_inline]
katex_display = [d.replace('\\', '\\\\') for d in app.config.katex_display]
katex_delimiters = {'inline': katex_inline, 'display': katex_display}
# Set chosen delimiters for the auto-rendering options of KaTeX
delimiters = r'''delimiters: [
{{ left: "{inline[0]}", right: "{inline[1]}", display: false }},
{{ left: "{display[0]}", right: "{display[1]}", display: true }}
]'''.format(**katex_delimiters)
return delimiters | 0.001078 |
def fetch(self, is_dl_forced=True):
"""
Fetches data from udp collaboration server,
see top level comments for class for more information
:return:
"""
username = config.get_config()['dbauth']['udp']['user']
password = config.get_config()['dbauth']['udp']['password']
credentials = (username, password)
# Get patient map file:
patient_id_map = self.open_and_parse_yaml(self.map_files['patient_ids'])
udp_internal_ids = patient_id_map.keys()
phenotype_fields = ['Patient', 'HPID', 'Present']
# Get phenotype ids for each patient
phenotype_params = {
'method': 'search_subjects',
'subject_type': 'Phenotype',
'search_mode': 'DEEP',
'fields': 'Patient',
'conditions': 'equals',
'values': ','.join(udp_internal_ids),
'user_fields': ','.join(phenotype_fields)
}
prioritized_variants = [
'Patient', 'Gene', 'Chromosome Position', 'Variant Allele', 'Transcript']
prioritized_params = {
'method': 'search_subjects',
'subject_type': 'Variant Prioritization',
'search_mode': 'DEEP',
'fields': 'Patient',
'conditions': 'equals',
'values': ','.join(udp_internal_ids),
'user_fields': ','.join(prioritized_variants),
'format': 'json'}
variant_fields = [
'Patient', 'Family', 'Chr', 'Build', 'Chromosome Position',
'Reference Allele', 'Variant Allele', 'Parent of origin',
'Allele Type', 'Mutation Type', 'Gene', 'Transcript', 'Original Amino Acid',
'Variant Amino Acid', 'Amino Acid Change', 'Segregates with',
'Position', 'Exon', 'Inheritance model', 'Zygosity', 'dbSNP ID',
'1K Frequency', 'Number of Alleles']
variant_params = {
'method': 'search_subjects',
'subject_type': 'Exome Analysis Results',
'search_mode': 'DEEP',
'fields': 'Patient',
'conditions': 'equals',
'user_fields': ','.join(variant_fields),
'format': 'json'}
pheno_file = open(
'/'.join((self.rawdir, self.files['patient_phenotypes']['file'])), 'w')
variant_file = open(
'/'.join((self.rawdir, self.files['patient_variants']['file'])), 'w')
pheno_file.write('{0}\n'.format('\t'.join(phenotype_fields)))
variant_file.write('{0}\n'.format('\t'.join(variant_fields)))
variant_gene = self._fetch_data_from_udp(
udp_internal_ids, prioritized_params, prioritized_variants, credentials)
variant_gene_map = dict()
for line in variant_gene:
variant_gene_map.setdefault(line[0], []).append(
# Try to make a unique value based on gene-pos-variantAlele-transcript
# TODO make this a dict for readability purposes
"{0}-{1}-{2}-{3}".format(line[1], line[2], line[3], line[4]))
variant_info = self._fetch_data_from_udp(
udp_internal_ids, variant_params, variant_fields, credentials)
for line in variant_info:
variant = "{0}-{1}-{2}-{3}".format(line[10], line[4], line[6], line[11])
if variant in variant_gene_map[line[0]]:
line[0] = patient_id_map[line[0]]
line[4] = re.sub(r'\.0$', '', line[4])
variant_file.write('{0}\n'.format('\t'.join(line)))
phenotype_info = self._fetch_data_from_udp(
udp_internal_ids, phenotype_params, phenotype_fields, credentials)
for line in phenotype_info:
line[0] = patient_id_map[line[0]]
pheno_file.write('{0}\n'.format('\t'.join(line)))
variant_file.close()
pheno_file.close()
return | 0.002561 |
def to_json_dict(self, filter_fcn=None):
"""Create a dict with Entity properties for json encoding.
It can be overridden by subclasses for each standard serialization
doesn't work. By default it call _to_json_dict on OneToOne fields
and build a list calling the same method on each OneToMany object's
fields.
Fields can be filtered accordingly to 'filter_fcn'. This callable
receives field's name as first parameter and fields itself as second
parameter. It must return True if field's value should be included on
dict and False otherwise. If not provided field will not be filtered.
:type filter_fcn: callable
:return: dct
"""
fields, values = self.get_fields(), self.get_values()
filtered_fields = fields.items()
if filter_fcn is not None:
filtered_fields = (
tpl for tpl in filtered_fields if filter_fcn(tpl[0], tpl[1])
)
json_dct = {}
for field_name, field in filtered_fields:
if field_name in values:
value = values[field_name]
if value is None:
json_dct[field_name] = None
                # This condition is needed because sometimes you get
                # None on a OneToOneField, which leads to an error
                # in the condition below, e.g., calling value.to_json_dict()
                # when value is None
elif isinstance(field, OneToOneField):
json_dct[field_name] = value.to_json_dict()
elif isinstance(field, OneToManyField):
json_dct[field_name] = [
entity.to_json_dict() for entity in value
]
else:
json_dct[field_name] = to_json_serializable(value)
return json_dct | 0.001048 |
def cc_from_arg_kinds(self, fp_args, ret_fp=None, sizes=None, sp_delta=None, func_ty=None):
"""
Get a SimCC (calling convention) that will extract floating-point/integral args correctly.
:param arch: The Archinfo arch for this CC
:param fp_args: A list, with one entry for each argument the function can take. True if the argument is fp,
false if it is integral.
:param ret_fp: True if the return value for the function is fp.
:param sizes: Optional: A list, with one entry for each argument the function can take. Each entry is the
size of the corresponding argument in bytes.
:param sp_delta: The amount the stack pointer changes over the course of this function - CURRENTLY UNUSED
:param func_ty: A SimType for the function itself or a C-style function declaration that can be parsed into
a SimTypeFunction instance.
Example func_ty strings:
>>> "int func(char*, int)"
>>> "int f(int, int, int*);"
Function names are ignored.
"""
return self._default_cc.from_arg_kinds(arch=self.project.arch,
fp_args=fp_args,
ret_fp=ret_fp,
sizes=sizes,
sp_delta=sp_delta,
func_ty=func_ty) | 0.009393 |
def users(self):
"""
Gets the Users API client.
Returns:
Users:
"""
if not self.__users:
self.__users = Users(self.__connection)
return self.__users | 0.00905 |
def user_choice(prompt, choices=("yes", "no"), default=None):
"""
Prompts the user for confirmation. The default value, if any, is capitalized.
:param prompt: Information to display to the user.
:param choices: an iterable of possible choices.
:param default: default choice
:return: the user's choice
"""
assert default is None or default in choices
choice_list = ', '.join((choice.title() if choice == default else choice for choice in choices))
response = None
while response not in choices:
response = input(prompt + ' [' + choice_list + ']: ')
response = response.lower() if response else default
return response | 0.004392 |
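A minimal usage sketch (interactive; the prompt text and default are illustrative):
answer = user_choice("Overwrite existing file?", choices=("yes", "no"), default="no")
# prompt shown: Overwrite existing file? [yes, No]:
# pressing Enter accepts the capitalized default and returns "no"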
def ImportConfig(filename, config):
"""Reads an old config file and imports keys and user accounts."""
sections_to_import = ["PrivateKeys"]
entries_to_import = [
"Client.executable_signing_public_key", "CA.certificate",
"Frontend.certificate"
]
options_imported = 0
old_config = grr_config.CONFIG.MakeNewConfig()
old_config.Initialize(filename)
for entry in old_config.raw_data:
try:
section = entry.split(".")[0]
if section in sections_to_import or entry in entries_to_import:
config.Set(entry, old_config.Get(entry))
print("Imported %s." % entry)
options_imported += 1
except Exception as e: # pylint: disable=broad-except
print("Exception during import of %s: %s" % (entry, e))
return options_imported | 0.015248 |
def export_to_wif(self, compressed=None):
"""Export a key to WIF.
:param compressed: False if you want a standard WIF export (the most
standard option). True if you want the compressed form (Note that
not all clients will accept this form). Defaults to None, which
in turn uses the self.compressed attribute.
:type compressed: bool
See https://en.bitcoin.it/wiki/Wallet_import_format for a full
description.
"""
# Add the network byte, creating the "extended key"
extended_key_hex = self.get_extended_key()
extended_key_bytes = unhexlify(extended_key_hex)
if compressed is None:
compressed = self.compressed
if compressed:
            extended_key_bytes += b'\x01'
# And return the base58-encoded result with a checksum
return ensure_str(base58.b58encode_check(extended_key_bytes)) | 0.002146 |
def config_path(self, value):
"""Set config_path"""
self._config_path = value or ''
if not isinstance(self._config_path, str):
raise BadArgumentError("config_path must be string: {}".format(
self._config_path)) | 0.007634 |
def list(self, **params):
"""
Retrieve all lead unqualified reasons
Returns all lead unqualified reasons available to the user according to the parameters provided
:calls: ``get /lead_unqualified_reasons``
:param dict params: (optional) Search options.
        :return: List of dictionaries that support attribute-style access, which represent a collection of LeadUnqualifiedReasons.
:rtype: list
"""
_, _, lead_unqualified_reasons = self.http_client.get("/lead_unqualified_reasons", params=params)
return lead_unqualified_reasons | 0.008292 |
def _bgzip_from_fastq(data):
"""Prepare a bgzipped file from a fastq input, potentially gzipped (or bgzipped already).
"""
in_file = data["in_file"]
if isinstance(in_file, (list, tuple)):
in_file = in_file[0]
needs_convert = dd.get_quality_format(data).lower() == "illumina"
# special case, empty files that have been cleaned
if not objectstore.is_remote(in_file) and os.path.getsize(in_file) == 0:
needs_bgzip, needs_gunzip = False, False
elif in_file.endswith(".gz") and not objectstore.is_remote(in_file):
if needs_convert or dd.get_trim_ends(data):
needs_bgzip, needs_gunzip = True, True
else:
needs_bgzip, needs_gunzip = _check_gzipped_input(in_file, data)
elif in_file.endswith(".bz2"):
needs_bgzip, needs_gunzip = True, True
elif objectstore.is_remote(in_file) and not tz.get_in(["config", "algorithm", "align_split_size"], data):
needs_bgzip, needs_gunzip = False, False
else:
needs_bgzip, needs_gunzip = True, False
work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "align_prep"))
if (needs_bgzip or needs_gunzip or needs_convert or dd.get_trim_ends(data) or
objectstore.is_remote(in_file) or
(isinstance(data["in_file"], (tuple, list)) and len(data["in_file"]) > 1)):
out_file = _bgzip_file(data["in_file"], data["config"], work_dir,
needs_bgzip, needs_gunzip, needs_convert, data)
else:
out_file = os.path.join(work_dir, "%s_%s" % (dd.get_sample_name(data), os.path.basename(in_file)))
out_file = _symlink_or_copy_grabix(in_file, out_file, data)
return out_file | 0.005288 |
def post_bug(self, bug):
'''http://bugzilla.readthedocs.org/en/latest/api/core/v1/bug.html#create-bug'''
assert type(bug) is DotDict
assert 'product' in bug
assert 'component' in bug
assert 'summary' in bug
        if 'version' not in bug: bug.version = 'other'
        if 'op_sys' not in bug: bug.op_sys = 'All'
        if 'platform' not in bug: bug.platform = 'All'
return self._post('bug', json.dumps(bug)) | 0.019397 |
def stringify(req, resp):
"""
dumps all valid jsons
This is the latest after hook
"""
if isinstance(resp.body, dict):
try:
resp.body = json.dumps(resp.body)
        except (TypeError, ValueError):
resp.status = falcon.HTTP_500 | 0.003774 |
def get_commands(self):
"""Gets command that have been run and have not been redacted.
"""
shutit_global.shutit_global_object.yield_to_draw()
s = ''
for c in self.build['shutit_command_history']:
if isinstance(c, str):
#Ignore commands with leading spaces
if c and c[0] != ' ':
s += c + '\n'
return s | 0.039634 |
def args_ok(self, options, args):
"""Check for conflicts and problems in the options.
Returns True if everything is ok, or False if not.
"""
for i in ['erase', 'execute']:
for j in ['annotate', 'html', 'report', 'combine']:
if (i in options.actions) and (j in options.actions):
self.help_fn("You can't specify the '%s' and '%s' "
"options at the same time." % (i, j))
return False
if not options.actions:
self.help_fn(
"You must specify at least one of -e, -x, -c, -r, -a, or -b."
)
return False
args_allowed = (
'execute' in options.actions or
'annotate' in options.actions or
'html' in options.actions or
'debug' in options.actions or
'report' in options.actions or
'xml' in options.actions
)
if not args_allowed and args:
self.help_fn("Unexpected arguments: %s" % " ".join(args))
return False
if 'execute' in options.actions and not args:
self.help_fn("Nothing to do.")
return False
return True | 0.002381 |
def quaternion_to_rotation_matrix(quaternion):
"""Compute the rotation matrix representated by the quaternion"""
c, x, y, z = quaternion
return np.array([
[c*c + x*x - y*y - z*z, 2*x*y - 2*c*z, 2*x*z + 2*c*y ],
[2*x*y + 2*c*z, c*c - x*x + y*y - z*z, 2*y*z - 2*c*x ],
[2*x*z - 2*c*y, 2*y*z + 2*c*x, c*c - x*x - y*y + z*z]
], float) | 0.007212 |
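A minimal sanity check (assumes numpy as np, as in the snippet): a unit quaternion for a 90-degree rotation about the z axis should map the x axis onto the y axis.
import numpy as np
theta = np.pi / 2
q = np.array([np.cos(theta / 2), 0.0, 0.0, np.sin(theta / 2)])  # ordering (c, x, y, z)
R = quaternion_to_rotation_matrix(q)
print(np.round(R.dot(np.array([1.0, 0.0, 0.0])), 6))  # expected: [0. 1. 0.]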
def save(self, file_path, note_df):
'''
Save MIDI file.
Args:
file_path: File path of MIDI.
note_df: `pd.DataFrame` of note data.
'''
chord = pretty_midi.PrettyMIDI()
for program in note_df.program.drop_duplicates().values.tolist():
df = note_df[note_df.program == program]
midi_obj = pretty_midi.Instrument(program=program)
for i in range(df.shape[0]):
note = pretty_midi.Note(
velocity=int(df.iloc[i, :]["velocity"]),
pitch=int(df.iloc[i, :]["pitch"]),
start=float(df.iloc[i, :]["start"]),
end=float(df.iloc[i, :]["end"])
)
# Add it to our cello instrument
midi_obj.notes.append(note)
# Add the cello instrument to the PrettyMIDI object
chord.instruments.append(midi_obj)
# Write out the MIDI data
chord.write(file_path) | 0.00478 |
def write_metadata(self):
"""Write all ID3v2.4 tags to file from self.metadata"""
import mutagen
from mutagen import id3
id3 = id3.ID3(self.filename)
for tag in self.metadata.keys():
value = self.metadata[tag]
frame = mutagen.id3.Frames[tag](3, value)
try:
id3.add(frame)
except:
raise IOError('EncoderError: cannot tag "' + tag + '"')
try:
id3.save()
except:
raise IOError('EncoderError: cannot write tags') | 0.00703 |
def merge_directories(self, directory_digests):
"""Merges any number of directories.
:param directory_digests: Tuple of DirectoryDigests.
:return: A Digest.
"""
result = self._native.lib.merge_directories(
self._scheduler,
self._to_value(_DirectoryDigests(directory_digests)),
)
return self._raise_or_return(result) | 0.002809 |
def venqueue(trg_queue, item_f, args, user=None, group=None, mode=None):
    '''Enqueue the contents of a file, file-like object, file descriptor, or
    the contents of a file at a path (e.g. '/my/file') into a queue, with
    an argument list; venqueue is to enqueue what vprintf is to printf.
    If entropy is passed in, failure on duplicates is raised to the caller;
    if entropy is not passed in, venqueue will increment entropy until it
    can create the queue item.
'''
# setup defaults
trg_fd = name = None
user = _c.FSQ_ITEM_USER if user is None else user
group = _c.FSQ_ITEM_GROUP if group is None else group
mode = _c.FSQ_ITEM_MODE if mode is None else mode
now = fmt_time(datetime.datetime.now(), _c.FSQ_TIMEFMT, _c.FSQ_CHARSET)
pid = coerce_unicode(os.getpid(), _c.FSQ_CHARSET)
host = coerce_unicode(_HOSTNAME, _c.FSQ_CHARSET)
tries = u'0'
entropy = _mkentropy(pid, now, host)
# open source file
try:
src_file = rationalize_file(item_f, _c.FSQ_CHARSET)
except (OSError, IOError, ), e:
raise FSQEnqueueError(e.errno, wrap_io_os_err(e))
try:
real_file = True if hasattr(src_file, 'fileno') else False
# get low, so we can use some handy options; man 2 open
try:
item_name = construct(( now, entropy, pid, host,
tries, ) + tuple(args))
tmp_name = os.path.join(fsq_path.tmp(trg_queue), item_name)
trg_fd = os.open(tmp_name, os.O_WRONLY|os.O_CREAT|os.O_EXCL, mode)
except (OSError, IOError, ), e:
if isinstance(e, FSQError):
raise e
raise FSQEnqueueError(e.errno, wrap_io_os_err(e))
try:
if user is not None or group is not None:
# set user/group ownership for file; man 2 fchown
os.fchown(trg_fd, *uid_gid(user, group, fd=trg_fd))
with closing(os.fdopen(trg_fd, 'wb', 1)) as trg_file:
# i/o time ... assume line-buffered
while True:
if real_file:
reads, dis, card = select.select([src_file], [], [])
try:
msg = os.read(reads[0].fileno(), 2048)
if 0 == len(msg):
break
except (OSError, IOError, ), e:
if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN,):
continue
raise e
trg_file.write(msg)
else:
line = src_file.readline()
if not line:
break
trg_file.write(line)
# flush buffers, and force write to disk pre mv.
trg_file.flush()
os.fsync(trg_file.fileno())
# hard-link into queue, unlink tmp, failure case here leaves
# cruft in tmp, but no race condition into queue
os.link(tmp_name, os.path.join(fsq_path.item(trg_queue,
item_name)))
os.unlink(tmp_name)
# return the queue item id (filename)
return item_name
except Exception, e:
try:
os.close(trg_fd)
except (OSError, IOError, ), err:
if err.errno != errno.EBADF:
raise FSQEnqueueError(err.errno, wrap_io_os_err(err))
try:
if tmp_name is not None:
os.unlink(tmp_name)
except (OSError, IOError, ), err:
if err.errno != errno.ENOENT:
raise FSQEnqueueError(err.errno, wrap_io_os_err(err))
try:
if name is not None:
os.unlink(name)
except OSError, err:
if err.errno != errno.ENOENT:
raise FSQEnqueueError(err.errno, wrap_io_os_err(err))
if (isinstance(e, OSError) or isinstance(e, IOError)) and\
not isinstance(e, FSQError):
raise FSQEnqueueError(e.errno, wrap_io_os_err(e))
raise e
finally:
src_file.close() | 0.001605 |
def buildprior(self, prior, mopt=None, extend=False):
" Extract the model's parameters from prior. "
newprior = {}
# allow for log-normal, etc priors
intercept, slope = gv.get_dictkeys(
prior, [self.intercept, self.slope]
)
newprior[intercept] = prior[intercept]
if mopt is None:
# slope parameter marginalized if mopt is not None
newprior[slope] = prior[slope]
return newprior | 0.004158 |
def qos_queue_scheduler_strict_priority_dwrr_traffic_class0(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
queue = ET.SubElement(qos, "queue")
scheduler = ET.SubElement(queue, "scheduler")
strict_priority = ET.SubElement(scheduler, "strict-priority")
dwrr_traffic_class0 = ET.SubElement(strict_priority, "dwrr-traffic-class0")
dwrr_traffic_class0.text = kwargs.pop('dwrr_traffic_class0')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006116 |
def _run_info_from_yaml(dirs, run_info_yaml, config, sample_names=None,
is_cwl=False, integrations=None):
"""Read run information from a passed YAML file.
"""
validate_yaml(run_info_yaml, run_info_yaml)
with open(run_info_yaml) as in_handle:
loaded = yaml.safe_load(in_handle)
fc_name, fc_date = None, None
if dirs.get("flowcell"):
try:
fc_name, fc_date = flowcell.parse_dirname(dirs.get("flowcell"))
except ValueError:
pass
global_config = {}
global_vars = {}
resources = {}
integration_config = {}
if isinstance(loaded, dict):
global_config = copy.deepcopy(loaded)
del global_config["details"]
if "fc_name" in loaded:
fc_name = loaded["fc_name"].replace(" ", "_")
if "fc_date" in loaded:
fc_date = str(loaded["fc_date"]).replace(" ", "_")
global_vars = global_config.pop("globals", {})
resources = global_config.pop("resources", {})
for iname in ["arvados"]:
integration_config[iname] = global_config.pop(iname, {})
loaded = loaded["details"]
if sample_names:
loaded = [x for x in loaded if x["description"] in sample_names]
if integrations:
for iname, retriever in integrations.items():
if iname in config:
config[iname] = retriever.set_cache(config[iname])
loaded = retriever.add_remotes(loaded, config[iname])
run_details = []
for i, item in enumerate(loaded):
item = _normalize_files(item, dirs.get("flowcell"))
if "lane" not in item:
item["lane"] = str(i + 1)
item["lane"] = _clean_characters(item["lane"])
if "description" not in item:
if _item_is_bam(item):
item["description"] = get_sample_name(item["files"][0])
else:
raise ValueError("No `description` sample name provided for input #%s" % (i + 1))
description = _clean_characters(item["description"])
item["description"] = description
# make names R safe if we are likely to use R downstream
if item["analysis"].lower() in R_DOWNSTREAM_ANALYSIS:
if description[0].isdigit():
valid = "X" + description
logger.info("%s is not a valid R name, converting to %s." % (description, valid))
item["description"] = valid
if "upload" not in item and not is_cwl:
upload = global_config.get("upload", {})
# Handle specifying a local directory directly in upload
if isinstance(upload, six.string_types):
upload = {"dir": upload}
if not upload:
upload["dir"] = "../final"
if fc_name:
upload["fc_name"] = fc_name
if fc_date:
upload["fc_date"] = fc_date
upload["run_id"] = ""
if upload.get("dir"):
upload["dir"] = _file_to_abs(upload["dir"], [dirs.get("work")], makedir=True)
item["upload"] = upload
item["algorithm"] = _replace_global_vars(item["algorithm"], global_vars)
item["algorithm"] = genome.abs_file_paths(item["algorithm"],
ignore_keys=ALGORITHM_NOPATH_KEYS,
fileonly_keys=ALGORITHM_FILEONLY_KEYS,
do_download=all(not x for x in integrations.values()))
item["genome_build"] = str(item.get("genome_build", ""))
item["algorithm"] = _add_algorithm_defaults(item["algorithm"], item.get("analysis", ""), is_cwl)
item["metadata"] = add_metadata_defaults(item.get("metadata", {}))
item["rgnames"] = prep_rg_names(item, config, fc_name, fc_date)
if item.get("files"):
item["files"] = [genome.abs_file_paths(f, do_download=all(not x for x in integrations.values()))
for f in item["files"]]
elif "files" in item:
del item["files"]
if item.get("vrn_file") and isinstance(item["vrn_file"], six.string_types):
item["vrn_file"] = genome.abs_file_paths(item["vrn_file"],
do_download=all(not x for x in integrations.values()))
if os.path.isfile(item["vrn_file"]):
# Try to prepare in place (or use ready to go inputs)
try:
item["vrn_file"] = vcfutils.bgzip_and_index(item["vrn_file"], config,
remove_orig=False)
# In case of permission errors, fix in inputs directory
except IOError:
inputs_dir = utils.safe_makedir(os.path.join(dirs.get("work", os.getcwd()), "inputs",
item["description"]))
item["vrn_file"] = vcfutils.bgzip_and_index(item["vrn_file"], config,
remove_orig=False, out_dir=inputs_dir)
if not tz.get_in(("metadata", "batch"), item) and tz.get_in(["algorithm", "validate"], item):
raise ValueError("%s: Please specify a metadata batch for variant file (vrn_file) input.\n" %
(item["description"]) +
"Batching with a standard sample provides callable regions for validation.")
item = _clean_metadata(item)
item = _clean_algorithm(item)
item = _organize_tools_on(item, is_cwl)
item = _clean_background(item)
# Add any global resource specifications
if "resources" not in item:
item["resources"] = {}
for prog, pkvs in resources.items():
if prog not in item["resources"]:
item["resources"][prog] = {}
if pkvs is not None:
for key, val in pkvs.items():
item["resources"][prog][key] = val
for iname, ivals in integration_config.items():
if ivals:
if iname not in item:
item[iname] = {}
for k, v in ivals.items():
item[iname][k] = v
run_details.append(item)
_check_sample_config(run_details, run_info_yaml, config)
return run_details | 0.003241 |
def get_outputs(self):
"""
Get a list of outputs. The equivalent of :command:`i3-msg -t get_outputs`.
:rtype: List of :class:`OutputReply`.
Example output:
.. code:: python
>>> i3ipc.Connection().get_outputs()
[{'name': 'eDP1',
'primary': True,
'active': True,
'rect': {'width': 1920, 'height': 1080, 'y': 0, 'x': 0},
'current_workspace': '2'},
{'name': 'xroot-0',
'primary': False,
'active': False,
'rect': {'width': 1920, 'height': 1080, 'y': 0, 'x': 0},
'current_workspace': None}]
"""
data = self.message(MessageType.GET_OUTPUTS, '')
return json.loads(data, object_hook=OutputReply) | 0.003731 |
def warmness(level=100, group=0):
""" Assumes level is out of 100 """
    if level not in range(0, 101):
        raise Exception("Warmness must be a value between 0 and 100")
    b = int(floor(level / 10.0))  # lights have 10 levels of warmness
commands = list(coolest(group))
for i in range(0, b):
commands.append(COMMANDS['WARMER'])
return tuple(commands) | 0.010638 |
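# Illustrative sketch (not part of the original module): the mapping from a
# 0-100 warmness level to the number of WARMER commands appended after the
# "coolest" reset sequence is a simple floor division by 10.
from math import floor
assert int(floor(35 / 10.0)) == 3    # warmness(35) appends three WARMER commands
assert int(floor(100 / 10.0)) == 10  # warmness(100) appends the full ten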
def mutualFundSymbolsDF(token='', version=''):
'''This call returns an array of mutual fund symbols that IEX Cloud supports for API calls.
https://iexcloud.io/docs/api/#mutual-fund-symbols
8am, 9am, 12pm, 1pm UTC daily
Args:
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.DataFrame(mutualFundSymbols(token, version))
_toDatetime(df)
_reindex(df, 'symbol')
return df | 0.004141 |
def set_(device, minor, flag, state):
'''
Changes a flag on the partition with number <minor>.
A flag can be either "on" or "off" (make sure to use proper quoting, see
:ref:`YAML Idiosyncrasies <yaml-idiosyncrasies>`). Some or all of these
flags will be available, depending on what disk label you are using.
Valid flags are:
* boot
* root
* swap
* hidden
* raid
* lvm
* lba
* hp-service
* palo
* prep
* msftres
* bios_grub
* atvrecv
* diag
* legacy_boot
* msftdata
* irst
* esp
* type
CLI Example:
.. code-block:: bash
salt '*' partition.set /dev/sda 1 boot '"on"'
'''
_validate_device(device)
try:
int(minor)
except Exception:
raise CommandExecutionError(
'Invalid minor number passed to partition.set'
)
if flag not in VALID_PARTITION_FLAGS:
raise CommandExecutionError('Invalid flag passed to partition.set')
if state not in set(['on', 'off']):
raise CommandExecutionError('Invalid state passed to partition.set')
cmd = 'parted -m -s {0} set {1} {2} {3}'.format(device, minor, flag, state)
out = __salt__['cmd.run'](cmd).splitlines()
return out | 0.00077 |
def rtgen_family(self, value):
"""Family setter."""
self.bytearray[self._get_slicers(0)] = bytearray(c_ubyte(value or 0)) | 0.014599 |
def get_subs_dict(self, qnodes=None):
"""
Return substitution dict for replacements into the template
Subclasses may want to customize this method.
"""
#d = self.qparams.copy()
d = self.qparams
d.update(self.optimize_params(qnodes=qnodes))
# clean null values
subs_dict = {k: v for k, v in d.items() if v is not None}
#print("subs_dict:", subs_dict)
return subs_dict | 0.008811 |
def add_group(self, name: str, **kwargs) -> None:
"""
Add a group to the inventory after initialization
"""
group = {
name: deserializer.inventory.InventoryElement.deserialize_group(
name=name, defaults=self.defaults, **kwargs
)
}
self.groups.update(group) | 0.005814 |
def dump_all(data_list, stream=None, **kwargs):
"""
    Serialize a list of YAMLDict documents into a YAML stream.
If stream is None, return the produced string instead.
"""
return yaml.dump_all(
data_list,
stream=stream,
Dumper=YAMLDictDumper,
**kwargs
) | 0.003484 |
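# Minimal usage sketch (assumes only PyYAML; YAMLDictDumper from the original
# module is replaced by the default dumper here for illustration).
import yaml
docs = [{"name": "first", "values": [1, 2]}, {"name": "second", "values": [3]}]
text = yaml.dump_all(docs, stream=None, default_flow_style=False)
print(text)  # two YAML documents separated by "---", returned as a string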
def read_index(fn):
"""Reads index from file.
Args:
fn (str): the name of the file containing the index.
Returns:
pandas.DataFrame: the index of the file.
Before reading the index, we check the first couple of bytes to see if it
is a valid index file.
"""
index = None
with open(fn, "rb") as i_file:
if i_file.read(len(_CHECK_STRING)) != _CHECK_STRING:
raise ValueError("{}: not a valid index file".format(fn))
index = pd.read_csv(io.StringIO(
zlib.decompress(i_file.read()).decode(encoding="utf-8"),
))
return index | 0.001608 |
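# Hedged sketch of the writer side implied by read_index above: magic bytes
# followed by a zlib-compressed UTF-8 CSV. The real module defines its own
# _CHECK_STRING; a placeholder value is used here.
import zlib
import pandas as pd

_CHECK_STRING = b"IDX1"  # placeholder magic bytes, not the module's real value

def write_index(fn, index):
    """Write a DataFrame in the format expected by read_index."""
    payload = zlib.compress(index.to_csv(index=False).encode("utf-8"))
    with open(fn, "wb") as o_file:
        o_file.write(_CHECK_STRING)
        o_file.write(payload)

write_index("demo.index", pd.DataFrame({"name": ["marker1"], "seek": [0]}))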
def _safe_minmax(values):
"""Calculate min and max of array with guards for nan and inf."""
# Nan and inf guarded min and max
isfinite = np.isfinite(values)
if np.any(isfinite):
# Only use finite values
values = values[isfinite]
minval = np.min(values)
maxval = np.max(values)
return minval, maxval | 0.002899 |
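# Quick illustration of the nan/inf guarding above (only NumPy is assumed).
import numpy as np
values = np.array([np.nan, -np.inf, 1.5, 4.0, np.inf])
finite = values[np.isfinite(values)]
print(finite.min(), finite.max())  # 1.5 4.0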
def validate(self, request, data):
"""
Validate response from OpenID server.
Set identity in case of successfull validation.
"""
client = consumer.Consumer(request.session, None)
try:
resp = client.complete(data, request.session['openid_return_to'])
except KeyError:
messages.error(request, lang.INVALID_RESPONSE_FROM_OPENID)
return redirect('netauth-login')
if resp.status == consumer.CANCEL:
messages.warning(request, lang.OPENID_CANCELED)
return redirect('netauth-login')
elif resp.status == consumer.FAILURE:
messages.error(request, lang.OPENID_FAILED % resp.message)
return redirect('netauth-login')
elif resp.status == consumer.SUCCESS:
self.identity = resp.identity_url
del request.session['openid_return_to']
return resp | 0.002153 |
def create_api_dict(bases, url, **kwargs):
"""Create an API dict
:param bases: configuration bases
:type bases: :class:`~pyextdirect.configuration.Base` or list of :class:`~pyextdirect.configuration.Base`
:param string url: URL where the router can be reached
:param \*\*kwargs: extra keyword arguments to populate the API dict. Most common keyword arguments are *id*, *maxRetries*, *namespace*, *priority* and *timeout*
.. note::
Keyword arguments *type*, *url*, *actions* and *enableUrlEncode* will be overridden
"""
api = kwargs or {}
api.update({'type': 'remoting', 'url': url, 'actions': defaultdict(list), 'enableUrlEncode': 'data'})
if not isinstance(bases, list):
bases = [bases]
configuration = merge_configurations([b.configuration for b in bases])
for action, methods in configuration.iteritems():
for method, element in methods.iteritems():
if isinstance(element, tuple):
func = getattr(element[0], element[1])
attrs = len(inspect.getargspec(func)[0]) - 1
else:
func = element
attrs = len(inspect.getargspec(func)[0])
spec = {'name': method, 'len': attrs}
if func.exposed_kind == SUBMIT:
spec['formHandler'] = True
api['actions'][action].append(spec)
return api | 0.005029 |
def _Supercooled(T, P):
"""Guideline on thermodynamic properties of supercooled water
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
Returns
-------
prop : dict
Dict with calculated properties of water. The available properties are:
* L: Ordering field, [-]
* x: Mole fraction of low-density structure, [-]
* rho: Density, [kg/m³]
* s: Specific entropy, [kJ/kgK]
* h: Specific enthalpy, [kJ/kg]
* u: Specific internal energy, [kJ/kg]
* a: Specific Helmholtz energy, [kJ/kg]
* g: Specific Gibbs energy, [kJ/kg]
* alfap: Thermal expansion coefficient, [1/K]
* xkappa : Isothermal compressibility, [1/MPa]
* cp: Specific isobaric heat capacity, [kJ/kgK]
* cv: Specific isochoric heat capacity, [kJ/kgK]
        * w: Speed of sound, [m/s]
Notes
------
    Raise :class:`NotImplementedError` if the input isn't within these limits:
    * Tm ≤ T ≤ 300
    * 0 < P ≤ 1000
    The minimum temperature in the range of validity is the melting temperature,
    which depends on pressure
Examples
--------
>>> liq = _Supercooled(235.15, 0.101325)
>>> liq["rho"], liq["cp"], liq["w"]
968.09999 5.997563 1134.5855
References
----------
IAPWS, Guideline on Thermodynamic Properties of Supercooled Water,
http://iapws.org/relguide/Supercooled.html
"""
# Check input in range of validity
if P < 198.9:
Tita = T/235.15
Ph = 0.1+228.27*(1-Tita**6.243)+15.724*(1-Tita**79.81)
if P < Ph or T > 300:
raise NotImplementedError("Incoming out of bound")
else:
Th = 172.82+0.03718*P+3.403e-5*P**2-1.573e-8*P**3
if T < Th or T > 300 or P > 1000:
raise NotImplementedError("Incoming out of bound")
# Parameters, Table 1
Tll = 228.2
rho0 = 1081.6482
R = 0.461523087
pi0 = 300e3/rho0/R/Tll
omega0 = 0.5212269
L0 = 0.76317954
k0 = 0.072158686
k1 = -0.31569232
k2 = 5.2992608
# Reducing parameters, Eq 2
tau = T/Tll-1
p = P*1000/rho0/R/Tll
tau_ = tau+1
p_ = p+pi0
# Eq 3
ci = [-8.1570681381655, 1.2875032, 7.0901673598012, -3.2779161e-2,
7.3703949e-1, -2.1628622e-1, -5.1782479, 4.2293517e-4, 2.3592109e-2,
4.3773754, -2.9967770e-3, -9.6558018e-1, 3.7595286, 1.2632441,
2.8542697e-1, -8.5994947e-1, -3.2916153e-1, 9.0019616e-2,
8.1149726e-2, -3.2788213]
ai = [0, 0, 1, -0.2555, 1.5762, 1.6400, 3.6385, -0.3828, 1.6219, 4.3287,
3.4763, 5.1556, -0.3593, 5.0361, 2.9786, 6.2373, 4.0460, 5.3558,
9.0157, 1.2194]
bi = [0, 1, 0, 2.1051, 1.1422, 0.9510, 0, 3.6402, 2.0760, -0.0016, 2.2769,
0.0008, 0.3706, -0.3975, 2.9730, -0.3180, 2.9805, 2.9265, 0.4456,
0.1298]
di = [0, 0, 0, -0.0016, 0.6894, 0.0130, 0.0002, 0.0435, 0.0500, 0.0004,
0.0528, 0.0147, 0.8584, 0.9924, 1.0041, 1.0961, 1.0228, 1.0303,
1.6180, 0.5213]
phir = phirt = phirp = phirtt = phirtp = phirpp = 0
for c, a, b, d in zip(ci, ai, bi, di):
phir += c*tau_**a*p_**b*exp(-d*p_)
phirt += c*a*tau_**(a-1)*p_**b*exp(-d*p_)
phirp += c*tau_**a*p_**(b-1)*(b-d*p_)*exp(-d*p_)
phirtt += c*a*(a-1)*tau_**(a-2)*p_**b*exp(-d*p_)
phirtp += c*a*tau_**(a-1)*p_**(b-1)*(b-d*p_)*exp(-d*p_)
phirpp += c*tau_**a*p_**(b-2)*((d*p_-b)**2-b)*exp(-d*p_)
# Eq 5
K1 = ((1+k0*k2+k1*(p-k2*tau))**2-4*k0*k1*k2*(p-k2*tau))**0.5
K2 = (1+k2**2)**0.5
# Eq 6
omega = 2+omega0*p
# Eq 4
L = L0*K2/2/k1/k2*(1+k0*k2+k1*(p+k2*tau)-K1)
# Define interval of solution, Table 4
if omega < 10/9*(log(19)-L):
xmin = 0.049
xmax = 0.5
elif 10/9*(log(19)-L) <= omega < 50/49*(log(99)-L):
xmin = 0.0099
xmax = 0.051
else:
xmin = 0.99*exp(-50/49*L-omega)
xmax = min(1.1*exp(-L-omega), 0.0101)
def f(x):
return abs(L+log(x/(1-x))+omega*(1-2*x))
x = minimize(f, ((xmin+xmax)/2,), bounds=((xmin, xmax),))["x"][0]
# Eq 12
fi = 2*x-1
Xi = 1/(2/(1-fi**2)-omega)
# Derivatives, Table 3
Lt = L0*K2/2*(1+(1-k0*k2+k1*(p-k2*tau))/K1)
Lp = L0*K2*(K1+k0*k2-k1*p+k1*k2*tau-1)/2/k2/K1
Ltt = -2*L0*K2*k0*k1*k2**2/K1**3
Ltp = 2*L0*K2*k0*k1*k2/K1**3
Lpp = -2*L0*K2*k0*k1/K1**3
prop = {}
prop["L"] = L
prop["x"] = x
# Eq 13
prop["rho"] = rho0/((tau+1)/2*(omega0/2*(1-fi**2)+Lp*(fi+1))+phirp)
# Eq 1
prop["g"] = phir+(tau+1)*(x*L+x*log(x)+(1-x)*log(1-x)+omega*x*(1-x))
# Eq 14
prop["s"] = -R*((tau+1)/2*Lt*(fi+1) +
(x*L+x*log(x)+(1-x)*log(1-x)+omega*x*(1-x))+phirt)
# Basic derived state properties
prop["h"] = prop["g"]+T*prop["s"]
prop["u"] = prop["h"]+P/prop["rho"]
prop["a"] = prop["u"]-T*prop["s"]
# Eq 15
prop["xkappa"] = prop["rho"]/rho0**2/R*1000/Tll*(
(tau+1)/2*(Xi*(Lp-omega0*fi)**2-(fi+1)*Lpp)-phirpp)
prop["alfap"] = prop["rho"]/rho0/Tll*(
Ltp/2*(tau+1)*(fi+1) + (omega0*(1-fi**2)/2+Lp*(fi+1))/2 -
(tau+1)*Lt/2*Xi*(Lp-omega0*fi) + phirtp)
prop["cp"] = -R*(tau+1)*(Lt*(fi+1)+(tau+1)/2*(Ltt*(fi+1)-Lt**2*Xi)+phirtt)
# Eq 16
prop["cv"] = prop["cp"]-T*prop["alfap"]**2/prop["rho"]/prop["xkappa"]*1e3
# Eq 17
prop["w"] = (prop["rho"]*prop["xkappa"]*1e-6*prop["cv"]/prop["cp"])**-0.5
return prop | 0.000181 |
def create_request_url(self, profile_type, steamID):
"""Create the url to submit to the Steam Community XML feed."""
        regex = re.compile(r'^\d{17,}$')
if regex.match(steamID):
if profile_type == self.USER:
url = "http://steamcommunity.com/profiles/%s/?xml=1" % (steamID)
if profile_type == self.GROUP:
url = "http://steamcommunity.com/gid/%s/memberslistxml/?xml=1" % (steamID)
else:
if profile_type == self.USER:
url = "http://steamcommunity.com/id/%s/?xml=1" % (steamID)
if profile_type == self.GROUP:
url = "http://steamcommunity.com/groups/%s/memberslistxml/?xml=1" % (steamID)
return url | 0.008097 |
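# Illustrative check of the ID-vs-vanity-name branch above (the numeric ID
# below is a made-up 17-digit example, not a real account).
import re
numeric = re.compile(r'^\d{17,}$')
print(bool(numeric.match('76561198000000000')))  # True  -> /profiles/<id>/?xml=1
print(bool(numeric.match('some_vanity_name')))   # False -> /id/<name>/?xml=1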
def dump_system_json(self, filepath=None, modular=False, **kwargs):
"""
Dump a :class:`MolecularSystem` to a JSON dictionary.
The dumped JSON dictionary, with :class:`MolecularSystem`, can then be
loaded through a JSON loader and then through :func:`load_system()`
to retrieve a :class:`MolecularSystem`.
Kwargs are passed to :func:`pywindow.io_tools.Output.dump2json()`.
Parameters
----------
filepath : :class:`str`
The filepath for the dumped file. If :class:`None`, the file is
            dumped locally with :attr:`system_id` as filename.
            (default=None)
        modular : :class:`bool`
            If False, dump the :class:`MolecularSystem` as in
            :attr:`MolecularSystem.system`; if True, dump the
            :class:`MolecularSystem` as concatenated :class:`Molecule` objects
from :attr:`MolecularSystem.molecules`
Returns
-------
None : :class:`NoneType`
"""
# We pass a copy of the properties dictionary.
dict_obj = deepcopy(self.system)
# In case we want a modular system.
if modular is True:
try:
if self.molecules:
pass
except AttributeError:
raise _NotAModularSystem(
"This system is not modular. Please, run first the "
"make_modular() function of this class.")
dict_obj = {}
for molecule in self.molecules:
mol_ = self.molecules[molecule]
dict_obj[molecule] = mol_.mol
# If no filepath is provided we create one.
if filepath is None:
filepath = '/'.join((os.getcwd(), str(self.system_id)))
# Dump the dictionary to json file.
self._Output.dump2json(dict_obj, filepath, default=to_list, **kwargs) | 0.001049 |
def stack_eggs(eggs, meta='concatenate'):
'''
Takes a list of eggs, stacks them and reindexes the subject number
Parameters
----------
eggs : list of Egg data objects
A list of Eggs that you want to combine
meta : string
Determines how the meta data of each Egg combines. Default is 'concatenate'
'concatenate' concatenates keys in meta data dictionary shared between eggs, and copies non-overlapping keys
'separate' keeps the Eggs' meta data dictionaries separate, with each as a list index in the stacked meta data
Returns
----------
new_egg : Egg data object
A mega egg comprised of the input eggs stacked together
'''
from .egg import Egg
pres = [egg.pres.loc[sub,:].values.tolist() for egg in eggs for sub in egg.pres.index.levels[0].values.tolist()]
rec = [egg.rec.loc[sub,:].values.tolist() for egg in eggs for sub in egg.rec.index.levels[0].values.tolist()]
    if meta == 'concatenate':
new_meta = {}
for egg in eggs:
for key in egg.meta:
if key in new_meta:
new_meta[key] = list(new_meta[key])
new_meta[key].extend(egg.meta.get(key))
else:
new_meta[key] = egg.meta.get(key)
    elif meta == 'separate':
new_meta = list(egg.meta for egg in eggs)
return Egg(pres=pres, rec=rec, meta=new_meta) | 0.005587 |
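# Standalone illustration (not the original API) of the 'concatenate' meta
# merge performed above: shared keys are extended, unique keys are copied.
metas = [{'subject': ['s1'], 'task': 'free recall'}, {'subject': ['s2']}]
merged = {}
for m in metas:
    for key in m:
        if key in merged:
            merged[key] = list(merged[key])
            merged[key].extend(m.get(key))
        else:
            merged[key] = m.get(key)
print(merged)  # {'subject': ['s1', 's2'], 'task': 'free recall'}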
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
"""
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup | 0.003711 |
def get_shutit_pexpect_session_from_id(self, shutit_pexpect_id):
"""Get the pexpect session from the given identifier.
"""
shutit_global.shutit_global_object.yield_to_draw()
for key in self.shutit_pexpect_sessions:
if self.shutit_pexpect_sessions[key].pexpect_session_id == shutit_pexpect_id:
return self.shutit_pexpect_sessions[key]
return self.fail('Should not get here in get_shutit_pexpect_session_from_id',throw_exception=True) | 0.026726 |
def _to_array(value):
"""As a convenience, turn Python lists and tuples into NumPy arrays."""
if isinstance(value, (tuple, list)):
return array(value)
elif isinstance(value, (float, int)):
return np.float64(value)
else:
return value | 0.003676 |
def exists(self, file_ref):
"""
Parameters
----------
file_ref: str
reference of file.
Available references: 'idf', 'epw', 'eio', 'eso', 'mtr', 'mtd', 'mdd', 'err', 'summary_table'
See EnergyPlus documentation for more information.
Returns
-------
Boolean
"""
if file_ref not in FILE_REFS:
raise ValueError("Unknown file_ref: '%s'. Available: '%s'." % (file_ref, list(sorted(FILE_REFS._fields))))
return os.path.isfile(self._path(file_ref)) | 0.007018 |
def get_top_stories(self):
"""
Get the item numbers for the current top stories.
        Will raise a requests.HTTPError if we get a non-200 response back.
:return: A list with the top story item numbers.
"""
suburl = "v0/topstories.json"
try:
top_stories = self._make_request(suburl)
except requests.HTTPError as e:
hn_logger.exception('Faulted on getting top stories, with status {}'.format(e.errno))
raise e
return top_stories | 0.00566 |
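# Illustrative direct call to the endpoint the wrapper presumably targets
# (the base URL is an assumption here; requires network access).
import requests
resp = requests.get("https://hacker-news.firebaseio.com/v0/topstories.json", timeout=10)
resp.raise_for_status()
print(resp.json()[:5])  # first five top-story item ids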
def iter_variants_by_names(self, names):
"""Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction.
"""
if not self.is_parallel:
yield from super().iter_variants_by_names(names)
else:
for info, dosage in self._bgen.iter_variants_by_names(names):
yield Genotypes(
Variant(info.name,
CHROM_STR_ENCODE.get(info.chrom, info.chrom),
info.pos, [info.a1, info.a2]),
dosage,
reference=info.a1,
coded=info.a2,
multiallelic=True,
) | 0.002639 |
def parse_file(filename):
"""Parse the provided file, and return Code object."""
assert isinstance(filename, _str_type), "`filename` parameter should be a string, got %r" % type(filename)
with open(filename, "rt", encoding="utf-8") as f:
return Code(_tokenize(f.readline)) | 0.006849 |
def set_sp_template_updated(self, vlan_id, sp_template, device_id):
"""Sets update_on_ucs flag to True."""
entry = self.get_sp_template_vlan_entry(vlan_id,
sp_template,
device_id)
if entry:
entry.updated_on_ucs = True
self.session.merge(entry)
return entry
else:
return False | 0.004435 |
def wallet_change_seed(self, wallet, seed):
"""
Changes seed for **wallet** to **seed**
.. enable_control required
:param wallet: Wallet to change seed for
:type wallet: str
:param seed: Seed to change wallet to
:type seed: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_change_seed(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... seed="74F2B37AAD20F4A260F0A5B3CB3D7FB51673212263E58A380BC10474BB039CEE"
... )
True
"""
wallet = self._process_value(wallet, 'wallet')
seed = self._process_value(seed, 'seed')
payload = {"wallet": wallet, "seed": seed}
resp = self.call('wallet_change_seed', payload)
return 'success' in resp | 0.004768 |
def size(self):
"""Total number of grid points."""
# Since np.prod(()) == 1.0 we need to handle that by ourselves
return (0 if self.shape == () else
int(np.prod(self.shape, dtype='int64'))) | 0.008734 |
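# Why the explicit guard above exists: np.prod of an empty tuple is 1.0,
# which would wrongly report one grid point for an empty shape.
import numpy as np
print(np.prod(()))                              # 1.0
print(int(np.prod((3, 4, 5), dtype='int64')))   # 60 grid points for shape (3, 4, 5)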
def get_learning_rate(self, iter):
'''
Get learning rate with exponential decay based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate
'''
lr = self.init_lr
for iter_step in self.iter_steps:
if iter >= iter_step:
lr *= self.gamma
return lr | 0.004819 |
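# Worked example of the step-wise decay above with hypothetical scheduler
# values (init_lr, gamma and iter_steps are normally attributes of the class).
init_lr, gamma, iter_steps = 0.1, 0.5, [100, 200]
for it in (50, 150, 250):
    lr = init_lr
    for step in iter_steps:
        if it >= step:
            lr *= gamma
    print(it, lr)  # 50 -> 0.1, 150 -> 0.05, 250 -> 0.025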
def control_loop():
'''Main loop, retrieving the schedule.
'''
set_service_status(Service.SCHEDULE, ServiceStatus.BUSY)
notify.notify('READY=1')
while not terminate():
notify.notify('WATCHDOG=1')
# Try getting an updated schedule
get_schedule()
session = get_session()
next_event = session.query(UpcomingEvent)\
.filter(UpcomingEvent.end > timestamp())\
.order_by(UpcomingEvent.start)\
.first()
if next_event:
logger.info('Next scheduled recording: %s',
datetime.fromtimestamp(next_event.start))
notify.notify('STATUS=Next scheduled recording: %s' %
datetime.fromtimestamp(next_event.start))
else:
logger.info('No scheduled recording')
notify.notify('STATUS=No scheduled recording')
session.close()
next_update = timestamp() + config()['agent']['update_frequency']
while not terminate() and timestamp() < next_update:
time.sleep(0.1)
logger.info('Shutting down schedule service')
set_service_status(Service.SCHEDULE, ServiceStatus.STOPPED) | 0.000805 |
def simulate_reads(self):
"""
Use the PacBio assembly FASTA files to generate simulated reads of appropriate forward and reverse lengths
at different depths of sequencing using randomreads.sh from the bbtools suite
"""
logging.info('Read simulation')
for sample in self.metadata:
# Create the simulated_reads GenObject
sample.simulated_reads = GenObject()
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
# Create the depth GenObject
setattr(sample.simulated_reads, depth, GenObject())
# Set the depth and output directory attributes for the depth GenObject
sample.simulated_reads[depth].depth = depth
sample.simulated_reads[depth].depth_dir = os.path.join(sample.outputdir, 'simulated', depth)
# Create the output directory
make_path(sample.simulated_reads[depth].depth_dir)
# Iterate through all the desired forward and reverse read pair lengths
for read_pair in self.read_lengths:
# Create the read_pair GenObject within the depth GenObject
setattr(sample.simulated_reads[depth], read_pair, GenObject())
# Set and create the output directory
sample.simulated_reads[depth][read_pair].outputdir = \
os.path.join(sample.simulated_reads[depth].depth_dir, read_pair)
make_path(sample.simulated_reads[depth][read_pair].outputdir)
# Create both forward_reads and reverse_reads sub-GenObjects
sample.simulated_reads[depth][read_pair].forward_reads = GenObject()
sample.simulated_reads[depth][read_pair].reverse_reads = GenObject()
# Extract the forward and reverse reads lengths from the read_pair variable
sample.simulated_reads[depth][read_pair].forward_reads.length, \
sample.simulated_reads[depth][read_pair].reverse_reads.length = read_pair.split('_')
# Set the name of the forward reads - include the depth and read length information
sample.simulated_reads[depth][read_pair].forward_reads.fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'{name}_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.simulated_reads[depth][read_pair].reverse_reads.fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'{name}_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Create the trimmed output directory attribute
sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir \
= os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'simulated_trimmed')
# Set the name of the forward trimmed reads - include the depth and read length information
# This is set now, as the untrimmed files will be removed, and a check is necessary
sample.simulated_reads[depth][read_pair].forward_reads.trimmed_simulated_fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir,
'{name}_simulated_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.simulated_reads[depth][read_pair].reverse_reads.trimmed_simulated_fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir,
'{name}_simulated_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Calculate the number of reads required for the forward and reverse reads to yield the
# desired coverage depth e.g. 5Mbp genome at 20X coverage: 100Mbp in reads. 50bp forward reads
# 150bp reverse reads: forward proportion is 50 / (150 + 50) = 0.25 (and reverse is 0.75).
# Forward total reads is 25Mbp (75Mbp reverse). Number of reads required = 25Mbp / 50 bp
# 500000 reads total (same for reverse, as the reads are longer)
sample.simulated_reads[depth][read_pair].num_reads = \
int(sample.assembly_length *
int(depth) *
(int(sample.simulated_reads[depth][read_pair].forward_reads.length) /
(int(sample.simulated_reads[depth][read_pair].forward_reads.length) +
int(sample.simulated_reads[depth][read_pair].reverse_reads.length)
)
) /
int(sample.simulated_reads[depth][read_pair].forward_reads.length)
)
logging.info(
'Simulating {num_reads} paired reads for sample {name} with the following parameters:\n'
'depth {dp}, forward reads {fl}bp, and reverse reads {rl}bp'
.format(num_reads=sample.simulated_reads[depth][read_pair].num_reads,
dp=depth,
name=sample.name,
fl=sample.simulated_reads[depth][read_pair].forward_reads.length,
rl=sample.simulated_reads[depth][read_pair].reverse_reads.length))
# If the reverse reads are set to 0, supply different parameters to randomreads
if sample.simulated_reads[depth][read_pair].reverse_reads.length != '0':
# Ensure that both the simulated reads, and the trimmed simulated reads files don't
# exist before simulating the reads
if not os.path.isfile(sample.simulated_reads[depth][read_pair].forward_reads.fastq) and \
not os.path.isfile(
sample.simulated_reads[depth][read_pair].forward_reads.trimmed_simulated_fastq):
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools\
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].reverse_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=True,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't',
'Xmx': self.mem}
)
else:
try:
forward_size = os.path.getsize(sample.simulated_reads[depth][read_pair]
.forward_reads.fastq)
except FileNotFoundError:
forward_size = 0
try:
reverse_size = os.path.getsize(sample.simulated_reads[depth][read_pair]
.reverse_reads.fastq)
except FileNotFoundError:
reverse_size = 0
if forward_size <= 100 or reverse_size <= 100:
try:
os.remove(sample.simulated_reads[depth][read_pair].forward_reads.fastq)
except FileNotFoundError:
pass
try:
os.remove(sample.simulated_reads[depth][read_pair].reverse_reads.fastq)
except FileNotFoundError:
pass
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools \
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].reverse_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=True,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't'}
)
else:
if not os.path.isfile(sample.simulated_reads[depth][read_pair].forward_reads.fastq):
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools\
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].forward_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=False,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't'}
)
# Update the JSON file
self.write_json(sample) | 0.006369 |
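# Standalone check of the read-count arithmetic described in the comments
# above, using the same 5 Mbp genome, 20X depth, 50 bp + 150 bp example.
def paired_read_count(assembly_length, depth, fwd_len, rev_len):
    forward_share = fwd_len / (fwd_len + rev_len)   # fraction of bases on the forward mate
    return int(assembly_length * depth * forward_share / fwd_len)

print(paired_read_count(5000000, 20, 50, 150))  # 500000 read pairs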