text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def to_netcdf(self, filename, compress=True):
"""Write InferenceData to file using netcdf4.
Parameters
----------
filename : str
Location to write to
compress : bool
Whether to compress result. Note this saves disk space, but may make
saving and loading somewhat slower (default: True).
Returns
-------
str
Location of netcdf file
"""
mode = "w" # overwrite first, then append
if self._groups: # checks whether a group is present or not.
for group in self._groups:
data = getattr(self, group)
kwargs = {}
if compress:
kwargs["encoding"] = {var_name: {"zlib": True} for var_name in data.variables}
data.to_netcdf(filename, mode=mode, group=group, **kwargs)
data.close()
mode = "a"
else: # creates a netcdf file for an empty InferenceData object.
empty_netcdf_file = nc.Dataset(filename, mode="w", format="NETCDF4")
empty_netcdf_file.close()
return filename | 0.004292 |
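A minimal standalone sketch of the same multi-group, compressed write, assuming xarray with a netCDF4 backend is installed; the group names and variables below are illustrative, not part of the original API:

```python
import numpy as np
import xarray as xr

posterior = xr.Dataset({"mu": ("draw", np.random.randn(100))})
prior = xr.Dataset({"mu": ("draw", np.random.randn(100))})

mode = "w"  # overwrite on the first group, append the remaining groups
for group, data in [("posterior", posterior), ("prior", prior)]:
    encoding = {name: {"zlib": True} for name in data.variables}
    data.to_netcdf("trace.nc", mode=mode, group=group, encoding=encoding)
    mode = "a"
```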
def check_lazy_load_afdeling(f):
'''
Decorator function to lazy load a :class:`Afdeling`.
'''
def wrapper(self):
afdeling = self
if (getattr(afdeling, '_%s' % f.__name__, None) is None):
log.debug('Lazy loading Afdeling %d', afdeling.id)
afdeling.check_gateway()
a = afdeling.gateway.get_kadastrale_afdeling_by_id(afdeling.id)
afdeling._naam = a._naam
afdeling._gemeente = a._gemeente
afdeling._centroid = a._centroid
afdeling._bounding_box = a._bounding_box
return f(self)
return wrapper | 0.001618 |
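An illustrative, dependency-free sketch of the same lazy-loading pattern; `Resource` and `fetch_remote` are placeholder names, not the real gateway API:

```python
def lazy_load(f):
    def wrapper(self):
        attr = '_%s' % f.__name__
        if getattr(self, attr, None) is None:
            # one remote call fills the cached attribute
            setattr(self, attr, self.fetch_remote(f.__name__))
        return f(self)
    return wrapper

class Resource:
    def __init__(self, ident):
        self.id = ident
        self._naam = None

    def fetch_remote(self, field):
        return 'remote value of %s for %d' % (field, self.id)

    @lazy_load
    def naam(self):
        return self._naam

print(Resource(7).naam())   # triggers one fetch, then reads the cached value
```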
def from_line(
cls, name, comes_from=None, isolated=False, options=None,
wheel_cache=None):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip.index import Link
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
extras = None
if is_url(name):
link = Link(name)
else:
p, extras = _strip_extras(path)
if (os.path.isdir(p) and
(os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(p):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' "
"not found." % name
)
link = Link(path_to_url(p))
elif is_archive_file(p):
if not os.path.isfile(p):
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
link = Link(path_to_url(p))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel on this platform." %
wheel.filename
)
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
options = options if options else {}
res = cls(req, comes_from, link=link, markers=markers,
isolated=isolated, options=options,
wheel_cache=wheel_cache)
if extras:
res.extras = pkg_resources.Requirement.parse('__placeholder__' +
extras).extras
return res | 0.000678 |
def unmarkCollapsed( self ):
"""
Unmarks this splitter as being in a collapsed state, clearing any \
collapsed information.
"""
if ( not self.isCollapsed() ):
return
self._collapsed = False
self._storedSizes = None
if ( self.orientation() == Qt.Vertical ):
self._collapseBefore.setArrowType( Qt.UpArrow )
self._collapseAfter.setArrowType( Qt.DownArrow )
else:
self._collapseBefore.setArrowType( Qt.LeftArrow )
self._collapseAfter.setArrowType( Qt.RightArrow ) | 0.032415 |
def get_article_status(self, url=None, article_id=None):
"""
Send a HEAD request to the `parser` endpoint to the parser API to
get the articles status.
Returned is a `requests.Response` object. The id and status for the
article can be extracted from the `X-Article-Id` and `X-Article-Status`
headers.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('parser', query_params=query_params)
return self.head(url) | 0.002181 |
def get_fixtures(self, competition=None, team=None, timeFrame=None, matchday=None, season=None, venue=None, league=None):
""" This method gets a set of fixtures. There are several possibilities to load them. The following three main resources are available:
* competitions
* teams
* fixtures
Args:
* competition (:obj: json, optional): a competition in json format obtained from the service.
* team (:obj: json, optional): a team in json format obtained from the service.
* timeFrame (str, optional): a timeFrame string.
* matchday (int, optional): the matchday in integer format.
* season (str, optional): the year of the season.
* venue (str, optional): 'home' or 'away'.
Returns:
* :obj: list of :obj: json: a list of fixtures
#### Competitions
To load the fixtures of a competition you have to provide a competition-json that you obtained from the service.
You can provide the following filters:
* timeFrame (str): a timeFrame string (e.g. n14 for fixtures in the next 14 days)
* matchday (int): the matchday
Note that the filters can not be combined.
#### Teams
To load the fixtures of a team you have to provide a team-json that you obtained from the service.
You can provide the following filters:
* timeFrame (str): a timeFrame string (e.g. n14 for fixtures in the next 14 days)
* season (str): the year of the season (e.g. 2017)
* venue: (str): either 'home' or 'away'
Note that timeFrame and season can not be combined.
#### Fixtures
To load a list of upcoming or past fixtures you do not need to provide a json. You can provide the following filters:
* league: a leagueCode. Please refer to the documentation to get a list of league codes.
* timeFrame: a timeFrame string (e.g. n14 for fixtures in the next 14 days)
"""
self.logger.debug(f'Getting fixtures with the following parameters: {locals()}')
filters = []
if competition is not None:
if timeFrame is not None and matchday is not None:
raise IncorrectMethodCallException("Please filter by either timeFrame or matchday.")
elif timeFrame is not None and matchday is None:
filters.append(self.__createFilter('timeFrame', timeFrame))
elif timeFrame is None and matchday is not None:
filters.append(self.__createFilter('matchday', matchday))
competition_id = self.__get_competition_id(competition)
fixtures = self._request('competitions', competition_id, 'fixtures', filters=filters)
if team is not None:
fixtures['fixtures'] = list(filter(lambda fixture: fixture['_links']['homeTeam']['href'] == team['_links']['self']['href'] or fixture['_links']['awayTeam']['href'] == team['_links']['self']['href'], fixtures['fixtures']))
fixtures['count'] = len(fixtures['fixtures'])
elif team is not None:
if venue is not None:
filters.append(self.__createFilter('venue', venue))
if season is not None and timeFrame is not None:
raise IncorrectMethodCallException("Please filter by either timeFrame or season.")
elif season is not None:
filters.append(self.__createFilter('season', season))
elif timeFrame is not None:
filters.append(self.__createFilter('timeFrame', timeFrame))
team_id = self.__get_team_id(team)
fixtures = self._request('teams', team_id, 'fixtures', filters=filters)
else:
if league is not None:
filters.append(self.__createFilter('league', league))
if timeFrame is not None:
filters.append(self.__createFilter('timeFrame', timeFrame))
fixtures = self._request('fixtures', filters=filters)
return fixtures | 0.006176 |
def _makeNestedTempDir(top, seed, levels=2):
"""
Gets a temporary directory in the hierarchy of directories under a given
top directory.
This exists to avoid placing too many temporary directories under a single
top in a flat structure, which can slow down metadata updates such as
deletes on the local file system.
The seed parameter allows for deterministic placement of the created
directory. The seed is hashed into hex digest and the directory structure
is created from the initial letters of the digest.
:param top : string, top directory for the hierarchy
:param seed : string, the hierarchy will be generated from this seed string
:rtype : string, path to temporary directory - will be created when
necessary.
"""
# Valid chars for the creation of temporary directories
validDirs = hashlib.md5(six.b(str(seed))).hexdigest()
tempDir = top
for i in range(max(min(levels, len(validDirs)), 1)):
tempDir = os.path.join(tempDir, validDirs[i])
if not os.path.exists(tempDir):
try:
os.makedirs(tempDir)
except os.error:
if not os.path.exists(tempDir):
# In the case that a collision occurs and
# it is created while we wait then we ignore
raise
return tempDir | 0.000728 |
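A standalone sketch of the hashed-directory layout without the Toil/six dependencies; `os.makedirs(..., exist_ok=True)` stands in for the manual collision handling above, and the printed path is only illustrative:

```python
import hashlib
import os

def nested_dir(top, seed, levels=2):
    # the first `levels` hex characters of the seed's MD5 pick the subdirectories
    digest = hashlib.md5(str(seed).encode()).hexdigest()
    path = top
    for ch in digest[:max(min(levels, len(digest)), 1)]:
        path = os.path.join(path, ch)
    os.makedirs(path, exist_ok=True)  # tolerate concurrent creation
    return path

print(nested_dir("/tmp/scratch", "job-42"))   # e.g. /tmp/scratch/3/f
```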
def _make_df(rows):
"""Internal Method to make and clean the dataframe in preparation for sending to Parquet"""
# Make DataFrame
df = pd.DataFrame(rows).set_index('ts')
# TimeDelta Support: https://issues.apache.org/jira/browse/ARROW-835
for column in df.columns:
if(df[column].dtype == 'timedelta64[ns]'):
print('Converting timedelta column {:s}...'.format(column))
df[column] = df[column].astype(str)
return df | 0.004255 |
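A toy reproduction of the timedelta workaround, assuming pandas is installed; the column names and values are made up for illustration:

```python
import pandas as pd

rows = [{"ts": pd.Timestamp("2021-01-01"), "latency": pd.Timedelta(milliseconds=150)},
        {"ts": pd.Timestamp("2021-01-02"), "latency": pd.Timedelta(seconds=2)}]
df = pd.DataFrame(rows).set_index("ts")
for column in df.columns:
    if df[column].dtype == "timedelta64[ns]":
        # Arrow/Parquet cannot store the timedelta dtype directly, so stringify it
        df[column] = df[column].astype(str)
print(df.dtypes)   # latency is now object (string)
```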
def driver_from_file(input_file):
"""
Guess driver from file extension.
Returns
-------
driver : string
driver name
"""
file_ext = os.path.splitext(input_file)[1].split(".")[1]
if file_ext not in _file_ext_to_driver():
raise MapcheteDriverError(
"no driver could be found for file extension %s" % file_ext
)
driver = _file_ext_to_driver()[file_ext]
if len(driver) > 1:
warnings.warn(
DeprecationWarning(
"more than one driver for file found, taking %s" % driver[0]
)
)
return driver[0] | 0.001608 |
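A self-contained sketch of the extension-to-driver lookup; the mapping below is illustrative and not mapchete's real driver registry:

```python
import os
import warnings

# illustrative mapping only
_EXT_TO_DRIVERS = {"tif": ["GTiff"], "geojson": ["GeoJSON"], "shp": ["ESRI Shapefile", "OGR"]}

def guess_driver(path):
    ext = os.path.splitext(path)[1].lstrip(".").lower()
    if ext not in _EXT_TO_DRIVERS:
        raise ValueError("no driver could be found for file extension %s" % ext)
    drivers = _EXT_TO_DRIVERS[ext]
    if len(drivers) > 1:
        warnings.warn("more than one driver for file found, taking %s" % drivers[0])
    return drivers[0]

print(guess_driver("regions.shp"))   # ESRI Shapefile
```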
def initFilter(input, filterInfo = None):
""" Initializes internal filter variables for further processing.
Returns a tuple (function to call,parameters for the filter call)
The filterInfo is a dict. Here is an example structure:
{fieldName: {'min': x,
'max': y,
'type': 'category', # or 'number'
'acceptValues': ['foo', 'bar'],
}
}
This returns the following:
(filterFunc, ((fieldIdx, fieldFilterFunc, filterDict),
...)
Where fieldIdx is the index of the field within each record
fieldFilterFunc returns True if the value is "OK" (within min, max or
part of acceptValues)
fieldDict is a dict containing 'type', 'min', max', 'acceptValues'
"""
if filterInfo is None:
return None
# Build an array of index/func to call on record[index]
filterList = []
for i, fieldName in enumerate(input.getFieldNames()):
fieldFilter = filterInfo.get(fieldName, None)
if fieldFilter is None:
continue
var = dict()
var['acceptValues'] = None
min = fieldFilter.get('min', None)
max = fieldFilter.get('max', None)
var['min'] = min
var['max'] = max
if fieldFilter['type'] == 'category':
var['acceptValues'] = fieldFilter['acceptValues']
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] in x['acceptValues'])
elif fieldFilter['type'] == 'number':
if min != None and max != None:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] >= x['min'] and x['value'] <= x['max'])
elif min != None:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] >= x['min'])
else:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] <= x['max'])
filterList.append((i, fp, var))
return (_filterRecord, filterList) | 0.013719 |
def convert_snapshot(self, shift, instruction):
"""Return converted `Snapshot`.
Args:
shift(int): Offset time.
instruction (Snapshot): snapshot instruction.
Returns:
dict: Dictionary of required parameters.
"""
command_dict = {
'name': 'snapshot',
't0': shift+instruction.start_time,
'label': instruction.name,
'type': instruction.type
}
return self._qobj_model(**command_dict) | 0.003861 |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
opt = "annotation"
if opt not in options:
options[opt] = None
if opt not in self.help:
self.help[opt] = "The (optional) annotation for this actor (string)."
opt = "skip"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to skip (disable) this actor (bool)."
return super(Actor, self).fix_config(options) | 0.005355 |
def order_percent(id_or_ins, percent, price=None, style=None):
"""
Place a buy/sell order that spends cash equal to a given percentage of the current portfolio value (market value plus current cash); a positive number buys, a negative number sells. The number of shares is always rounded to a multiple of one board lot (one lot is 100 shares). The percentage is a decimal less than or equal to 1 (<= 100%); 0.5 means 50%. Note that if funds are insufficient, this API will not create or send the order.
Note:
When placing a buy order, percent is the fraction of total portfolio equity expected to be spent on the shares (taxes and fees included).
When placing a sell order, percent is the fraction of total portfolio equity represented by the value of the shares to be sold.
:param id_or_ins: the instrument to trade
:type id_or_ins: :class:`~Instrument` object | `str`
:param float percent: percentage of the current portfolio value. Positive means buy, negative means sell.
:param float price: order price; default None, meaning a :class:`~MarketOrder`. This parameter mainly serves as a shorthand for `style`.
:param style: order type, market order by default. Currently supported order types are :class:`~LimitOrder` and :class:`~MarketOrder`
:type style: `OrderStyle` object
:return: :class:`~Order` object | None
:example:
.. code-block:: python
# Buy Ping An Bank shares with cash equal to 50% of the current portfolio value:
order_percent('000001.XSHG', 0.5)
"""
if percent < -1 or percent > 1:
raise RQInvalidArgument(_(u"percent should between -1 and 1"))
style = cal_style(price, style)
account = Environment.get_instance().portfolio.accounts[DEFAULT_ACCOUNT_TYPE.STOCK.name]
return order_value(id_or_ins, account.total_value * percent, style=style) | 0.00339 |
def sumexp_stable(data):
"""Compute the sum of exponents for a list of samples
Parameters
----------
data : array, shape=[features, samples]
A data array containing samples.
Returns
-------
result_sum : array, shape=[samples,]
The sum of exponents for each sample divided by the exponent
of the maximum feature value in the sample.
max_value : array, shape=[samples,]
The maximum feature value for each sample.
result_exp : array, shape=[features, samples]
The exponent of each element in each sample divided by the exponent
of the maximum feature value in the sample.
Note
----
This function is more stable than computing the sum(exp(v)).
It is useful for computing the softmax_i(v)=exp(v_i)/sum(exp(v)) function.
"""
max_value = data.max(axis=0)
result_exp = np.exp(data - max_value)
result_sum = np.sum(result_exp, axis=0)
return result_sum, max_value, result_exp | 0.001 |
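The same max-subtraction trick applied to a stable softmax, assuming NumPy; the input values are chosen so that a naive exp() would overflow:

```python
import numpy as np

data = np.array([[1000.0, 2.0],
                 [1001.0, 1.0],
                 [ 999.0, 0.0]])          # shape (features, samples)
max_value = data.max(axis=0)              # per-sample maximum
result_exp = np.exp(data - max_value)     # never overflows
result_sum = result_exp.sum(axis=0)
softmax = result_exp / result_sum         # softmax_i(v) = exp(v_i - m) / sum(exp(v - m))
print(softmax.sum(axis=0))                # [1. 1.]
```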
def get_partial_word_under_cursor(self):
"""
Returns the document partial word under cursor ( From word start to cursor position ).
:return: Partial word under cursor.
:rtype: QString
"""
if not re.match(r"^\w+$", foundations.strings.to_string(self.get_previous_character())):
return QString()
cursor = self.textCursor()
position = cursor.position()
cursor.movePosition(QTextCursor.PreviousWord, QTextCursor.KeepAnchor)
return cursor.selectedText() | 0.007366 |
def get_token(username, length=20, timeout=20):
"""
Obtain an access token that can be passed to a websocket client.
"""
redis = get_redis_client()
token = get_random_string(length)
token_key = 'token:{}'.format(token)
redis.set(token_key, username)
redis.expire(token_key, timeout)
return token | 0.003021 |
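A dependency-free sketch of the same idea, using an in-memory dict in place of Redis and `secrets.token_urlsafe` in place of `get_random_string`; nothing here is the original client's API:

```python
import secrets
import time

_tokens = {}   # stands in for the Redis store

def get_token(username, length=20, timeout=20):
    token = secrets.token_urlsafe(length)[:length]
    _tokens["token:%s" % token] = (username, time.time() + timeout)
    return token

def resolve_token(token):
    username, expires = _tokens.get("token:%s" % token, (None, 0.0))
    return username if time.time() < expires else None

t = get_token("alice")
print(resolve_token(t))   # alice (until the timeout elapses)
```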
def output(self, args):
'''
Print the output message.
'''
print("SensuPlugin: {}".format(' '.join(str(a) for a in args))) | 0.013072 |
def asyncPipeItembuilder(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that asynchronously builds an item. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'attrs': [
{'key': {'value': 'title'}, 'value': {'value': 'new title'}},
{'key': {'value': 'desc.content'}, 'value': {'value': 'new desc'}}
]
}
Returns
------
_OUTPUT : twisted.internet.defer.Deferred generator of items
"""
pkwargs = cdicts(opts, kwargs)
asyncFuncs = yield asyncGetSplits(None, conf['attrs'], **pkwargs)
_input = yield _INPUT
finite = utils.finitize(_input)
inputs = imap(DotDict, finite)
pieces = yield asyncImap(asyncFuncs[0], inputs)
results = imap(utils.parse_params, pieces)
_OUTPUT = imap(DotDict, results)
returnValue(_OUTPUT) | 0.001068 |
def from_series(cls, series):
"""Convert a pandas.Series into an xarray.DataArray.
If the series's index is a MultiIndex, it will be expanded into a
tensor product of one-dimensional coordinates (filling in missing
values with NaN). Thus this operation should be the inverse of the
`to_series` method.
"""
# TODO: add a 'name' parameter
name = series.name
df = pd.DataFrame({name: series})
ds = Dataset.from_dataframe(df)
return ds[name] | 0.003802 |
def get_file_to_stream(self, stream, share_name, directory_name, file_name, **kwargs):
"""
Download a file from Azure File Share.
:param stream: A filehandle to store the file to.
:type stream: file-like object
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.get_file_to_stream()` takes.
:type kwargs: object
"""
self.connection.get_file_to_stream(share_name, directory_name,
file_name, stream, **kwargs) | 0.003881 |
def to_OrderedDict(self, include_null=True):
"""
Convert to OrderedDict.
"""
if include_null:
return OrderedDict(self.items())
else:
items = list()
for c in self.__table__._columns:
try:
items.append((c.name, self.__dict__[c.name]))
except KeyError:
pass
return OrderedDict(items) | 0.004545 |
def restore(self):
"""Reimplemented OneColumnTree method"""
if self.current_editor is not None:
self.collapseAll()
editor_id = self.editor_ids[self.current_editor]
self.root_item_selected(self.editor_items[editor_id]) | 0.007299 |
def match_paren(self, tokens, item):
"""Matches a paren."""
match, = tokens
return self.match(match, item) | 0.015385 |
def send_password_reset_notice(user):
"""Sends the password reset notice email for the specified user.
:param user: The user to send the notice to
"""
if config_value('SEND_PASSWORD_RESET_NOTICE_EMAIL'):
_security.send_mail(config_value('EMAIL_SUBJECT_PASSWORD_NOTICE'),
user.email, 'reset_notice', user=user) | 0.002762 |
def set_parent(self, new_site):
"""
Set self.site as either an empty string, or with a new Site.
"""
if new_site:
if not isinstance(new_site, Site):
raise Exception
self.site = new_site
self.propagate_data()
return new_site | 0.006515 |
def enable_branching_model(self, project, repository):
"""
Enable branching model by setting it with default configuration
:param project:
:param repository:
:return:
"""
default_model_data = {'development': {'refId': None, 'useDefault': True},
'types': [{'displayName': 'Bugfix',
'enabled': True,
'id': 'BUGFIX',
'prefix': 'bugfix/'},
{'displayName': 'Feature',
'enabled': True,
'id': 'FEATURE',
'prefix': 'feature/'},
{'displayName': 'Hotfix',
'enabled': True,
'id': 'HOTFIX',
'prefix': 'hotfix/'},
{'displayName': 'Release',
'enabled': True,
'id': 'RELEASE',
'prefix': 'release/'}]}
return self.set_branching_model(project,
repository,
default_model_data) | 0.002078 |
def assign_from_user(self, partitions):
"""Manually assign a list of TopicPartitions to this consumer.
This interface does not allow for incremental assignment and will
replace the previous assignment (if there was one).
Manual topic assignment through this method does not use the consumer's
group management functionality. As such, there will be no rebalance
operation triggered when group membership or cluster and topic metadata
change. Note that it is not possible to use both manual partition
assignment with assign() and group assignment with subscribe().
Arguments:
partitions (list of TopicPartition): assignment for this instance.
Raises:
IllegalStateError: if consumer has already called subscribe()
"""
if self.subscription is not None:
raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
if self._user_assignment != set(partitions):
self._user_assignment = set(partitions)
for partition in partitions:
if partition not in self.assignment:
self._add_assigned_partition(partition)
for tp in set(self.assignment.keys()) - self._user_assignment:
del self.assignment[tp]
self.needs_fetch_committed_offsets = True | 0.001451 |
def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None, force_host=None):
"""
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
"""
r = self.database_renderer(name=name, site=site)
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)
from_local = int(from_local)
prep_only = int(prep_only)
missing_local_dump_error = r.format(
"Database dump file {dump_fn} does not exist."
)
# Copy snapshot file to target.
if self.is_local:
r.env.remote_dump_fn = dump_fn
else:
r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]
if not prep_only and not self.is_local:
if not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
#r.pc('Uploading PostgreSQL database snapshot...')
# r.put(
# local_path=r.env.dump_fn,
# remote_path=r.env.remote_dump_fn)
#r.local('rsync -rvz --progress --no-p --no-g '
#'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" '
#'{dump_fn} {user}@{host_string}:{remote_dump_fn}')
self.upload_snapshot(name=name, site=site, local_dump_fn=r.env.dump_fn, remote_dump_fn=r.env.remote_dump_fn)
if self.is_local and not prep_only and not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
if force_host:
r.env.db_host = force_host
with settings(warn_only=True):
r.sudo('dropdb --if-exists --no-password --user={db_root_username} --host={db_host} {db_name}', user=r.env.postgres_user)
r.sudo('psql --no-password --user={db_root_username} --host={db_host} -c "CREATE DATABASE {db_name};"', user=r.env.postgres_user)
with settings(warn_only=True):
if r.env.engine == POSTGIS:
r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis;"',
user=r.env.postgres_user)
r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} --command="CREATE EXTENSION postgis_topology;"',
user=r.env.postgres_user)
with settings(warn_only=True):
r.sudo('psql --user={db_root_username} --host={db_host} -c "REASSIGN OWNED BY {db_user} TO {db_root_username};"', user=r.env.postgres_user)
with settings(warn_only=True):
r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP OWNED BY {db_user} CASCADE;"', user=r.env.postgres_user)
r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP USER IF EXISTS {db_user}; '
'CREATE USER {db_user} WITH PASSWORD \'{db_password}\'; '
'GRANT ALL PRIVILEGES ON DATABASE {db_name} to {db_user};"', user=r.env.postgres_user)
for createlang in r.env.createlangs:
r.env.createlang = createlang
r.sudo('createlang -U {db_root_username} --host={db_host} {createlang} {db_name} || true', user=r.env.postgres_user)
if not prep_only:
# Ignore errors needed to work around bug "ERROR: schema "public" already exists", which is thrown in 9.6 even if we use --clean and --if-exists?
with settings(warn_only=True):
r.sudo(r.env.load_command, user=r.env.postgres_user) | 0.006138 |
def groups_pools_getGroups():
"""Get a list of groups the auth'd user can post photos to."""
method = 'flickr.groups.pools.getGroups'
data = _doget(method, auth=True)
groups = []
if isinstance(data.rsp.groups.group, list):
for group in data.rsp.groups.group:
groups.append(Group(group.id, name=group.name, \
privacy=group.privacy))
else:
group = data.rsp.groups.group
groups = [Group(group.id, name=group.name, privacy=group.privacy)]
return groups | 0.003676 |
def get_medium(self, agent_type, index=0):
'''Returns the medium class for the
given agent_type. Optional index tells which one to give.'''
mediums = list(x for x in self.agency._agents
if x.get_descriptor().type_name == agent_type)
try:
return mediums[index]
except IndexError:
raise RuntimeError(
'Requested medium class of %s with index %d, but found only '
'%d medium of this type'
% (agent_type, index, len(mediums)))
def hist2d(x, y, bins=10, labels=None, aspect="auto", plot=True, fig=None, ax=None, interpolation='none', cbar=True, **kwargs):
"""
Creates a 2-D histogram of data *x*, *y* with *bins*, *labels* = :code:`[title, xlabel, ylabel]`, aspect ration *aspect*. Attempts to use axis *ax* first, then the current axis of *fig*, then the last axis, to use an already-created window.
Plotting (*plot*) is on by default, setting false doesn't attempt to create a figure.
*interpolation* sets the interpolation type of :meth:`matplotlib.axis.imshow`.
Returns a handle and extent as :code:`h, extent`
"""
h_range = kwargs.pop('range', None)
h_normed = kwargs.pop('normed', None)
h_weights = kwargs.pop('weights', None)
h, xe, ye = _np.histogram2d(x, y, bins=bins, range=h_range, normed=h_normed, weights=h_weights)
extent = [xe[0], xe[-1], ye[0], ye[-1]]
# fig = plt.figure()
if plot:
if ax is None:
if fig is None:
fig = _figure('hist2d')
ax = fig.gca()
ax.clear()
img = ax.imshow(h.transpose(), extent=extent, interpolation=interpolation, aspect=aspect, **kwargs)
if cbar:
_colorbar(ax=ax, im=img)
if labels is not None:
_addlabel(labels[0], labels[1], labels[2])
# _showfig(fig, aspect)
return h, extent | 0.007942 |
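A minimal sketch of the underlying plotting recipe, assuming NumPy and Matplotlib; `origin="lower"` is added here (the original relies on the imshow default) so the y-axis is not flipped:

```python
import numpy as np
import matplotlib.pyplot as plt

x, y = np.random.randn(10_000), np.random.randn(10_000)
h, xe, ye = np.histogram2d(x, y, bins=50)
extent = [xe[0], xe[-1], ye[0], ye[-1]]

fig, ax = plt.subplots()
img = ax.imshow(h.T, extent=extent, origin="lower",
                aspect="auto", interpolation="none")
fig.colorbar(img, ax=ax)
fig.savefig("hist2d.png")
```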
def set_is_immediate(self, value):
"""
Setter for 'is_immediate' field.
:param value - a new value of 'is_immediate' field. Must be a boolean type.
"""
if value is None:
self.__is_immediate = value
elif not isinstance(value, bool):
raise TypeError("IsImmediate must be set to a bool")
else:
self.__is_immediate = value | 0.007335 |
def rcfile(appname, section=None, args={}, strip_dashes=True):
"""Read environment variables and config files and return them merged with
predefined list of arguments.
Parameters
----------
appname: str
Application name, used for config files and environment variable
names.
section: str
Name of the section to be read. If this is not set: appname.
args:
arguments from command line (optparse, docopt, etc).
strip_dashes: bool
Strip dashes prefixing key names from args dict.
Returns
--------
dict
containing the merged variables of environment variables, config
files and args.
Raises
------
IOError
In case the return value is empty.
Notes
-----
Environment variables are read if they start with appname in uppercase
with underscore, for example:
TEST_VAR=1
Config files compatible with ConfigParser are read and the section name
appname is read, example:
[appname]
var=1
We can also have host-dependent configuration values, which have
priority over the default appname values.
[appname]
var=1
[appname:mylinux]
var=3
For boolean flags do not try to use: 'True' or 'False',
'on' or 'off',
'1' or '0'.
Unless you are willing to parse this values by yourself.
We recommend commenting the variables out with '#' if you want to set a
flag to False and check if it is in the rcfile cfg dict, i.e.:
flag_value = 'flag_variable' in cfg
Files are read from: /etc/appname/config,
/etc/appfilerc,
~/.config/appname/config,
~/.config/appname,
~/.appname/config,
~/.appnamerc,
appnamerc,
.appnamerc,
appnamerc file found in 'path' folder variable in args,
.appnamerc file found in 'path' folder variable in args,
file provided by 'config' variable in args.
Example
-------
args = rcfile(__name__, docopt(__doc__, version=__version__))
"""
if strip_dashes:
for k in list(args.keys()):
args[k.lstrip('-')] = args.pop(k)
environ = get_environment(appname)
if section is None:
section = appname
config = get_config(appname,
section,
args.get('config', ''),
args.get('path', ''))
config = merge(merge(args, config), environ)
if not config:
raise IOError('Could not find any rcfile for application '
'{}.'.format(appname))
return config | 0.00104 |
def filter(self, base_collection):
'''Yields subset of base_collection/generator based on filters.'''
for item in base_collection:
excluded = []
for (name, exclude) in self._filters:
if exclude(item):
excluded.append(name)
if excluded:
filter_value = "; ".join(excluded)
else:
filter_value = None
yield item, filter_value | 0.004301 |
def stack_outputs(self, name):
"""
Given a name, describes CloudFormation stacks and returns a dict of the stack Outputs,
else returns an empty dict.
"""
try:
stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
return {x['OutputKey']: x['OutputValue'] for x in stack['Outputs']}
except botocore.client.ClientError:
return {} | 0.00716 |
def save_function(self, obj, name=None):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
write = self.write
if name is None:
name = obj.__name__
try:
# whichmodule() could fail, see
# https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling
modname = pickle.whichmodule(obj, name)
except Exception:
modname = None
# print('which gives %s %s %s' % (modname, obj, name))
try:
themodule = sys.modules[modname]
except KeyError:
# eval'd items such as namedtuple give invalid items for their function __module__
modname = '__main__'
if modname == '__main__':
themodule = None
if themodule:
self.modules.add(themodule)
if getattr(themodule, name, None) is obj:
return self.save_global(obj, name)
# if func is lambda, def'ed at prompt, is in main, or is nested, then
# we'll pickle the actual function object rather than simply saving a
# reference (as is done in default pickler), via save_function_tuple.
if islambda(obj) or obj.__code__.co_filename == '<stdin>' or themodule is None:
#print("save global", islambda(obj), obj.__code__.co_filename, modname, themodule)
self.save_function_tuple(obj)
return
else:
# func is nested
klass = getattr(themodule, name, None)
if klass is None or klass is not obj:
self.save_function_tuple(obj)
return
if obj.__dict__:
# essentially save_reduce, but workaround needed to avoid recursion
self.save(_restore_attr)
write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
self.save(obj.__dict__)
write(pickle.TUPLE + pickle.REDUCE)
else:
write(pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj) | 0.01506 |
def profile(func):
"""
Simple profile decorator, monitors method execution time
"""
@inlineCallbacks
def callme(*args, **kwargs):
start = time.time()
ret = yield func(*args, **kwargs)
time_to_execute = time.time() - start
log.msg('%s executed in %.3f seconds' % (func.__name__, time_to_execute))
returnValue(ret)
return callme | 0.005128 |
def speed_difference(points):
""" Computes the speed difference between each adjacent point
Args:
points (:obj:`list` of :obj:`Point`)
Returns:
:obj:`list` of float: velocity difference between each pair of adjacent points (the first element is 0)
"""
data = [0]
for before, after in pairwise(points):
data.append(before.vel - after.vel)
return data | 0.003058 |
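A standalone sketch of the pairwise-difference step using `itertools.pairwise` (Python 3.10+); the `Point` class below is a stand-in for the real one:

```python
from dataclasses import dataclass
from itertools import pairwise   # Python 3.10+

@dataclass
class Point:
    vel: float

points = [Point(3.0), Point(4.5), Point(4.0), Point(7.0)]
data = [0] + [before.vel - after.vel for before, after in pairwise(points)]
print(data)   # [0, -1.5, 0.5, -3.0]
```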
def list_mapping(html_cleaned):
"""Map the preprocessed web document into a list and a dict, and extract the fake title.
Keyword arguments:
html_cleaned -- the preprocessed web page source code, as a string
Return:
unit_raw -- the text lines of the web page
init_dict -- keys are line indexes, values are the text lines, sorted by text-line length in descending order
fake_title -- the fake title, i.e. the text line taken from the <title> of the page source
"""
unit_raw = html_cleaned.split('\n')
for i in unit_raw:
c = CDM(i)
if c.PTN != 0:
fake_title = i
break
init_list = []
init_dict = {}
for i in unit_raw:
init_list.append(len(i))
for i in range(0, len(init_list)):
init_dict[i] = init_list[i]
init_dict = sorted(init_dict.items(), key=lambda item: item[1], reverse=True)
try:
log('debug', 'Mapping succeeded; extracted fake title: [{}]'.format(fake_title))
except UnboundLocalError:
fake_title = ''
log('err', 'Failed to extract the fake title')
return unit_raw, init_dict, fake_title | 0.002141 |
def new_linsolver(name,prop):
"""
Creates a linear solver.
Parameters
----------
name : string
prop : string
Returns
-------
solver : :class:`LinSolver <optalg.lin_solver.LinSolver>`
"""
if name == 'mumps':
return LinSolverMUMPS(prop)
elif name == 'superlu':
return LinSolverSUPERLU(prop)
elif name == 'umfpack':
return LinSolverUMFPACK(prop)
elif name == 'default':
try:
return new_linsolver('mumps',prop)
except ImportError:
return new_linsolver('superlu',prop)
else:
raise ValueError('invalid linear solver name') | 0.010432 |
def other_dependancies(server, environment):
"""
Installs things that need to be in place before installing the main package
"""
print(' ** Other Dependencies, based on server', server, '**')
server = server.lower()
# Pillow is not on TestPyPI
if server == "local":
pass
elif server in ["testpypi", "pypitest"]:
# these are packages not available on the test server, so install them
# off the regular pypi server
print(" **Install Pillow**")
subprocess.call([environment + '\\Scripts\\pip.exe', 'install', 'Pillow'], shell=True)
elif server in ["pypi"]:
print(" **Install Pillow**")
subprocess.call([environment + '\\Scripts\\pip.exe', 'install', 'Pillow'], shell=True)
else:
print(" **Nothing more to install**") | 0.00365 |
def build_parameters(request, meta, orgaMode, currentOrga):
"""Return the list of get, post and file parameters to send"""
postParameters = {}
getParameters = {}
files = {}
def update_parameters(data):
tmp_getParameters, tmp_postParameters, tmp_files = data
getParameters.update(tmp_getParameters)
postParameters.update(tmp_postParameters)
files.update(tmp_files)
update_parameters(build_base_parameters(request))
update_parameters(build_user_requested_parameters(request, meta))
update_parameters(build_orga_parameters(request, orgaMode, currentOrga))
return (getParameters, postParameters, files) | 0.00149 |
def dashed(requestContext, seriesList, dashLength=5):
"""
Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a dotted line with segments of length F
If omitted, the default length of the segments is 5.0
Example::
&target=dashed(server01.instance01.memory.free,2.5)
"""
for series in seriesList:
series.name = 'dashed(%s, %g)' % (series.name, dashLength)
series.options['dashed'] = dashLength
return seriesList | 0.001942 |
def _process_response(response: requests.Response, expected: list = []) -> dict:
"""
Processes an API response. Raises an exception when appropriate.
The exception that will be raised is MoneyBird.APIError. This exception is subclassed so implementing programs
can easily react appropriately to different exceptions.
The following subclasses of MoneyBird.APIError are likely to be raised:
- MoneyBird.Unauthorized: No access to the resource or invalid authentication
- MoneyBird.Throttled: Access (temporarily) denied, please try again
- MoneyBird.NotFound: Resource not found, check resource path
- MoneyBird.InvalidData: Validation errors occured while processing your input
- MoneyBird.ServerError: Error on the server
:param response: The response to process.
:param expected: A list of expected status codes which won't raise an exception.
:return: The useful data in the response (may be None).
"""
responses = {
200: None,
201: None,
204: None,
400: MoneyBird.Unauthorized,
401: MoneyBird.Unauthorized,
403: MoneyBird.Throttled,
404: MoneyBird.NotFound,
406: MoneyBird.NotFound,
422: MoneyBird.InvalidData,
429: MoneyBird.Throttled,
500: MoneyBird.ServerError,
}
logger.debug("API request: %s %s\n" % (response.request.method, response.request.url) +
"Response: %s %s" % (response.status_code, response.text))
if response.status_code not in expected:
if response.status_code not in responses:
logger.error("API response contained unknown status code")
raise MoneyBird.APIError(response, "API response contained unknown status code")
elif responses[response.status_code] is not None:
try:
description = response.json()['error']
except (AttributeError, TypeError, KeyError, ValueError):
description = None
raise responses[response.status_code](response, description)
try:
data = response.json()
except ValueError:
logger.error("API response is not JSON decodable")
data = None
return data | 0.003733 |
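A self-contained sketch of the status-code dispatch; the exception classes here are stand-ins for the `MoneyBird.*` hierarchy and only a few codes are mapped:

```python
class APIError(Exception): pass
class Unauthorized(APIError): pass
class NotFound(APIError): pass

_RESPONSES = {200: None, 201: None, 204: None,
              401: Unauthorized, 403: Unauthorized, 404: NotFound}

def check_status(status_code, expected=()):
    if status_code in expected:
        return                                    # caller explicitly tolerates this code
    exc = _RESPONSES.get(status_code, APIError)   # unknown codes fall back to the base error
    if exc is not None:
        raise exc("HTTP %d" % status_code)

check_status(204)                       # no exception
check_status(404, expected=(404,))      # tolerated because the caller expects it
```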
def get_previous_thumbprint(self, components=None):
"""
Returns a dictionary representing the previous configuration state.
Thumbprint is of the form:
{
component_name1: {key: value},
component_name2: {key: value},
...
}
"""
components = str_to_component_list(components)
tp_fn = self.manifest_filename
tp_text = None
if self.file_exists(tp_fn):
fd = six.BytesIO()
get(tp_fn, fd)
tp_text = fd.getvalue()
manifest_data = {}
raw_data = yaml.load(tp_text)
for k, v in raw_data.items():
manifest_key = assert_valid_satchel(k)
service_name = clean_service_name(k)
if components and service_name not in components:
continue
manifest_data[manifest_key] = v
return manifest_data | 0.002047 |
def RechazarCTG(self, carta_porte, ctg, motivo):
"The destination can reject the CTG through the following operation"
response = self.client.rechazarCTG(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosRechazarCTG={
'cartaPorte': carta_porte,
'ctg': ctg, 'motivoRechazo': motivo,
}))['response']
datos = response.get('datosResponse')
self.__analizar_errores(response)
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['CTG'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoOperacion = str(datos['codigoOperacion']) | 0.002278 |
def cross_entropy_calc(TOP, P, POP):
"""
Calculate cross entropy.
:param TOP: test outcome positive
:type TOP : dict
:param P: condition positive
:type P : dict
:param POP: population
:type POP : dict
:return: cross entropy as float
"""
try:
result = 0
for i in TOP.keys():
reference_likelihood = P[i] / POP[i]
response_likelihood = TOP[i] / POP[i]
if response_likelihood != 0 and reference_likelihood != 0:
result += reference_likelihood * \
math.log(response_likelihood, 2)
return -result
except Exception:
return "None" | 0.001481 |
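A worked example of the same cross-entropy formula on two classes, with made-up counts:

```python
import math

P = {"A": 30, "B": 70}        # reference (condition positive) counts
TOP = {"A": 40, "B": 60}      # test outcome positive counts
POP = {"A": 100, "B": 100}    # population per class

result = 0.0
for label in TOP:
    reference_likelihood = P[label] / POP[label]
    response_likelihood = TOP[label] / POP[label]
    if response_likelihood != 0 and reference_likelihood != 0:
        result += reference_likelihood * math.log(response_likelihood, 2)
print(-result)   # -(0.3*log2(0.4) + 0.7*log2(0.6)) ≈ 0.912
```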
def GetAccounts(self):
"""Return the client accounts associated with the user's manager account.
Returns:
list List of ManagedCustomer data objects.
"""
selector = {
'fields': ['CustomerId', 'CanManageClients']
}
accounts = self.client.GetService('ManagedCustomerService').get(selector)
return accounts['entries'] | 0.002793 |
def synchronizeReplica(self,
replicaID,
transportType="esriTransportTypeUrl",
replicaServerGen=None,
returnIdsForAdds=False,
edits=None,
returnAttachmentDatabyURL=False,
                           async_=False,  # 'async' became a reserved word in Python 3.7, hence the underscore
syncDirection="snapshot",
syncLayers="perReplica",
editsUploadID=None,
editsUploadFormat=None,
dataFormat="json",
rollbackOnFailure=True):
"""
TODO: implement synchronize replica
http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000000vv000000
"""
params = {
            "f" : "json",
            "replicaID" : replicaID,
            "transportType" : transportType,
            "dataFormat" : dataFormat,
            "rollbackOnFailure" : rollbackOnFailure,
            "async" : async_,
"returnIdsForAdds": returnIdsForAdds,
"syncDirection" : syncDirection,
"returnAttachmentDatabyURL" : returnAttachmentDatabyURL
}
return | 0.019716 |
def pop(self, strip=False):
"""Current content popped, useful for testing"""
r = self.contents()
self.clear()
if r and strip:
r = r.strip()
return r | 0.01 |
def load_corpus(self, path, config):
'''Load a dialogue corpus; eventually, support pickles and potentially other formats'''
# use the default dataset if no path is provided
# TODO -- change this to use a pre-saved dataset
if path == '':
path = self.default_path_to_corpus
self.data = Corpus(path=path, config=self.data_config) | 0.007874 |
def _find_feed_language(self):
"""Find feed language based specified feed_info.txt or agency.txt.
"""
self.feed_language = (
read_first_available_value(
os.path.join(self.src_dir, 'feed_info.txt'), 'feed_lang') or
read_first_available_value(
os.path.join(self.src_dir, 'agency.txt'), 'agency_lang'))
if not self.feed_language:
raise Exception(
'Cannot find feed language in feed_info.txt and agency.txt')
print('\tfeed language: %s' % self.feed_language) | 0.00346 |
def flat_data(self):
"""
Function to pass our modified values to the original ones
"""
def flat_field(value):
"""
Flat item
"""
try:
value.flat_data()
return value
except AttributeError:
return value
modified_data = self.__modified_data__ if self.__modified_data__ is not None else self.__original_data__
if modified_data is not None:
self.__original_data__ = [flat_field(value) for value in modified_data]
self.__modified_data__ = None | 0.006515 |
def is_on_tag(self) -> bool:
"""
:return: True if latest commit is tagged
:rtype: bool
"""
if self.get_current_tag():
LOGGER.debug('latest commit is tagged')
return True
LOGGER.debug('latest commit is NOT tagged')
return False | 0.006515 |
def wnunid(a, b):
"""
Place the union of two double precision windows into a third window.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnunid_c.html
:param a: Input window A.
:type a: spiceypy.utils.support_types.SpiceCell
:param b: Input window B.
:type b: spiceypy.utils.support_types.SpiceCell
:return: Union of a and b.
:rtype: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(a, stypes.SpiceCell)
assert b.dtype == 1
assert isinstance(b, stypes.SpiceCell)
assert a.dtype == 1
c = stypes.SpiceCell.double(b.size + a.size)
libspice.wnunid_c(ctypes.byref(a), ctypes.byref(b), ctypes.byref(c))
return c | 0.004292 |
def camel_case(string):
"""
Converts a string to camel case. For example::
camel_case('one_two_three') -> 'oneTwoThree'
"""
if not string:
return string
parts = snake_case(string).split('_')
rv = ''
while parts:
part = parts.pop(0)
rv += part or '_'
if part:
break
return rv + ''.join(x.title() for x in parts) | 0.002532 |
def combine_images(imgs, register=True):
"""Combine similar images into one to reduce the noise
Parameters
----------
imgs: list of 2d array
Series of images
register: bool, default True
True if the images should be registered before combination
Returns
-------
im: 2d array
The result image
Notes
-----
This is an example of the usage of the library
"""
imgs = np.asarray(imgs, dtype="float")
if register:
for i in range(1, imgs.shape[0]):
ret = register_images(imgs[0, :, :], imgs[i, :, :])
imgs[i, :, :] = rotate_scale_shift(imgs[i, :, :], *ret[:3], np.nan)
return np.mean(imgs, 0) | 0.001418 |
def initialize_state(self):
""" Call this to initialize the state of the UI after everything has been connected. """
if self.__hardware_source:
self.__profile_changed_event_listener = self.__hardware_source.profile_changed_event.listen(self.__update_profile_index)
self.__frame_parameters_changed_event_listener = self.__hardware_source.frame_parameters_changed_event.listen(self.__update_frame_parameters)
self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed)
self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed)
self.__log_messages_event_listener = self.__hardware_source.log_messages_event.listen(self.__log_messages)
if self.on_display_name_changed:
self.on_display_name_changed(self.display_name)
if self.on_binning_values_changed:
self.on_binning_values_changed(self.__hardware_source.binning_values)
if self.on_monitor_button_state_changed:
has_monitor = self.__hardware_source and self.__hardware_source.features.get("has_monitor", False)
self.on_monitor_button_state_changed(has_monitor, has_monitor)
self.__update_buttons()
if self.on_profiles_changed:
profile_items = self.__hardware_source.modes
self.on_profiles_changed(profile_items)
self.__update_profile_index(self.__hardware_source.selected_profile_index)
if self.on_data_item_states_changed:
self.on_data_item_states_changed(list()) | 0.006463 |
def get_port_channel_detail_output_lacp_partner_oper_key(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
output = ET.SubElement(get_port_channel_detail, "output")
lacp = ET.SubElement(output, "lacp")
partner_oper_key = ET.SubElement(lacp, "partner-oper-key")
partner_oper_key.text = kwargs.pop('partner_oper_key')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003339 |
def return_period_from_string(arg):
"""
Takes a string such as "days=1,seconds=30" and strips the quotes
and returns a dictionary with the key/value pairs
"""
period = {}
if arg[0] == '"' and arg[-1] == '"':
opt = arg[1:-1] # remove quotes
else:
opt = arg
for o in opt.split(","):
key, value = o.split("=")
period[str(key)] = int(value)
return period | 0.002364 |
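A sketch of the same parsing, feeding the result straight into `datetime.timedelta`:

```python
from datetime import timedelta

def parse_period(arg):
    opt = arg[1:-1] if arg[0] == '"' and arg[-1] == '"' else arg
    return {key: int(value) for key, value in
            (pair.split("=") for pair in opt.split(","))}

period = parse_period('"days=1,seconds=30"')
print(timedelta(**period))   # 1 day, 0:00:30
```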
def pprint(to_be_printed):
"""nicely formated print"""
try:
import pprint as pp
# generate an instance PrettyPrinter
# pp.PrettyPrinter().pprint(to_be_printed)
pp.pprint(to_be_printed)
except ImportError:
if isinstance(to_be_printed, dict):
print('{')
for k, v in to_be_printed.items():
print("'" + k + "'" if isinstance(k, basestring) else k,
': ',
"'" + v + "'" if isinstance(v, basestring) else v,
sep="")
print('}')
else:
print('could not import pprint module, applying regular print')
print(to_be_printed) | 0.001406 |
def move_distance(self, distance_x_m, distance_y_m, distance_z_m,
velocity=VELOCITY):
"""
Move in a straight line.
positive X is forward
positive Y is left
positive Z is up
:param distance_x_m: The distance to travel along the X-axis (meters)
:param distance_y_m: The distance to travel along the Y-axis (meters)
:param distance_z_m: The distance to travel along the Z-axis (meters)
:param velocity: the velocity of the motion (meters/second)
:return:
"""
distance = math.sqrt(distance_x_m * distance_x_m +
distance_y_m * distance_y_m +
distance_z_m * distance_z_m)
flight_time = distance / velocity
velocity_x = velocity * distance_x_m / distance
velocity_y = velocity * distance_y_m / distance
velocity_z = velocity * distance_z_m / distance
self.start_linear_motion(velocity_x, velocity_y, velocity_z)
time.sleep(flight_time)
self.stop() | 0.002791 |
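A pure-math sketch of the velocity decomposition used above (no Crazyflie dependency); the default velocity value is illustrative:

```python
import math

def velocity_components(dx, dy, dz, velocity=0.2):
    distance = math.sqrt(dx * dx + dy * dy + dz * dz)
    flight_time = distance / velocity
    return (velocity * dx / distance,
            velocity * dy / distance,
            velocity * dz / distance,
            flight_time)

vx, vy, vz, t = velocity_components(1.0, 1.0, 0.0)
print(round(vx, 3), round(vy, 3), vz, round(t, 2))   # 0.141 0.141 0.0 7.07
```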
def callable_name(func: Callable) -> str:
"""Return the qualified name (e.g. package.module.func) for the given callable."""
if func.__module__ == 'builtins':
return func.__name__
else:
return '{}.{}'.format(func.__module__, func.__qualname__) | 0.00738 |
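A usage sketch that re-declares the helper so the example is self-contained:

```python
from typing import Callable
import json

def callable_name(func: Callable) -> str:
    if func.__module__ == 'builtins':
        return func.__name__
    return '{}.{}'.format(func.__module__, func.__qualname__)

print(callable_name(len))          # len
print(callable_name(json.dumps))   # json.dumps
```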
def make_sloppy_codec(encoding):
"""
Take a codec name, and return a 'sloppy' version of that codec that can
encode and decode the unassigned bytes in that encoding.
Single-byte encodings in the standard library are defined using some
boilerplate classes surrounding the functions that do the actual work,
`codecs.charmap_decode` and `charmap_encode`. This function, given an
encoding name, *defines* those boilerplate classes.
"""
# Make a bytestring of all 256 possible bytes.
all_bytes = bytes(range(256))
# Get a list of what they would decode to in Latin-1.
sloppy_chars = list(all_bytes.decode('latin-1'))
# Get a list of what they decode to in the given encoding. Use the
# replacement character for unassigned bytes.
if PY26:
decoded_chars = all_bytes.decode(encoding, 'replace')
else:
decoded_chars = all_bytes.decode(encoding, errors='replace')
# Update the sloppy_chars list. Each byte that was successfully decoded
# gets its decoded value in the list. The unassigned bytes are left as
# they are, which gives their decoding in Latin-1.
for i, char in enumerate(decoded_chars):
if char != REPLACEMENT_CHAR:
sloppy_chars[i] = char
# For ftfy's own purposes, we're going to allow byte 1A, the "Substitute"
# control code, to encode the Unicode replacement character U+FFFD.
sloppy_chars[0x1a] = REPLACEMENT_CHAR
# Create the data structures that tell the charmap methods how to encode
# and decode in this sloppy encoding.
decoding_table = ''.join(sloppy_chars)
encoding_table = codecs.charmap_build(decoding_table)
# Now produce all the class boilerplate. Look at the Python source for
# `encodings.cp1252` for comparison; this is almost exactly the same,
# except I made it follow pep8.
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return codecs.CodecInfo(
name='sloppy-' + encoding,
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
) | 0.000341 |
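A small demonstration of the fallback idea the sloppy codec implements: bytes that windows-1252 leaves unassigned decode to their Latin-1 values instead of U+FFFD:

```python
all_bytes = bytes(range(256))
strict_view = all_bytes.decode('windows-1252', errors='replace')
fallback_view = [
    ch if ch != '\ufffd' else all_bytes[i:i + 1].decode('latin-1')
    for i, ch in enumerate(strict_view)
]
print(repr(strict_view[0x81]), repr(fallback_view[0x81]))   # '\ufffd' vs '\x81'
```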
def backspace_changed(self, settings, key, user_data):
"""If the gconf var compat_backspace be changed, this method
will be called and will change the binding configuration in
all terminals open.
"""
for i in self.guake.notebook_manager.iter_terminals():
i.set_backspace_binding(self.getEraseBinding(settings.get_string(key))) | 0.007937 |
def _validate_compute_chunk_params(self,
graph,
dates,
sids,
initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(sids)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/assets "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
)
for term in initial_workspace:
if self._is_special_root_term(term):
continue
if term.domain is GENERIC:
# XXX: We really shouldn't allow **any** generic terms to be
# populated in the initial workspace. A generic term, by
# definition, can't correspond to concrete data until it's
# paired with a domain, and populate_initial_workspace isn't
# given the domain of execution, so it can't possibly know what
# data to use when populating a generic term.
#
# In our current implementation, however, we don't have a good
# way to represent specializations of ComputableTerms that take
# only generic inputs, so there's no good way for the initial
# workspace to provide data for such terms except by populating
# the generic ComputableTerm.
#
# The right fix for the above is to implement "full
# specialization", i.e., implementing ``specialize`` uniformly
# across all terms, not just LoadableTerms. Having full
# specialization will also remove the need for all of the
# remaining ``maybe_specialize`` calls floating around in this
# file.
#
# In the meantime, disallowing ComputableTerms in the initial
# workspace would break almost every test in
# `test_filter`/`test_factor`/`test_classifier`, and fixing
# them would require updating all those tests to compute with
# more specialized terms. Once we have full specialization, we
# can fix all the tests without a large volume of edits by
# simply specializing their workspaces, so for now I'm leaving
# this in place as a somewhat sharp edge.
if isinstance(term, LoadableTerm):
raise ValueError(
"Loadable workspace terms must be specialized to a "
"domain, but got generic term {}".format(term)
)
elif term.domain != graph.domain:
raise ValueError(
"Initial workspace term {} has domain {}. "
"Does not match pipeline domain {}".format(
term, term.domain, graph.domain,
)
) | 0.001572 |
def create_enrollment_term(self, account_id, enrollment_term_end_at=None, enrollment_term_name=None, enrollment_term_sis_term_id=None, enrollment_term_start_at=None):
"""
Create enrollment term.
Create a new enrollment term for the specified account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# OPTIONAL - enrollment_term[name]
"""The name of the term."""
if enrollment_term_name is not None:
data["enrollment_term[name]"] = enrollment_term_name
# OPTIONAL - enrollment_term[start_at]
"""The day/time the term starts.
Accepts times in ISO 8601 format, e.g. 2015-01-10T18:48:00Z."""
if enrollment_term_start_at is not None:
data["enrollment_term[start_at]"] = enrollment_term_start_at
# OPTIONAL - enrollment_term[end_at]
"""The day/time the term ends.
Accepts times in ISO 8601 format, e.g. 2015-01-10T18:48:00Z."""
if enrollment_term_end_at is not None:
data["enrollment_term[end_at]"] = enrollment_term_end_at
# OPTIONAL - enrollment_term[sis_term_id]
"""The unique SIS identifier for the term."""
if enrollment_term_sis_term_id is not None:
data["enrollment_term[sis_term_id]"] = enrollment_term_sis_term_id
self.logger.debug("POST /api/v1/accounts/{account_id}/terms with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/accounts/{account_id}/terms".format(**path), data=data, params=params, single_item=True) | 0.002836 |
def _get_rule_definition(self, rule):
"""Generates the source code for a rule."""
fmt = """def {rule_fxn_name}(self, text):
{indent}\"\"\"{rule_source}\"\"\"
{indent}self._attempting(text)
{indent}return {rule_definition}(text){transform}
"""
fmt = self._clean_fmt(fmt)
source = self._indent(self._ast_to_code(rule.expression), skip_first_line=True)
# All the primitives will accept a string x in place of terminal(x). This is terminal shorthand.
# However, if a rule is only a wrapper around a single terminal, we have to actually make a
# terminal call. This handles that situation.
if self.use_terminal_shorthand and len(source) == 1 and source[0].startswith(("'", '"')):
source = ["terminal({})".format(source[0])]
rule_source = fmt.format(rule_fxn_name=self._get_rule_fxn_name(rule.name),
indent=self.indent,
rule_source=self._get_rule_source(rule),
rule_definition="\n".join(source),
transform=self._get_rule_transform(rule))
return self._indent(rule_source, 1) | 0.005085 |
def from_bytes(SaplingTx, byte_string):
'''
byte-like -> SaplingTx
'''
header = byte_string[0:4]
group_id = byte_string[4:8]
if header != b'\x04\x00\x00\x80' or group_id != b'\x85\x20\x2f\x89':
raise ValueError(
'Bad header or group ID. Expected {} and {}. Got: {} and {}'
.format(b'\x04\x00\x00\x80'.hex(),
b'\x85\x20\x2f\x89'.hex(),
header.hex(),
group_id.hex()))
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(byte_string[8:])
current = 8 + len(tx_ins_num)
for _ in range(tx_ins_num.number):
tx_in = TxIn.from_bytes(byte_string[current:])
current += len(tx_in)
tx_ins.append(tx_in)
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_outs_num.number):
tx_out = TxOut.from_bytes(byte_string[current:])
current += len(tx_out)
tx_outs.append(tx_out)
lock_time = byte_string[current:current + 4]
current += 4
expiry_height = byte_string[current:current + 4]
current += 4
value_balance = byte_string[current:current + 8]
current += 8
tx_shielded_spends = []
shielded_spends_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(shielded_spends_num)
for _ in range(shielded_spends_num.number):
ss = SaplingShieldedSpend.from_bytes(byte_string[current:])
current += len(ss)
tx_shielded_spends.append(ss)
tx_shielded_outputs = []
shielded_outputs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(shielded_outputs_num)
for _ in range(shielded_outputs_num.number):
so = SaplingShieldedOutput.from_bytes(byte_string[current:])
current += len(so)
tx_shielded_outputs.append(so)
tx_joinsplits = []
tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_joinsplits_num)
for _ in range(tx_joinsplits_num.number):
tx_joinsplit = SaplingJoinsplit.from_bytes(
byte_string[current:])
current += len(tx_joinsplit)
tx_joinsplits.append(tx_joinsplit)
if len(tx_joinsplits) > 0:
joinsplit_pubkey = byte_string[current:current + 32]
current += 32
joinsplit_sig = byte_string[current:current + 64]
current += 64
else:
joinsplit_pubkey = None
joinsplit_sig = None
if len(tx_shielded_spends) + len(tx_shielded_outputs) > 0:
binding_sig = byte_string[current:current + 64]
current += 64
else:
binding_sig = None
return SaplingTx(
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
expiry_height=expiry_height,
value_balance=value_balance,
tx_shielded_spends=tx_shielded_spends,
tx_shielded_outputs=tx_shielded_outputs,
tx_joinsplits=tx_joinsplits,
joinsplit_pubkey=joinsplit_pubkey,
joinsplit_sig=joinsplit_sig,
binding_sig=binding_sig) | 0.000587 |
def translate_alias(self, alias, namespace=None, target_namespaces=None, translate_ncbi_namespace=None):
"""given an alias and optional namespace, return a list of all other
aliases for same sequence
"""
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
aliases = self.aliases.fetch_aliases(seq_id=seq_id,
translate_ncbi_namespace=translate_ncbi_namespace)
if target_namespaces:
aliases = [a for a in aliases if a["namespace"] in target_namespaces]
return aliases | 0.007062 |
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists.
"""
if sys.version_info < (3, 3):
raise NotImplementedError("replace() is only available "
"with Python 3.3 and later")
if self._closed:
self._raise_closed()
self._accessor.replace(self, target) | 0.004662 |
def loaded_modules(self):
'''The list of loaded module profile dictionaries.'''
with self._mutex:
if not self._loaded_modules:
self._loaded_modules = []
for mp in self._obj.get_loaded_modules():
self._loaded_modules.append(utils.nvlist_to_dict(mp.properties))
return self._loaded_modules | 0.008 |
def _proc_no_rot_sym(self):
"""
Handles molecules with no rotational symmetry. Only possible point
groups are C1, Cs and Ci.
"""
self.sch_symbol = "C1"
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.sch_symbol = "Ci"
self.symmops.append(PointGroupAnalyzer.inversion_op)
else:
for v in self.principal_axes:
mirror_type = self._find_mirror(v)
if not mirror_type == "":
self.sch_symbol = "Cs"
break | 0.003503 |
def mf_aBl(self):
"""
These are the expected log likelihoods (node potentials)
as seen from the discrete states.
"""
mf_aBl = self._mf_aBl = np.zeros((self.T, self.num_states))
ids, dds, eds = self.init_dynamics_distns, self.dynamics_distns, \
self.emission_distns
for idx, (d1, d2, d3) in enumerate(zip(ids, dds, eds)):
mf_aBl[0,idx] = d1.expected_log_likelihood(
stats=self.E_init_stats)
mf_aBl[:-1,idx] += d2.expected_log_likelihood(
stats=self.E_dynamics_stats)
mf_aBl[:,idx] += d3.expected_log_likelihood(
stats=self.E_emission_stats)
mf_aBl[np.isnan(mf_aBl).any(1)] = 0.
return mf_aBl | 0.006579 |
def run_display_profile(self, program_main):
"""Print profile name with programMain.
Args:
program_main (str): The executable name.
"""
install_json = self.profile.get('install_json')
output = 'Profile: '
output += '{}{}{}{} '.format(
c.Style.BRIGHT, c.Fore.CYAN, self.profile.get('profile_name'), c.Style.RESET_ALL
)
output += '[{}{}{}{}'.format(
c.Style.BRIGHT, c.Fore.MAGENTA, program_main, c.Style.RESET_ALL
)
if install_json.get('programVersion') is not None:
output += '{}:{}'.format(c.Style.BRIGHT, c.Style.RESET_ALL)
output += '{}{}{}{}'.format(
c.Style.BRIGHT,
c.Fore.MAGENTA,
install_json.get('programVersion'),
c.Style.RESET_ALL,
)
output += ']'
print(output) | 0.003319 |
def virtual_memory():
'''
.. versionadded:: 2014.7.0
Return a dict that describes statistics about system memory usage.
.. note::
This function is only available in psutil version 0.6.0 and above.
CLI Example:
.. code-block:: bash
salt '*' ps.virtual_memory
'''
if psutil.version_info < (0, 6, 0):
msg = 'virtual_memory is only available in psutil 0.6.0 or greater'
raise CommandExecutionError(msg)
return dict(psutil.virtual_memory()._asdict()) | 0.001927 |
def get_likes(self, offset=0, limit=50):
""" Get user's likes. """
response = self.client.get(
self.client.USER_LIKES % (self.name, offset, limit))
return self._parse_response(response, strack) | 0.008734 |
def array2bytes(arr, bytes_type=bytes):
"""Wraps NumPy's save function to return bytes.
We use :func:`numpy.save` rather than :meth:`numpy.ndarray.tobytes` because
it encodes endianness and order.
Args:
arr (:obj:`numpy.ndarray`):
Array to be saved.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
bytes_type
"""
bio = io.BytesIO()
np.save(bio, arr, allow_pickle=False)
return bytes_type(bio.getvalue()) | 0.001302 |
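A minimal round-trip sketch for array2bytes above; feeding the returned bytes back through numpy.load is an assumption about the matching deserializer, not part of the snippet itself.
import io
import numpy as np

# Hypothetical round trip: serialize with array2bytes, then restore the array
# by handing the bytes back to numpy's load(). Dtype, shape and endianness
# survive because np.save encodes them in the header.
arr = np.arange(6, dtype=np.int64).reshape(2, 3)
raw = array2bytes(arr)
restored = np.load(io.BytesIO(raw), allow_pickle=False)
assert (restored == arr).all() and restored.dtype == arr.dtype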
def entries(self, query=None):
"""Fetches all Entries from the Space (up to the set limit, can be modified in `query`).
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = client.entries()
[<Entry[cat] id='happycat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>,
<Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>,
<Entry[cat] id='garfield'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>,
<Entry[cat] id='nyancat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>,
<Entry[human] id='finn'>,
<Entry[dog] id='jake'>]
"""
if query is None:
query = {}
self._normalize_select(query)
return self._get(
self.environment_url('/entries'),
query
) | 0.00313 |
def create_hook(self, auth, repo_name, hook_type, config, events=None, organization=None, active=False):
"""
Creates a new hook, and returns the created hook.
:param auth.Authentication auth: authentication object, must be admin-level
:param str repo_name: the name of the repo for which we create the hook
:param str hook_type: The type of webhook, either "gogs" or "slack"
:param dict config: Settings for this hook (possible keys are
``"url"``, ``"content_type"``, ``"secret"``)
:param list events: Determines what events the hook is triggered for. Default: ["push"]
:param str organization: Organization of the repo
:param bool active: Determines whether the hook is actually triggered on pushes. Default is false
:return: a representation of the created hook
:rtype: GogsRepo.Hook
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
if events is None:
events = ["push"] # default value is mutable, so assign inside body
data = {
"type": hook_type,
"config": config,
"events": events,
"active": active
}
url = "/repos/{o}/{r}/hooks".format(o=organization, r=repo_name) if organization is not None \
else "/repos/{r}/hooks".format(r=repo_name)
response = self.post(url, auth=auth, data=data)
return GogsRepo.Hook.from_json(response.json()) | 0.005671 |
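A hedged usage sketch for create_hook; the GogsApi/Token construction and all values below are placeholders inferred from the gogs_client library — only the create_hook signature comes from the snippet above.
# Hypothetical usage: register a push/create webhook on an organization repo.
api = gogs_client.GogsApi("https://git.example.com")    # assumed client class
auth = gogs_client.Token("<admin-api-token>")           # assumed admin-level auth
hook = api.create_hook(
    auth,
    repo_name="my-repo",
    hook_type="gogs",
    config={"url": "https://ci.example.com/hook", "content_type": "json"},
    events=["push", "create"],
    organization="my-org",
    active=True,
)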
def compute_consistency_score(returns_test, preds):
"""
Compute Bayesian consistency score.
Parameters
----------
returns_test : pd.Series
Observed cumulative returns.
preds : numpy.array
Multiple (simulated) cumulative returns.
Returns
-------
Consistency score
Score from 100 (returns_test perfectly on the median line of the
Bayesian cone spanned by preds) to 0 (returns_test completely
outside of Bayesian cone.)
"""
returns_test_cum = cum_returns(returns_test, starting_value=1.)
cum_preds = np.cumprod(preds + 1, 1)
q = [sp.stats.percentileofscore(cum_preds[:, i],
returns_test_cum.iloc[i],
kind='weak')
for i in range(len(returns_test_cum))]
# normalize to be from 100 (perfect median line) to 0 (completely outside
# of cone)
return 100 - np.abs(50 - np.mean(q)) / .5 | 0.001037 |
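An illustrative call with simulated data; the normal-return simulation below is only a stand-in for real backtest output.
import numpy as np
import pandas as pd

# 1000 simulated paths of 250 daily returns plus one observed series drawn from
# the same distribution. The score approaches 100 when the observed cumulative
# path tracks the median of the simulated cone and falls toward 0 as it leaves it.
rng = np.random.RandomState(0)
preds = rng.normal(0.0005, 0.01, size=(1000, 250))
observed = pd.Series(rng.normal(0.0005, 0.01, size=250))
score = compute_consistency_score(observed, preds)
print(round(score, 1))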
def add_source(self, source):
"""Connect the source to all existing other nodes."""
nodes = [n for n in self.nodes() if not isinstance(n, Source)]
source.connect(whom=nodes) | 0.010152 |
def get_dates_file(path):
""" parse dates file of dates and probability of choosing"""
with open(path) as f:
dates = f.readlines()
return [(convert_time_string(date_string.split(" ")[0]), float(date_string.split(" ")[1]))
for date_string in dates] | 0.007168 |
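A hedged sketch of the input layout implied by the parsing above: one whitespace-separated date string and probability per line, where the date format is whatever convert_time_string accepts.
# Hypothetical dates file:
#
#   2017-01-02 0.25
#   2017-01-03 0.50
#   2017-01-04 0.25
#
# get_dates_file("dates.txt") would then return a list of
# (converted_date, probability) tuples such as [(<date>, 0.25), (<date>, 0.5), ...].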
def _split_dimension(text):
"""
Returns the number and unit from the given piece of text as a pair.
>>> _split_dimension('1pt')
(1, 'pt')
>>> _split_dimension('1 pt')
(1, 'pt')
>>> _split_dimension('1 \tpt')
(1, 'pt')
>>> _split_dimension('1 \tpt ')
(1, 'pt')
>>> _split_dimension(' 1 \tpt ')
(1, 'pt')
>>> _split_dimension('3')
(3, None)
>>> _split_dimension('-12.43mm')
(-12.43, 'mm')
>>> _split_dimension('-12.43"')
(-12.43, '"')
"""
match = _dimension_finder.match(text)
if not match:
raise DimensionError("Can't parse dimension '%s'." % text)
number = match.group(1)
unit = match.group(4)
if '.' in number:
return (float(number), unit)
else:
return (int(number), unit) | 0.001245 |
def _get_valid_indices(shape, ix0, ix1, iy0, iy1):
"""Give array shape and desired indices, return indices that are
correctly bounded by the shape."""
ymax, xmax = shape
if ix0 < 0:
ix0 = 0
if ix1 > xmax:
ix1 = xmax
if iy0 < 0:
iy0 = 0
if iy1 > ymax:
iy1 = ymax
if iy1 <= iy0 or ix1 <= ix0:
raise IndexError(
'array[{0}:{1},{2}:{3}] is invalid'.format(iy0, iy1, ix0, ix1))
return list(map(int, [ix0, ix1, iy0, iy1])) | 0.001972 |
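A quick illustration of the clamping behaviour; the values are chosen arbitrarily.
# Out-of-range requests are clamped to the array bounds and returned as ints:
shape = (100, 200)                                   # (ymax, xmax)
print(_get_valid_indices(shape, -5, 210, 10, 90))    # -> [0, 200, 10, 90]
# A window that collapses to nothing raises IndexError:
# _get_valid_indices(shape, 50, 40, 10, 90)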
def list_tables(self):
'''
Load existing tables and their descriptions.
:return:
'''
if not self._tables:
for table_name in os.listdir(self.db_path):
self._tables[table_name] = self._load_table(table_name)
return self._tables.keys() | 0.006452 |
def dropAssayFromStudy(assayNum, studyNum, pathToISATABFile):
"""
    This function removes an Assay from a study in an ISA file.
Typically, you should use the exploreISA function to check the contents
of the ISA file and retrieve the assay and study numbers you are interested in!
:param assayNum: The Assay number (notice it's 1-based index).
:type assayNum: int
:param studyNum: The Study number (notice it's 1-based index).
:type studyNum: int
:param pathToISATABFile: The path to the ISATAB file
:type pathToISATABFile: string
:raise FileNotFoundError: If pathToISATABFile does not contain file 'i_Investigation.txt'.
"""
from isatools import isatab
import os
try:
isa = isatab.load(pathToISATABFile, skip_load_tables=True)
std = isa.studies[studyNum - 1]
assays = std.assays
if os.path.isfile(os.path.join(pathToISATABFile,assays[assayNum - 1].filename)):
os.remove(os.path.join(pathToISATABFile,assays[assayNum - 1].filename))
del assays[assayNum - 1]
isatab.dump(isa_obj=isa, output_path=pathToISATABFile)
except FileNotFoundError as err:
raise err | 0.005917 |
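A hedged usage sketch; the path and the 1-based indices below are placeholders, and the docstring recommends confirming them with exploreISA first.
# Hypothetical call: remove the 2nd assay of the 1st study in an ISA-Tab folder
# that contains 'i_Investigation.txt'. Both indices are 1-based.
dropAssayFromStudy(assayNum=2, studyNum=1,
                   pathToISATABFile="/data/my_isatab_study/")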
def elements_to_kwargs(elements, fix_texture, image):
"""
Given an elements data structure, extract the keyword
arguments that a Trimesh object constructor will expect.
Parameters
------------
elements: OrderedDict object, with fields and data loaded
Returns
-----------
kwargs: dict, with keys for Trimesh constructor.
eg: mesh = trimesh.Trimesh(**kwargs)
"""
kwargs = {'metadata': {'ply_raw': elements}}
vertices = np.column_stack([elements['vertex']['data'][i]
for i in 'xyz'])
if not util.is_shape(vertices, (-1, 3)):
raise ValueError('Vertices were not (n,3)!')
try:
face_data = elements['face']['data']
except (KeyError, ValueError):
# some PLY files only include vertices
face_data = None
faces = None
# what keys do in-the-wild exporters use for vertices
index_names = ['vertex_index',
'vertex_indices']
texcoord = None
if util.is_shape(face_data, (-1, (3, 4))):
faces = face_data
elif isinstance(face_data, dict):
# get vertex indexes
for i in index_names:
if i in face_data:
faces = face_data[i]
break
# if faces have UV coordinates defined use them
if 'texcoord' in face_data:
texcoord = face_data['texcoord']
elif isinstance(face_data, np.ndarray):
face_blob = elements['face']['data']
# some exporters set this name to 'vertex_index'
# and some others use 'vertex_indices' but we really
# don't care about the name unless there are multiple
if len(face_blob.dtype.names) == 1:
name = face_blob.dtype.names[0]
elif len(face_blob.dtype.names) > 1:
# loop through options
for i in face_blob.dtype.names:
if i in index_names:
name = i
break
# get faces
faces = face_blob[name]['f1']
try:
texcoord = face_blob['texcoord']['f1']
except (ValueError, KeyError):
# accessing numpy arrays with named fields
# incorrectly is a ValueError
pass
# PLY stores texture coordinates per- face which is
# slightly annoying, as we have to then figure out
# which vertices have the same position but different UV
expected = (faces.shape[0], faces.shape[1] * 2)
if (image is not None and
texcoord is not None and
texcoord.shape == expected):
# vertices with the same position but different
# UV coordinates can't be merged without it
# looking like it went through a woodchipper
# in- the- wild PLY comes with things merged that
# probably shouldn't be so disconnect vertices
if fix_texture:
# reshape to correspond with flattened faces
uv = texcoord.reshape((-1, 2))
# round UV to OOM 10^4 as they are pixel coordinates
# and more precision is not necessary or desirable
search = np.column_stack((
vertices[faces.reshape(-1)],
(uv * 1e4).round()))
# find vertices which have the same position AND UV
unique, inverse = grouping.unique_rows(search)
# set vertices, faces, and UV to the new values
vertices = search[:, :3][unique]
faces = inverse.reshape((-1, 3))
uv = uv[unique]
else:
# don't alter vertices, UV will look like crap
# if it was exported with vertices merged
uv = np.zeros((len(vertices), 2))
uv[faces.reshape(-1)] = texcoord.reshape((-1, 2))
# create the visuals object for the texture
kwargs['visual'] = visual.texture.TextureVisuals(
uv=uv, image=image)
# kwargs for Trimesh or PointCloud
kwargs.update({'faces': faces,
'vertices': vertices})
# if both vertex and face color are defined pick the one
# with the most going on
colors = []
signal = []
if kwargs['faces'] is not None:
f_color, f_signal = element_colors(elements['face'])
colors.append({'face_colors': f_color})
signal.append(f_signal)
if kwargs['vertices'] is not None:
v_color, v_signal = element_colors(elements['vertex'])
colors.append({'vertex_colors': v_color})
signal.append(v_signal)
# add the winning colors to the result
kwargs.update(colors[np.argmax(signal)])
return kwargs | 0.000216 |
def confd_state_ha_node_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
ha = ET.SubElement(confd_state, "ha")
node_id = ET.SubElement(ha, "node-id")
node_id.text = kwargs.pop('node_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006508 |
def search_for_devices_by_serial_number(self, sn):
"""
Returns a list of device objects that match the serial number
in param 'sn'.
This will match partial serial numbers.
"""
import re
sn_search = re.compile(sn)
matches = []
for dev_o in self.get_all_devices_in_portal():
# print("Checking {0}".format(dev_o['sn']))
try:
if sn_search.match(dev_o['sn']):
matches.append(dev_o)
except TypeError as err:
print("Problem checking device {!r}: {!r}".format(
dev_o['info']['description']['name'],
str(err)))
return matches | 0.002591 |
def file_is_attached(self, url):
'''return true if at least one book has
file with the given url as attachment
'''
body = self._get_search_field('_attachments.url', url)
return self.es.count(index=self.index_name, body=body)['count'] > 0 | 0.007143 |
def makenex(assembly, names, longname, partitions):
""" PRINT NEXUS """
## make nexus output
data = iter(open(os.path.join(assembly.dirs.outfiles, assembly.name+".phy" ), 'r' ))
nexout = open(os.path.join(assembly.dirs.outfiles, assembly.name+".nex" ), 'wb' )
ntax, nchar = data.next().strip().split(" ")
print >>nexout, "#NEXUS"
print >>nexout, "BEGIN DATA;"
print >>nexout, " DIMENSIONS NTAX=%s NCHAR=%s;" % (ntax,nchar)
print >>nexout, " FORMAT DATATYPE=DNA MISSING=N GAP=- INTERLEAVE=YES;"
print >>nexout, " MATRIX"
idict = {}
## read in max 1M bp at a time
for line in data:
tax, seq = line.strip().split()
idict[tax] = seq[0:100000]
del line
nameorder = idict.keys()
nameorder.sort()
n=0
tempn=0
sz = 100
while n < len(seq):
for tax in nameorder:
print >>nexout, " "+tax+" "*\
((longname-len(tax))+3)+\
idict[tax][tempn:tempn+sz]
n += sz
tempn += sz
print >>nexout, ""
if not n % 100000:
#print idict[tax][tempn:tempn+sz]
idict = update(assembly, idict, n)
tempn -= 100000
print >>nexout, ';'
print >>nexout, 'END;'
### partitions info
print >>nexout, "BEGIN SETS;"
for partition in partitions:
print >>nexout, " CHARSET %s;" % (partition)
print >>nexout, "END;"
nexout.close() | 0.012056 |
def clean(self, py_value):
"""
Cleans the value before storing it.
:param: py_value : <str>
:return: <str>
"""
try:
from webhelpers.text import strip_tags
return strip_tags(py_value)
except ImportError:
warnings.warn('Unable to clean string column without webhelpers installed.')
return py_value | 0.007353 |
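A small illustration of the helper the method relies on, assuming webhelpers is installed; the sample markup is hypothetical.
from webhelpers.text import strip_tags

# clean() delegates to strip_tags, so stored values lose their markup:
print(strip_tags('<b>Hello</b> <i>world</i>'))   # -> Hello world
# Without webhelpers installed, clean() warns and returns the value unchanged.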
def options_request(
self,
alias,
uri,
headers=None,
allow_redirects=None,
timeout=None):
""" Send an OPTIONS request on the session object found using the
given `alias`
``alias`` that will be used to identify the Session object in the cache
``uri`` to send the OPTIONS request to
``allow_redirects`` Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
``headers`` a dictionary of headers to use with the request
"""
session = self._cache.switch(alias)
redir = True if allow_redirects is None else allow_redirects
response = self._options_request(session, uri, headers, redir, timeout)
logger.info(
'Options Request using : alias=%s, uri=%s, headers=%s, allow_redirects=%s ' %
(alias, uri, headers, redir))
return response | 0.004264 |
def create_account_user(self, account_id, body, **kwargs): # noqa: E501
"""Create a new user. # noqa: E501
An endpoint for creating or inviting a new user to the account. In case of invitation email address is used only, other attributes are set in the 2nd step. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users -d {\"email\": \"[email protected]\"} -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_account_user(account_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param UserInfoReq body: A user object with attributes. (required)
:param str action: Create or invite user.
:return: UserInfoResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.create_account_user_with_http_info(account_id, body, **kwargs) # noqa: E501
else:
(data) = self.create_account_user_with_http_info(account_id, body, **kwargs) # noqa: E501
return data | 0.001378 |
def run(self):
"""
Start queueing the chain to the worker cluster
:return: the chain's group id
"""
self.group = async_chain(chain=self.chain[:], group=self.group, cached=self.cached, sync=self.sync,
broker=self.broker)
self.started = True
return self.group | 0.008671 |
def remove_sensor(self, sensor):
"""Remove a sensor from the device.
Also deregisters all clients observing the sensor.
Parameters
----------
sensor : Sensor object or name string
The sensor to remove from the device server.
"""
if isinstance(sensor, basestring):
sensor_name = sensor
else:
sensor_name = sensor.name
sensor = self._sensors.pop(sensor_name)
def cancel_sensor_strategies():
for conn_strategies in self._strategies.values():
strategy = conn_strategies.pop(sensor, None)
if strategy:
strategy.cancel()
self.ioloop.add_callback(cancel_sensor_strategies) | 0.002642 |
def make_serviceitem_servicedll(servicedll, condition='contains', negate=False, preserve_case=False):
"""
Create a node for ServiceItem/serviceDLL
:return: A IndicatorItem represented as an Element node
"""
document = 'ServiceItem'
search = 'ServiceItem/serviceDLL'
content_type = 'string'
content = servicedll
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | 0.009025 |
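A hedged usage sketch; how the returned Element is attached to an OpenIOC document depends on the surrounding ioc_api/ioc_writer helpers, which are not shown here.
# Hypothetical usage: build an IndicatorItem matching a suspicious service DLL.
node = make_serviceitem_servicedll('evil.dll', condition='contains')
# `node` is an ElementTree Element for ServiceItem/serviceDLL that the caller
# can append under an OpenIOC Indicator node.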
def _parse_type_rule(ctype, typespec):
"""
Parse a content type rule. Unlike the other rules, content type
rules are more complex, since both selected content type and API
version must be expressed by one rule. The rule is split on
whitespace, then the components beginning with "type:" and
"version:" are selected; in both cases, the text following the ":"
character will be treated as a format string, which will be
formatted using a content parameter dictionary. Components
beginning with "param:" specify key="quoted value" pairs that
specify parameters; these parameters are ignored by AVersion, but
may be used by the application.
:param ctype: The content type the rule is for.
:param typespec: The rule text, described above.
:returns: An instance of TypeRule.
"""
params = {'param': {}}
for token in quoted_split(typespec, ' ', quotes='"\''):
if not token:
continue
tok_type, _sep, tok_val = token.partition(':')
# Validate the token type
if not tok_val:
LOG.warn("%s: Invalid type token %r" % (ctype, token))
continue
elif tok_type not in ('type', 'version', 'param'):
LOG.warn("%s: Unrecognized token type %r" % (ctype, tok_type))
continue
# Intercept 'param' clauses
if tok_type == 'param':
key, _eq, value = tok_val.partition('=')
# Set the parameter key
_set_key('type.%s' % ctype, params['param'], key, value)
continue
# Set the token value
_set_key('type.%s' % ctype, params, tok_type, tok_val,
desc="token type")
return TypeRule(ctype=params.get('type'),
version=params.get('version'),
params=params['param']) | 0.000541 |
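An illustrative rule string in the shape the docstring describes; exact quoting and format-string handling depend on the quoted_split/_set_key helpers, which are not shown.
# Hypothetical rule for the 'application/json' content type:
rule_text = 'type:application/vnd.example+json version:2 param:pretty="true"'
type_rule = _parse_type_rule('application/json', rule_text)
# Expected shape of the result (field handling is an assumption):
#   TypeRule(ctype='application/vnd.example+json', version='2',
#            params=... containing the 'pretty' entry)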
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed | 0.00314 |
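A short usage sketch against pkg_resources' global working set; the requirement strings are placeholders, and the call raises DistributionNotFound if they cannot be satisfied.
import pkg_resources

# Activate whatever distributions are needed for these (hypothetical)
# requirements and report what was pulled in.
activated = pkg_resources.working_set.require("requests>=2.0", "six")
for dist in activated:
    print(dist.project_name, dist.version)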