text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def save_token(self):
"""
Saves the token dict in the specified file
:return bool: Success / Failure
"""
if self.token is None:
raise ValueError('You have to set the "token" first.')
try:
if not self.token_path.parent.exists():
self.token_path.parent.mkdir(parents=True)
except Exception as e:
log.error('Token could not be saved: {}'.format(str(e)))
return False
with self.token_path.open('w') as token_file:
# 'indent = True' will make the file human readable
self.serializer.dump(self.token, token_file, indent=True)
return True | 0.002882 |
def enableBranch(self, enabled):
""" Sets the enabled member to True or False for a node and all it's children
"""
self.enabled = enabled
    # Disable children and further descendants when this node's data matches childrenDisabledValue
enabled = enabled and self.data != self.childrenDisabledValue
for child in self.childItems:
child.enableBranch(enabled) | 0.008264 |
def visit_expr(self, node, parent):
"""visit a Expr node by returning a fresh instance of it"""
newnode = nodes.Expr(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode | 0.007968 |
def tablib_export_action(modeladmin, request, queryset, file_type="xls"):
"""
Allow the user to download the current filtered list of items
:param file_type:
One of the formats supported by tablib (e.g. "xls", "csv", "html",
etc.)
"""
dataset = SimpleDataset(queryset, headers=None)
filename = '{0}.{1}'.format(
smart_str(modeladmin.model._meta.verbose_name_plural), file_type)
response_kwargs = {
'content_type': get_content_type(file_type)
}
response = HttpResponse(getattr(dataset, file_type), **response_kwargs)
response['Content-Disposition'] = 'attachment; filename={0}'.format(
filename)
return response | 0.001433 |
def shiftAccent(self, shiftAmount):
'''
Move the whole accent earlier or later
'''
if shiftAmount == 0:
return
self.pointList = [(time + shiftAmount, pitch)
for time, pitch in self.pointList]
# Update shift amounts
if shiftAmount < 0:
self.netLeftShift += shiftAmount
    elif shiftAmount > 0:
self.netRightShift += shiftAmount | 0.008547 |
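A small standalone illustration of the shift applied above; the (time, pitch) values are hypothetical.
pointList = [(1.0, 120.0), (2.5, 140.0)]   # hypothetical (time, pitch) pairs
shiftAmount = 0.5
pointList = [(time + shiftAmount, pitch) for time, pitch in pointList]
print(pointList)  # [(1.5, 120.0), (3.0, 140.0)]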
def transformProperMotions(self, phi, theta, muphistar, mutheta):
"""
Converts proper motions from one reference system to another, using the prescriptions in section
1.5.3 of the Hipparcos Explanatory Volume 1 (equations 1.5.18, 1.5.19).
Parameters
----------
phi - The longitude-like angle of the position of the source (radians).
theta - The latitude-like angle of the position of the source (radians).
muphistar - Value of the proper motion in the longitude-like angle, multiplied by cos(latitude).
mutheta - Value of the proper motion in the latitude-like angle.
Returns
-------
muphistarrot - Value of the transformed proper motion in the longitude-like angle (including the
cos(latitude) factor).
muthetarot - Value of the transformed proper motion in the latitude-like angle.
"""
c, s = self._getJacobian(phi,theta)
return c*muphistar+s*mutheta, c*mutheta-s*muphistar | 0.008746 |
def set_user_password(self, uid, mode='set_password', password=None):
"""Set user password and (modes)
:param uid: id number of user. see: get_names_uid()['name']
:param mode:
disable = disable user connections
enable = enable user connections
set_password = set or ensure password
test_password = test password is correct
    :param password: max 20 char string (16 for the standard format)
(optional when mode is [disable or enable])
:return:
True on success
when mode = test_password, return False on bad password
"""
mode_mask = {
'disable': 0,
'enable': 1,
'set_password': 2,
'test_password': 3
}
data = [uid, mode_mask[mode]]
if password:
password = str(password)
if 21 > len(password) > 16:
            password = password.ljust(20, "\x00")
data[0] |= 0b10000000
elif len(password) > 20:
raise Exception('password has limit of 20 chars')
else:
password = password.ljust(16, "\x00")
data.extend([ord(x) for x in password])
try:
self.xraw_command(netfn=0x06, command=0x47, data=data)
except exc.IpmiException as ie:
if mode == 'test_password':
return False
elif mode in ('enable', 'disable') and ie.ipmicode == 0xcc:
# Some BMCs see redundant calls to password disable/enable
# as invalid
return True
raise
return True | 0.001198 |
def compute_pmf(fsamps, y, **kwargs):
""" Compute the pmf defined by fsamps at each x for each y.
Parameters
----------
fsamps: 2D array-like
array of function samples, as returned by
:func:`fgivenx.compute_samples`
y: 1D array-like
y values to evaluate the PMF at
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`.
Returns
-------
2D numpy.array
probability mass function at each x for each y
        `shape=(len(fsamps),len(y))`
"""
parallel = kwargs.pop('parallel', False)
cache = kwargs.pop('cache', '')
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if cache:
cache = Cache(cache + '_masses')
try:
return cache.check(fsamps, y)
except CacheException as e:
print(e)
masses = parallel_apply(PMF, fsamps, postcurry=(y,), parallel=parallel,
tqdm_kwargs=tqdm_kwargs)
masses = numpy.array(masses).transpose().copy()
if cache:
cache.save(fsamps, y, masses)
return masses | 0.000845 |
def get_field_kwargs(self):
"""
Return a dict of arguments to use as parameters for the field
    class instantiation.
This will use :py:attr:`field_kwargs` as a starter,
and use sensible defaults for a few attributes:
- :py:attr:`instance.verbose_name` for the field label
- :py:attr:`instance.help_text` for the field help text
- :py:attr:`instance.widget` for the field widget
    - :py:attr:`instance.required` defines whether the value is required or not
    - :py:attr:`instance.initial` defines the initial value
"""
kwargs = self.field_kwargs.copy()
kwargs.setdefault('label', self.get('verbose_name'))
kwargs.setdefault('help_text', self.get('help_text'))
kwargs.setdefault('widget', self.get('widget'))
kwargs.setdefault('required', self.get('required'))
kwargs.setdefault('initial', self.initial)
kwargs.setdefault('validators', [])
kwargs['validators'].append(self.validate)
return kwargs | 0.001907 |
def connect(self,
engine: str = None,
interface: str = None,
host: str = None,
port: int = None,
database: str = None,
driver: str = None,
dsn: str = None,
odbc_connection_string: str = None,
user: str = None,
password: str = None,
autocommit: bool = True,
charset: str = "utf8",
use_unicode: bool = True) -> bool:
"""
engine: access, mysql, sqlserver
interface: mysql, odbc, jdbc
"""
# Catch all exceptions, so the error-catcher never shows a password.
# Note also that higher-level things may catch exceptions, so use the
# logger as well.
try:
return self._connect(
engine=engine, interface=interface,
host=host, port=port, database=database,
driver=driver, dsn=dsn,
odbc_connection_string=odbc_connection_string,
user=user, password=password,
autocommit=autocommit, charset=charset,
use_unicode=use_unicode)
except Exception as e:
self.reraise_connection_exception(e) | 0.011574 |
def get(self, key):
"""
Returns an individual Location by query lookup, e.g. address or point.
"""
if isinstance(key, tuple):
# TODO handle different ordering
try:
x, y = float(key[0]), float(key[1])
except IndexError:
raise ValueError("Two values are required for a coordinate pair")
except ValueError:
raise ValueError("Only float or float-coercable values can be passed")
key = "{0},{1}".format(x, y)
return self[self.lookups[key]] | 0.006826 |
def interpolate_nearest(self, xi, yi, zdata):
"""
Nearest-neighbour interpolation.
Calls nearnd to find the index of the closest neighbours to xi,yi
Parameters
----------
xi : float / array of floats, shape (l,)
x coordinates on the Cartesian plane
yi : float / array of floats, shape (l,)
y coordinates on the Cartesian plane
Returns
-------
zi : float / array of floats, shape (l,)
nearest-neighbour interpolated value(s) of (xi,yi)
"""
if zdata.size != self.npoints:
raise ValueError('zdata should be same size as mesh')
zdata = self._shuffle_field(zdata)
ist = np.ones_like(xi, dtype=np.int32)
ist, dist = _tripack.nearnds(xi, yi, ist, self._x, self._y,
self.lst, self.lptr, self.lend)
return zdata[ist - 1] | 0.002148 |
def set_(schema=None, key=None, user=None, value=None, **kwargs):
'''
Set key in a particular GNOME schema
CLI Example:
.. code-block:: bash
salt '*' gnome.set user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled value=False
'''
_gsession = _GSettings(user=user, schema=schema, key=key)
return _gsession._set(value) | 0.005222 |
def gen_ref_docs(gen_index=False):
    # type: (bool) -> None
""" Generate reference documentation for the project.
This will use **sphinx-refdoc** to generate the source .rst files for the
reference documentation.
Args:
gen_index (bool):
Set it to **True** if you want to generate the index file with the
list of top-level packages. This is set to default as in most cases
you only have one package per project so you can link directly to
that package reference (and if index were generated sphinx would
complain about file not included in toctree).
"""
try:
from refdoc import generate_docs
except ImportError as ex:
msg = ("You need to install sphinx-refdoc if you want to generate "
"code reference docs.")
print(msg, file=sys.stderr)
log.err("Exception: {}".format(ex))
sys.exit(-1)
pretend = context.get('pretend', False)
docs_dir = conf.get_path('docs.path', 'docs')
docs_ref_dir = os.path.join(docs_dir, 'ref')
refdoc_paths = conf.get('docs.reference', [])
if os.path.exists(docs_ref_dir):
if not pretend:
log.info('Removing existing reference docs')
shutil.rmtree(docs_ref_dir)
else:
log.info('Would remove old reference docs')
args = {
'out_dir': docs_ref_dir,
'verbose': context.get('verbose', 0),
}
if gen_index:
args['gen_index'] = True
pkg_paths = [conf.proj_path(p) for p in refdoc_paths]
if not pretend:
log.info('Generating reference documentation')
generate_docs(pkg_paths, **args)
else:
log.info("Would generate reference docs with the following params")
shell.cprint('<90>{}', util.yaml_dump(args).rstrip())
shell.cprint('<90>paths:\n<34>{}', util.yaml_dump(pkg_paths).rstrip()) | 0.00052 |
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
# One iteration changes the opinion of several voters using the following procedure:
# - select randomly one voter (speaker 1)
# - select randomly one of its neighbours (speaker 2)
# - if the two voters agree, their neighbours take their opinion
self.clean_initial_status(self.available_statuses.values())
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(self.status)
if node_status:
return {"iteration": 0, "status": self.status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
delta = {}
status_delta = {st: 0 for st in self.available_statuses.values()}
# select a random node
speaker1 = list(self.graph.nodes())[np.random.randint(0, self.graph.number_of_nodes())]
# select a random neighbour
neighbours = list(self.graph.neighbors(speaker1))
if isinstance(self.graph, nx.DiGraph):
# add also the predecessors
neighbours += list(self.graph.predecessors(speaker1))
speaker2 = neighbours[np.random.randint(0, len(neighbours))]
if self.status[speaker1] == self.status[speaker2]:
# select listeners (all neighbours of two speakers)
neighbours = list(self.graph.neighbors(speaker1)) + list(self.graph.neighbors(speaker2))
if isinstance(self.graph, nx.DiGraph):
# assumed if a->b then b can be influenced by a
# but not the other way around - the link between the speakers doesn't matter
neighbours = list(self.graph.successors(speaker1)) + list(self.graph.successors(speaker2))
# update status of listeners
for listener in neighbours:
if self.status[speaker1] != self.status[listener]:
delta[listener] = self.status[speaker1]
status_delta[self.status[listener]] += 1
for x in self.available_statuses.values():
if x != self.status[listener]:
status_delta[x] -= 1
self.status[listener] = self.status[speaker1]
node_count = {st: len([n for n in self.status if self.status[n] == st])
for st in self.available_statuses.values()}
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | 0.004078 |
def get_issue(issue_number, repo_name=None, profile='github', output='min'):
'''
Return information about a single issue in a named repository.
.. versionadded:: 2016.11.0
issue_number
The number of the issue to retrieve.
repo_name
The name of the repository from which to get the issue. This argument is
required, either passed via the CLI, or defined in the configured
profile. A ``repo_name`` passed as a CLI argument will override the
repo_name defined in the configured profile, if provided.
profile
The name of the profile configuration to use. Defaults to ``github``.
output
The amount of data returned by each issue. Defaults to ``min``. Change
to ``full`` to see all issue output.
CLI Example:
.. code-block:: bash
salt myminion github.get_issue 514
salt myminion github.get_issue 514 repo_name=salt
'''
org_name = _get_config_value(profile, 'org_name')
if repo_name is None:
repo_name = _get_config_value(profile, 'repo_name')
action = '/'.join(['repos', org_name, repo_name])
command = 'issues/' + six.text_type(issue_number)
ret = {}
issue_data = _query(profile, action=action, command=command)
issue_id = issue_data.get('id')
if output == 'full':
ret[issue_id] = issue_data
else:
ret[issue_id] = _format_issue(issue_data)
return ret | 0.001388 |
def memoized_method(func):
"""
A decorator that performs memoization on methods. It stores the cache on the object instance itself.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
assert func.__name__ in dir(self), "memoized_method can only be used on method!"
if not hasattr(self, '_MEMOIZED_CACHE'):
cache = self._MEMOIZED_CACHE = {}
else:
cache = self._MEMOIZED_CACHE
        # include kwarg values (sorted) so calls with different keyword values get distinct cache keys
        key = (func, ) + args[1:] + tuple(sorted(kwargs.items()))
ret = cache.get(key, None)
if ret is not None:
return ret
value = func(*args, **kwargs)
cache[key] = value
return value
return wrapper | 0.004196 |
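A minimal usage sketch for the memoized_method decorator above. The Circle class, its radius attribute and its area method are hypothetical illustrations, and the sketch assumes the decorator (and its functools import) is defined in the same module.
class Circle:
    def __init__(self, radius):
        self.radius = radius

    @memoized_method
    def area(self, precision=2):
        # Pretend this is expensive; the result is cached per instance
        # in the _MEMOIZED_CACHE dict created by the decorator.
        return round(3.14159265 * self.radius ** 2, precision)

c = Circle(2.0)
print(c.area(precision=3))  # computed once, then stored under the cache key
print(c.area(precision=3))  # served from the instance-level cache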
def table(tab):
"""Access IPTables transactionally in a uniform way.
Ensures all access is done without autocommit and that only the outer
most task commits, and also ensures we refresh once and commit once.
"""
global open_tables
if tab in open_tables:
yield open_tables[tab]
else:
open_tables[tab] = iptc.Table(tab)
open_tables[tab].refresh()
open_tables[tab].autocommit = False
yield open_tables[tab]
open_tables[tab].commit()
del open_tables[tab] | 0.001869 |
def fetch(self, failures=True, wait=0):
"""
get the task result objects from the chain when it finishes. blocks until timeout.
:param failures: include failed tasks
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of task objects
"""
if self.started:
return fetch_group(self.group, failures=failures, wait=wait, count=self.length(), cached=self.cached) | 0.008753 |
def success_redirect(self, msg='', log=''):
'''
Shortcut for redirecting Django view to LTI Consumer with messages
'''
from django.shortcuts import redirect
self.lti_msg = msg
self.lti_log = log
return redirect(self.build_return_url()) | 0.006873 |
def _get_list_of_completed_locales(product, channel):
""" Get all the translated locales supported by Google play
So, locale unsupported by Google play won't be downloaded
Idem for not translated locale
"""
return utils.load_json_url(_ALL_LOCALES_URL.format(product=product, channel=channel)) | 0.00641 |
def select_python_parser(parser=None):
"""
    Select the default parser for loading and refactoring steps. Passing `redbaron` as argument
    will select the old parsing engine from v0.3.3.
    Replacing the redbaron parser was necessary to support Python 3 syntax. We have tried our
    best to make sure there is no impact on users. However, there may be regressions with the
    new parser backend.
    To revert to the old parser implementation, add the `GETGAUGE_USE_0_3_3_PARSER=true` property
    to the `python.properties` file in the `<PROJECT_DIR>/env/default` directory.
    This property, along with the redbaron parser, will be removed in future releases.
"""
if parser == 'redbaron' or os.environ.get('GETGAUGE_USE_0_3_3_PARSER'):
PythonFile.Class = RedbaronPythonFile
else:
PythonFile.Class = ParsoPythonFile | 0.008859 |
def response_address(self, beacon_config, request, client_address):
""" :meth:`.WBeaconMessengerBase.response_address` method implementation. It just removes host group names
part and return :meth:`.WBeaconMessengerBase.response_address` result
"""
si = self._message_hostgroup_parse(request)[1]
address = si.address()
port = si.port()
return WIPV4SocketInfo(
address if address is not None else client_address.address(),
port if port is not None else client_address.port()
) | 0.026157 |
def deploy_to_s3(self):
"""
Deploy a directory to an s3 bucket.
"""
self.tempdir = tempfile.mkdtemp('s3deploy')
for keyname, absolute_path in self.find_file_paths():
self.s3_upload(keyname, absolute_path)
shutil.rmtree(self.tempdir, True)
return True | 0.00625 |
def authenticate_with_access_token(access_token):
"""Authenticate using an existing access token."""
credentials = Credentials(access_token=access_token)
client = YamcsClient('localhost:8090', credentials=credentials)
for link in client.list_data_links('simulator'):
print(link) | 0.0033 |
def unimplemented_abstract_methods(
node: astroid.node_classes.NodeNG, is_abstract_cb: astroid.FunctionDef = None
) -> Dict[str, astroid.node_classes.NodeNG]:
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
returns a ``True`` value. The check defaults to verifying that
a method is decorated with abstract methods.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = partial(decorated_with, qnames=ABC_METHODS)
visited = {} # type: Dict[str, astroid.node_classes.NodeNG]
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
        # Probably inconsistent hierarchy, don't try
        # to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
infered = obj
if isinstance(obj, astroid.AssignName):
infered = safe_infer(obj)
if not infered:
# Might be an abstract function,
# but since we don't have enough information
# in order to take this decision, we're taking
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(infered, astroid.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(infered, astroid.FunctionDef):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(infered)
if abstract:
visited[obj.name] = infered
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited | 0.000801 |
def search(self, *filters, **kwargs):
"""Shortcut to generate a new temporary search report using provided filters and return the resulting records
Args:
*filters (tuple): Zero or more filter tuples of (field_name, operator, field_value)
Keyword Args:
keywords (list(str)): List of strings of keywords to use in report search
limit (int): Set maximum number of returned Records, defaults to `Report.default_limit`. Set to 0 to return
all records
Notes:
Uses a temporary Report instance with a random name to facilitate search. Records are normally paginated,
but are returned as a single list here, potentially causing performance issues with large searches.
All provided filters are AND'ed together
Filter operators are available as constants in `swimlane.core.search`
Examples:
::
# Return records matching all filters with default limit
from swimlane.core import search
records = app.records.search(
('field_name', 'equals', 'field_value'),
('other_field', search.NOT_EQ, 'value')
)
::
# Run keyword search with multiple keywords
records = app.records.search(keywords=['example', 'test'])
::
# Return all records from app
records = app.records.search(limit=0)
Returns:
:class:`list` of :class:`~swimlane.core.resources.record.Record`: List of Record instances returned from the
search results
"""
report = self._app.reports.build(
'search-' + random_string(8),
keywords=kwargs.pop('keywords', []),
limit=kwargs.pop('limit', Report.default_limit)
)
for filter_tuples in filters:
report.filter(*filter_tuples)
return list(report) | 0.00496 |
def create_object(self, type: Union[type, str], **constructor_kwargs):
"""
Instantiate a plugin.
The entry points in this namespace must point to subclasses of the ``base_class`` parameter
passed to this container.
:param type: an entry point identifier, a ``module:varname`` reference to a class, or an
actual class object
:param constructor_kwargs: keyword arguments passed to the constructor of the plugin class
:return: the plugin instance
"""
assert check_argument_types()
assert self.base_class, 'base class has not been defined'
plugin_class = self.resolve(type)
if not issubclass(plugin_class, self.base_class):
raise TypeError('{} is not a subclass of {}'.format(
qualified_name(plugin_class), qualified_name(self.base_class)))
return plugin_class(**constructor_kwargs) | 0.0054 |
def remove_decorators(src):
""" Remove decorators from the source code """
src = src.strip()
src_lines = src.splitlines()
multi_line = False
n_deleted = 0
for n in range(len(src_lines)):
line = src_lines[n - n_deleted].strip()
if (line.startswith('@') and 'Benchmark' in line) or multi_line:
del src_lines[n - n_deleted]
n_deleted += 1
if line.endswith(')'):
multi_line = False
else:
multi_line = True
setup_src = '\n'.join(src_lines)
return setup_src | 0.001727 |
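A brief usage sketch for remove_decorators above; the input source string is a hypothetical example and assumes the function is in scope.
src = (
    "@Benchmark(repeat=3)\n"
    "def work():\n"
    "    return 42\n"
)
print(remove_decorators(src))
# Expected output:
# def work():
#     return 42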
def from_response(cls, response, attrs):
""" Create an index from returned Dynamo data """
proj = response['Projection']
index = cls(proj['ProjectionType'], response['IndexName'],
attrs[response['KeySchema'][1]['AttributeName']],
proj.get('NonKeyAttributes'))
index.response = response
return index | 0.005291 |
def destination_from_source(sources, use_glob=True):
"""
    Split each of the sources in the array on ':'.
    The first part will be the source, the second will be the destination.
    Returns a pair [sources, destinations] where sources contains only the
    source parts and destinations holds the corresponding destination paths.
"""
destinations = []
newsources = []
for i in range(0, len(sources)):
srcdst = sources[i].split(':')
if len(srcdst) == 2:
destinations.append(srcdst[1])
newsources.append(srcdst[0]) #proper list assignment
else:
if use_glob:
listing = glob.glob(srcdst[0])
for filename in listing:
newsources.append(filename)
#always use forward slash at destination
destinations.append(filename.replace('\\', '/'))
else:
newsources.append(srcdst[0])
destinations.append(srcdst[0])
return [newsources, destinations] | 0.004049 |
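A short usage sketch for destination_from_source above, with use_glob disabled so each 'src:dst' pair is split verbatim; the file names are hypothetical.
sources = ['report.txt:remote/report.txt', 'data.csv']
srcs, dsts = destination_from_source(sources, use_glob=False)
print(srcs)  # ['report.txt', 'data.csv']
print(dsts)  # ['remote/report.txt', 'data.csv']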
def _get_extended_palette_entry(self, name, index, is_hex=False):
''' Compute extended entry, once on the fly. '''
values = None
is_fbterm = (env.TERM == 'fbterm') # sigh
if 'extended' in self._palette_support: # build entry
if is_hex:
index = str(find_nearest_color_hexstr(index,
method=self._dg_method))
start_codes = self._start_codes_extended
if is_fbterm:
start_codes = self._start_codes_extended_fbterm
values = [start_codes, index]
# downgrade section
elif 'basic' in self._palette_support:
if is_hex:
nearest_idx = find_nearest_color_hexstr(index, color_table4,
method=self._dg_method)
else:
from .color_tables import index_to_rgb8 # find rgb for idx
nearest_idx = find_nearest_color_index(*index_to_rgb8[index],
color_table=color_table4,
method=self._dg_method)
values = self._index_to_ansi_values(nearest_idx)
return (self._create_entry(name, values, fbterm=is_fbterm)
if values else empty) | 0.002201 |
def wrap(cls, url):
"""Given a url that is either a string or :class:`Link`, return a :class:`Link`.
:param url: A string-like or :class:`Link` object to wrap.
:returns: A :class:`Link` object wrapping the url.
"""
if isinstance(url, cls):
return url
elif isinstance(url, compatible_string):
return cls(url)
else:
raise ValueError('url must be either a string or Link.') | 0.01199 |
def to_neo(self,index_label='N',time_label=0,name='segment of exported spikes',index=0):
"""
Returns a `neo` Segment containing the spike trains.
Example usage::
import quantities as pq
seg = sp.to_neo()
fig = pyplot.figure()
trains = [st.rescale('s').magnitude for st in seg.spiketrains]
colors = pyplot.cm.jet(np.linspace(0, 1, len(seg.spiketrains)))
gca().eventplot(trains, colors=colors)
gca().set_title(seg.file_origin)
f = neo.io.AsciiSpikeTrainIO('a_spike_file.txt')
f.write_segment(seg)
"""
import neo
from quantities import s
seq = neo.Segment(name=name,index=index)
t_start = None
t_stop = None
if self.min_t is not None:
t_start = convert_time(self.min_t,from_units=self.units,to_units='s')*s
if self.max_t is not None:
t_stop = convert_time(self.max_t,from_units=self.units,to_units='s')*s
for train in self.generate(index_label):
seq.spiketrains.append(neo.SpikeTrain(train.spike_times.get_converted(time_label,'s')[1]*s,t_start=t_start,t_stop=t_stop))
return seq | 0.014129 |
def fetch_items(self, category, **kwargs):
"""Fetch the messages
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
logger.info("Looking for messages from '%s' on '%s' since %s",
self.uri, self.dirpath, str(from_date))
mailing_list = MailingList(self.uri, self.dirpath)
messages = self._fetch_and_parse_messages(mailing_list, from_date)
for message in messages:
yield message
logger.info("Fetch process completed") | 0.003135 |
def get_country_by_name(self, country_name) -> 'Country':
"""
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
"""
VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)
if country_name not in self._countries_by_name.keys():
for country in self.countries:
if country.country_name == country_name:
return country
raise ValueError(country_name)
else:
return self._countries_by_name[country_name] | 0.003295 |
def references(self):
"""
Returns the joined DataFrame of references and repositories.
>>> refs_df = repos_df.references
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self._engine_dataframe.getReferences(),
self._session, self._implicits) | 0.005952 |
def gemset_list_all(runas=None):
'''
List all gemsets for all installed rubies.
Note that you must have set a default ruby before this can work.
runas
The user under which to run rvm. If not specified, then rvm will be run
as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rvm.gemset_list_all
'''
gemsets = {}
current_ruby = None
output = _rvm_do('default', ['rvm', 'gemset', 'list_all'], runas=runas)
if output:
gems_regex = re.compile('^ ([^ ]+)')
gemset_regex = re.compile('^gemsets for ([^ ]+)')
for line in output.splitlines():
match = gemset_regex.match(line)
if match:
current_ruby = match.group(1)
gemsets[current_ruby] = []
match = gems_regex.match(line)
if match:
gemsets[current_ruby].append(match.group(1))
return gemsets | 0.001035 |
def tree(self):
"""Like tree view mode
"""
self.msg.template(78)
print("| Dependencies\n"
"| -- Packages")
self.msg.template(78)
self.data()
for pkg, dep in self.dmap.iteritems():
print("+ {0}{1}{2}".format(self.green, pkg, self.endc))
print("|")
for d in dep:
print("+-- {0}".format(d))
print("|")
sys.stdout.write("\x1b[1A{0}\n".format(" "))
sys.stdout.flush()
self.summary()
if self.image:
Graph(self.image).dependencies(self.dmap) | 0.003205 |
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(utcnow.override_time is not None)
try:
for dt in utcnow.override_time:
dt += timedelta
except TypeError:
utcnow.override_time += timedelta | 0.003546 |
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
return raw | 0.002309 |
def get_protein_refs(self, hms_lincs_id):
"""Get the refs for a protein from the LINCs protein metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID for the protein
Returns
-------
dict
A dictionary of protein references.
"""
# TODO: We could get phosphorylation states from the protein data.
refs = {'HMS-LINCS': hms_lincs_id}
entry = self._get_entry_by_id(self._prot_data, hms_lincs_id)
# If there is no entry for this ID
if not entry:
return refs
mappings = dict(egid='Gene ID', up='UniProt ID')
for k, v in mappings.items():
if entry.get(v):
refs[k.upper()] = entry.get(v)
return refs | 0.002509 |
def ENUM_DECL(self, cursor):
"""Gets the enumeration declaration."""
name = self.get_unique_name(cursor)
if self.is_registered(name):
return self.get_registered(name)
align = cursor.type.get_align()
size = cursor.type.get_size()
obj = self.register(name, typedesc.Enumeration(name, size, align))
self.set_location(obj, cursor)
self.set_comment(obj, cursor)
# parse all children
for child in cursor.get_children():
self.parse_cursor(child) # FIXME, where is the starElement
return obj | 0.003356 |
def Regions(self,
skip_mapped_files=False,
skip_shared_regions=False,
skip_executable_regions=False,
skip_readonly_regions=False):
"""Returns an iterator over the readable regions for this process."""
try:
maps_file = open("/proc/" + str(self.pid) + "/maps", "r")
except OSError as e:
raise process_error.ProcessError(e)
with maps_file:
for line in maps_file:
m = self.maps_re.match(line)
if not m:
continue
start = int(m.group(1), 16)
end = int(m.group(2), 16)
region_protec = m.group(3)
inode = int(m.group(6))
if "r" in region_protec:
if skip_mapped_files and inode != 0:
continue
if skip_shared_regions and "s" in region_protec:
continue
if skip_executable_regions and "x" in region_protec:
continue
if skip_readonly_regions and "w" not in region_protec:
continue
yield start, end - start | 0.013359 |
def configure(self, sampling=None, plugins=None,
context_missing=None, sampling_rules=None,
daemon_address=None, service=None,
context=None, emitter=None, streaming=None,
dynamic_naming=None, streaming_threshold=None,
max_trace_back=None, sampler=None,
stream_sql=True):
"""Configure global X-Ray recorder.
    Configure needs to run before patching third party libraries
    to avoid creating dangling subsegments.
:param bool sampling: If sampling is enabled, every time the recorder
creates a segment it decides whether to send this segment to
the X-Ray daemon. This setting is not used if the recorder
        is running in AWS Lambda. The recorder always respects the incoming
sampling decisions regardless of this setting.
:param sampling_rules: Pass a set of local custom sampling rules.
Can be an absolute path of the sampling rule config json file
or a dictionary that defines those rules. This will also be the
fallback rules in case of centralized sampling opted-in while
        the centralized sampling rules are not available.
:param sampler: The sampler used to make sampling decisions. The SDK
provides two built-in samplers. One is centralized rules based and
the other is local rules based. The former is the default.
:param tuple plugins: plugins that add extra metadata to each segment.
Currently available plugins are EC2Plugin, ECS plugin and
ElasticBeanstalkPlugin.
If you want to disable all previously enabled plugins,
pass an empty tuple ``()``.
:param str context_missing: recorder behavior when it tries to mutate
a segment or add a subsegment but there is no active segment.
RUNTIME_ERROR means the recorder will raise an exception.
LOG_ERROR means the recorder will only log the error and
do nothing.
:param str daemon_address: The X-Ray daemon address where the recorder
sends data to.
:param str service: default segment name if creating a segment without
providing a name.
:param context: You can pass your own implementation of context storage
for active segment/subsegment by overriding the default
``Context`` class.
:param emitter: The emitter that sends a segment/subsegment to
the X-Ray daemon. You can override ``UDPEmitter`` class.
:param dynamic_naming: a string that defines a pattern that host names
should match. Alternatively you can pass a module which
overrides ``DefaultDynamicNaming`` module.
:param streaming: The streaming module to stream out trace documents
when they grow too large. You can override ``DefaultStreaming``
class to have your own implementation of the streaming process.
    :param streaming_threshold: Once the number of subsegments within a single
        segment exceeds this threshold, it will start streaming out completed
        children subsegments. By default it is the maximum number of
        subsegments allowed within a segment.
    :param int max_trace_back: The maximum number of stack traces recorded
by auto-capture. Lower this if a single document becomes too large.
:param bool stream_sql: Whether SQL query texts should be streamed.
Environment variables AWS_XRAY_DAEMON_ADDRESS, AWS_XRAY_CONTEXT_MISSING
and AWS_XRAY_TRACING_NAME respectively overrides arguments
daemon_address, context_missing and service.
"""
if sampling is not None:
self.sampling = sampling
if sampler:
self.sampler = sampler
if service:
self.service = os.getenv(TRACING_NAME_KEY, service)
if sampling_rules:
self._load_sampling_rules(sampling_rules)
if emitter:
self.emitter = emitter
if daemon_address:
self.emitter.set_daemon_address(os.getenv(DAEMON_ADDR_KEY, daemon_address))
if context:
self.context = context
if context_missing:
self.context.context_missing = os.getenv(CONTEXT_MISSING_KEY, context_missing)
if dynamic_naming:
self.dynamic_naming = dynamic_naming
if streaming:
self.streaming = streaming
if streaming_threshold:
self.streaming_threshold = streaming_threshold
if type(max_trace_back) == int and max_trace_back >= 0:
self.max_trace_back = max_trace_back
if stream_sql is not None:
self.stream_sql = stream_sql
if plugins:
plugin_modules = get_plugin_modules(plugins)
for plugin in plugin_modules:
plugin.initialize()
if plugin.runtime_context:
self._aws_metadata[plugin.SERVICE_NAME] = plugin.runtime_context
self._origin = plugin.ORIGIN
# handling explicitly using empty list to clean up plugins.
elif plugins is not None:
self._aws_metadata = copy.deepcopy(XRAY_META)
self._origin = None
if type(self.sampler).__name__ == 'DefaultSampler':
self.sampler.load_settings(DaemonConfig(daemon_address),
self.context, self._origin) | 0.002003 |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Locate response payload and decode it
into its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(LocateResponsePayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.LOCATED_ITEMS, local_buffer):
self._located_items = primitives.Integer(
tag=enums.Tags.LOCATED_ITEMS
)
self._located_items.read(
local_buffer,
kmip_version=kmip_version
)
self._unique_identifiers = []
while self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
unique_identifier.read(local_buffer, kmip_version=kmip_version)
self._unique_identifiers.append(unique_identifier)
self.is_oversized(local_buffer) | 0.001384 |
def _validate_ding0_mv_grid_import(grid, ding0_grid):
"""Verify imported data with original data from Ding0
Parameters
----------
grid: MVGrid
MV Grid data (eDisGo)
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
Notes
-----
The data validation excludes grid components located in aggregated load
areas as these are represented differently in eDisGo.
Returns
-------
dict
Dict showing data integrity for each type of grid component
"""
integrity_checks = ['branch_tee',
'disconnection_point', 'mv_transformer',
'lv_station'#,'line',
]
data_integrity = {}
data_integrity.update({_: {'ding0': None, 'edisgo': None, 'msg': None}
for _ in integrity_checks})
# Check number of branch tees
data_integrity['branch_tee']['ding0'] = len(ding0_grid._cable_distributors)
data_integrity['branch_tee']['edisgo'] = len(
grid.graph.nodes_by_attribute('branch_tee'))
# Check number of disconnecting points
data_integrity['disconnection_point']['ding0'] = len(
ding0_grid._circuit_breakers)
data_integrity['disconnection_point']['edisgo'] = len(
grid.graph.nodes_by_attribute('mv_disconnecting_point'))
# Check number of MV transformers
data_integrity['mv_transformer']['ding0'] = len(
list(ding0_grid.station().transformers()))
data_integrity['mv_transformer']['edisgo'] = len(
grid.station.transformers)
# Check number of LV stations in MV grid (graph)
data_integrity['lv_station']['edisgo'] = len(grid.graph.nodes_by_attribute(
'lv_station'))
data_integrity['lv_station']['ding0'] = len(
[_ for _ in ding0_grid._graph.nodes()
if (isinstance(_, LVStationDing0) and
not _.grid.grid_district.lv_load_area.is_aggregated)])
# Check number of lines outside aggregated LA
# edges_w_la = grid.graph.lines()
# data_integrity['line']['edisgo'] = len([_ for _ in edges_w_la
# if not (_['adj_nodes'][0] == grid.station or
# _['adj_nodes'][1] == grid.station) and
# _['line']._length > .5])
# data_integrity['line']['ding0'] = len(
# [_ for _ in ding0_grid.lines()
# if not _['branch'].connects_aggregated])
# raise an error if data does not match
for c in integrity_checks:
if data_integrity[c]['edisgo'] != data_integrity[c]['ding0']:
raise ValueError(
'Unequal number of objects for {c}. '
'\n\tDing0:\t{ding0_no}'
'\n\teDisGo:\t{edisgo_no}'.format(
c=c,
ding0_no=data_integrity[c]['ding0'],
edisgo_no=data_integrity[c]['edisgo']))
return data_integrity | 0.001043 |
def vimeo_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False, **kwargs):
'''
try:
# normal Vimeo video
html = get_content('https://vimeo.com/' + id)
cfg_patt = r'clip_page_config\s*=\s*(\{.+?\});'
cfg = json.loads(match1(html, cfg_patt))
video_page = get_content(cfg['player']['config_url'], headers=fake_headers)
title = cfg['clip']['title']
info = loads(video_page)
except:
# embedded player - referer may be required
if 'referer' in kwargs:
fake_headers['Referer'] = kwargs['referer']
video_page = get_content('http://player.vimeo.com/video/%s' % id, headers=fake_headers)
title = r1(r'<title>([^<]+)</title>', video_page)
info = loads(match1(video_page, r'var t=(\{.+?\});'))
streams = info['request']['files']['progressive']
streams = sorted(streams, key=lambda i: i['height'])
url = streams[-1]['url']
type, ext, size = url_info(url, faker=True)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge=merge, faker=True)
'''
site = VimeoExtractor()
site.download_by_vid(id, info_only=info_only, output_dir=output_dir, merge=merge, **kwargs) | 0.009245 |
def send_notification(self, method, params):
"""Send a notification
"""
msg = self._encoder.create_notification(method, params)
self._send_message(msg) | 0.010929 |
def by_skills(queryset, skill_string=None):
""" Filter queryset by a comma delimeted skill list """
if skill_string:
operator, items = get_operator_and_items(skill_string)
q_obj = SQ()
for s in items:
if len(s) > 0:
q_obj.add(SQ(skills=s), operator)
queryset = queryset.filter(q_obj)
return queryset | 0.014925 |
def backward(self, gradient, image=None, strict=True):
"""Interface to model.backward for attacks.
Parameters
----------
gradient : `numpy.ndarray`
Gradient of some loss w.r.t. the logits.
image : `numpy.ndarray`
Single input with shape as expected by the model
(without the batch dimension).
Returns
-------
gradient : `numpy.ndarray`
The gradient w.r.t the image.
See Also
--------
:meth:`gradient`
"""
assert self.has_gradient()
assert gradient.ndim == 1
if image is None:
image = self.__original_image
assert not strict or self.in_bounds(image)
self._total_gradient_calls += 1
gradient = self.__model.backward(gradient, image)
assert gradient.shape == image.shape
return gradient | 0.002195 |
def content_download(self, cik, vendor, model, contentid):
"""(Speculation) Fetches content information for a given vendor, model, and ID as chunks.
This method might map to:
https://github.com/exosite/docs/tree/master/provision#get---get-content-blob-1,
but seems to be missing serial number.
Args:
cik: The CIK for the device
vendor: The name of the vendor
model:
contentid: The ID used to name the entity bucket
"""
data = urlencode({'vendor': vendor,
'model': model,
'id': contentid})
headers = {"Accept": "*"}
return self._request(PROVISION_DOWNLOAD,
cik, data, 'GET', True, headers) | 0.003793 |
def create(self, parameters={}, **kwargs):
"""
Create an instance of the US Weather Forecast Service with
typical starting settings.
"""
# Add parameter during create for UAA issuer
uri = self.uaa.service.settings.data['uri'] + '/oauth/token'
parameters["trustedIssuerIds"] = [uri]
super(PredixService, self).create(parameters=parameters, **kwargs) | 0.004854 |
def get_description_lines(docstring):
"""Extract the description from the given docstring.
This grabs everything up to the first occurrence of something that looks
like a parameter description. The docstring will be dedented and cleaned
up using the standard Sphinx methods.
:param str docstring: The source docstring.
:returns: list
"""
if prepare_docstring is None:
raise ImportError('sphinx must be installed to use this function.')
if not isinstance(docstring, str):
return []
lines = []
for line in prepare_docstring(docstring):
if DESCRIPTION_END_RE.match(line):
break
lines.append(line)
if lines and lines[-1] != '':
lines.append('')
return lines | 0.001316 |
def get_deployment_groups(self, project, name=None, action_filter=None, expand=None, continuation_token=None, top=None, ids=None):
"""GetDeploymentGroups.
[Preview API] Get a list of deployment groups by name or IDs.
:param str project: Project ID or project name
:param str name: Name of the deployment group.
:param str action_filter: Get only deployment groups on which this action can be performed.
:param str expand: Include these additional details in the returned objects.
:param str continuation_token: Get deployment groups with names greater than this continuationToken lexicographically.
:param int top: Maximum number of deployment groups to return. Default is **1000**.
:param [int] ids: Comma separated list of IDs of the deployment groups.
:rtype: [DeploymentGroup]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
response = self._send(http_method='GET',
location_id='083c4d89-ab35-45af-aa11-7cf66895c53e',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[DeploymentGroup]', self._unwrap_collection(response)) | 0.006244 |
def compute_invalidation_globs(bootstrap_options):
"""
  Combine --pythonpath and --pants_config_files (pants.ini) files that are in the {buildroot} dir
  with the invalidation_globs provided by users.
:param bootstrap_options:
:return: A list of invalidation_globs
"""
buildroot = get_buildroot()
invalidation_globs = []
globs = bootstrap_options.pythonpath + \
bootstrap_options.pants_config_files + \
bootstrap_options.pantsd_invalidation_globs
for glob in globs:
glob_relpath = os.path.relpath(glob, buildroot)
if glob_relpath and (not glob_relpath.startswith("../")):
invalidation_globs.extend([glob_relpath, glob_relpath + '/**'])
else:
logging.getLogger(__name__).warning("Changes to {}, outside of the buildroot"
", will not be invalidated.".format(glob))
return invalidation_globs | 0.009815 |
def fragmentate(self, give_only_index=False,
use_lookup=None):
"""Get the indices of non bonded parts in the molecule.
Args:
give_only_index (bool): If ``True`` a set of indices is returned.
Otherwise a new Cartesian instance.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. The default is
            specified in ``settings['defaults']['use_lookup']``
Returns:
list: A list of sets of indices or new Cartesian instances.
"""
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
fragments = []
pending = set(self.index)
self.get_bonds(use_lookup=use_lookup)
while pending:
index = self.get_coordination_sphere(
pending.pop(), use_lookup=True, n_sphere=float('inf'),
only_surface=False, give_only_index=True)
pending = pending - index
if give_only_index:
fragments.append(index)
else:
fragment = self.loc[index]
fragment._metadata['bond_dict'] = fragment.restrict_bond_dict(
self._metadata['bond_dict'])
try:
fragment._metadata['val_bond_dict'] = (
fragment.restrict_bond_dict(
self._metadata['val_bond_dict']))
except KeyError:
pass
fragments.append(fragment)
return fragments | 0.001743 |
def make_instance(cls, data):
"""Validate the data and create a model instance from the data.
Args:
data (dict): The unserialized data to insert into the new model
instance through it's constructor.
Returns:
peewee.Model|sqlalchemy.Model: The model instance with it's data
inserted into it.
Raises:
AttributeError: This is raised if ``Meta.model`` isn't set on the
schema's definition.
"""
schema = cls()
if not hasattr(schema.Meta, 'model'):
raise AttributeError("In order to make an instance, a model for "
"the schema must be defined in the Meta "
"class.")
serialized_data = schema.load(data).data
return cls.Meta.model(**serialized_data) | 0.002268 |
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
cmd = ['/usr/bin/rsync', flags]
if timeout:
cmd = ['timeout', str(timeout)] + cmd
cmd.extend(options)
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() | 0.004396 |
def _des_dict_check(self, des_dict, req_keys, cond_name):
"""Check if an input design condition dictionary is acceptable."""
assert isinstance(des_dict, dict), '{}' \
' must be a dictionary. Got {}.'.format(cond_name, type(des_dict))
if bool(des_dict) is True:
input_keys = list(des_dict.keys())
for key in req_keys:
assert key in input_keys, 'Required key "{}" was not found in ' \
'{}'.format(key, cond_name) | 0.005929 |
def update_exit_code(self, code: int):
    '''Set the exit code if it is more serious than before.
Args:
code: The exit code.
'''
if code:
if self._exit_code:
self._exit_code = min(self._exit_code, code)
else:
self._exit_code = code | 0.006173 |
def get_subject_guide_for_section_params(
year, quarter, curriculum_abbr, course_number, section_id=None):
"""
Returns a SubjectGuide model for the passed section params:
year: year for the section term (4-digits)
quarter: quarter (AUT, WIN, SPR, or SUM)
curriculum_abbr: curriculum abbreviation
course_number: course number
section_id: course section identifier (optional)
"""
quarter = quarter.upper()[:3]
url = "{}/{}/{}/{}/{}/{}/{}".format(
subject_guide_url_prefix, 'course', year, quarter,
quote(curriculum_abbr.upper()), course_number, section_id.upper())
headers = {'Accept': 'application/json'}
response = SubjectGuide_DAO().getURL(url, headers)
response_data = str(response.data)
if response.status != 200:
raise DataFailureException(url, response.status, response_data)
return _subject_guide_from_json(json.loads(response.data)) | 0.001068 |
def time_series(timefile, colnames):
"""Read temporal series text file.
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional numeric column names from 0 to N-1 will be attributed to the N
extra columns present in :data:`timefile`.
Args:
timefile (:class:`pathlib.Path`): path of the time.dat file.
colnames (list of names): names of the variables expected in
:data:`timefile` (may be modified).
Returns:
:class:`pandas.DataFrame`:
Time series, with the variables in columns and the time steps in
rows.
"""
if not timefile.is_file():
return None
data = pd.read_csv(timefile, delim_whitespace=True, dtype=str,
header=None, skiprows=1, index_col=0,
engine='c', memory_map=True,
error_bad_lines=False, warn_bad_lines=False)
data = data.apply(pd.to_numeric, raw=True, errors='coerce')
# detect useless lines produced when run is restarted
rows_to_del = []
irow = len(data) - 1
while irow > 0:
iprev = irow - 1
while iprev >= 0 and data.index[irow] <= data.index[iprev]:
rows_to_del.append(iprev)
iprev -= 1
irow = iprev
if rows_to_del:
rows_to_keep = set(range(len(data))) - set(rows_to_del)
data = data.take(list(rows_to_keep), convert=False)
ncols = data.shape[1]
_tidy_names(colnames, ncols)
data.columns = colnames
return data | 0.000649 |
def rdf_graph_from_yaml(yaml_root):
"""Convert the YAML object into an RDF Graph object."""
G = Graph()
for top_entry in yaml_root:
assert len(top_entry) == 1
node = list(top_entry.keys())[0]
build_relations(G, node, top_entry[node], None)
return G | 0.003472 |
def get_system_by_name(self, name):
"""Return a system with that name or None."""
for elem in self.systems:
if elem.name == name:
return elem
return None | 0.009709 |
def _parse_email(self, val):
"""
The function for parsing the vcard email addresses.
Args:
val (:obj:`list`): The value to parse.
"""
ret = {
'type': None,
'value': None
}
try:
ret['type'] = val[1]['type']
except (KeyError, ValueError, TypeError):
pass
ret['value'] = val[3].strip()
try:
self.vars['email'].append(ret)
except AttributeError:
self.vars['email'] = []
self.vars['email'].append(ret) | 0.005042 |
def get_default_settings(sub_scripts, script_order, script_execution_freq, iterator_type):
"""
assigning the actual script settings depending on the iterator type
    this might be overwritten by classes that inherit from ScriptIterator
Args:
sub_scripts: dictionary with the subscripts
script_order: execution order of subscripts
script_execution_freq: execution frequency of subscripts
Returns:
the default setting for the iterator
"""
def populate_sweep_param(scripts, parameter_list, trace=''):
'''
Args:
scripts: a dict of {'class name': <class object>} pairs
Returns: A list of all parameters of the input scripts
'''
def get_parameter_from_dict(trace, dic, parameter_list, valid_values=None):
"""
appends keys in the dict to a list in the form trace.key.subkey.subsubkey...
Args:
trace: initial prefix (path through scripts and parameters to current location)
dic: dictionary
parameter_list: list to which append the parameters
valid_values: valid values of dictionary values if None dic should be a dictionary
Returns:
"""
if valid_values is None and isinstance(dic, Parameter):
valid_values = dic.valid_values
for key, value in dic.items():
if isinstance(value, dict): # for nested parameters ex {point: {'x': int, 'y': int}}
parameter_list = get_parameter_from_dict(trace + '.' + key, value, parameter_list,
dic.valid_values[key])
elif (valid_values[key] in (float, int)) or \
(isinstance(valid_values[key], list) and valid_values[key][0] in (float, int)):
parameter_list.append(trace + '.' + key)
else: # once down to the form {key: value}
# in all other cases ignore parameter
print(('ignoring sweep parameter', key))
return parameter_list
for script_name in list(scripts.keys()):
from pylabcontrol.core import ScriptIterator
script_trace = trace
if script_trace == '':
script_trace = script_name
else:
script_trace = script_trace + '->' + script_name
if issubclass(scripts[script_name], ScriptIterator): # gets subscripts of ScriptIterator objects
populate_sweep_param(vars(scripts[script_name])['_SCRIPTS'], parameter_list=parameter_list,
trace=script_trace)
else:
# use inspect instead of vars to get _DEFAULT_SETTINGS also for classes that inherit _DEFAULT_SETTINGS from a superclass
for setting in \
[elem[1] for elem in inspect.getmembers(scripts[script_name]) if elem[0] == '_DEFAULT_SETTINGS'][0]:
parameter_list = get_parameter_from_dict(script_trace, setting, parameter_list)
return parameter_list
if iterator_type == 'loop':
script_default_settings = [
Parameter('script_order', script_order),
Parameter('script_execution_freq', script_execution_freq),
Parameter('num_loops', 0, int, 'times the subscripts will be executed'),
Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')
]
elif iterator_type == 'sweep':
sweep_params = populate_sweep_param(sub_scripts, [])
script_default_settings = [
Parameter('script_order', script_order),
Parameter('script_execution_freq', script_execution_freq),
Parameter('sweep_param', sweep_params[0], sweep_params, 'variable over which to sweep'),
Parameter('sweep_range',
[Parameter('min_value', 0, float, 'min parameter value'),
Parameter('max_value', 0, float, 'max parameter value'),
Parameter('N/value_step', 0, float,
'either number of steps or parameter value step, depending on mode')]),
Parameter('stepping_mode', 'N', ['N', 'value_step'],
'Switch between number of steps and step amount'),
Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')
]
else:
print(('unknown iterator type ' + iterator_type))
raise TypeError('unknown iterator type ' + iterator_type)
return script_default_settings | 0.004762 |
def get_value(self, item, source_name):
"""
This method receives an item from the source and a source name,
and returns the text content for the `source_name` node.
"""
return force_text(smart_str(item.findtext(source_name))).strip() | 0.007326 |
def load_riskmodel(self):
# to be called before read_exposure
# NB: this is called even if there is no risk model
"""
Read the risk model and set the attribute .riskmodel.
The riskmodel can be empty for hazard calculations.
Save the loss ratios (if any) in the datastore.
"""
logging.info('Reading the risk model if present')
self.riskmodel = readinput.get_risk_model(self.oqparam)
if not self.riskmodel:
parent = self.datastore.parent
if 'risk_model' in parent:
self.riskmodel = riskinput.CompositeRiskModel.read(parent)
return
if self.oqparam.ground_motion_fields and not self.oqparam.imtls:
raise InvalidFile('No intensity_measure_types specified in %s' %
self.oqparam.inputs['job_ini'])
self.save_params() | 0.004449 |
def mass_loss_loon05(L,Teff):
'''
    Mass loss rate from van Loon et al. (2005).
Parameters
----------
L : float
L in L_sun.
Teff : float
Teff in K.
Returns
-------
Mdot
        log10 of the mass loss rate Mdot, with Mdot in Msun/yr
Notes
-----
    ref: van Loon et al. 2005, A&A 438, 273
'''
Mdot = -5.65 + np.log10(old_div(L,10.**4)) -6.3*np.log10(old_div(Teff,3500.))
return Mdot | 0.025641 |
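A worked example for the relation above, assuming the function is in scope: at the reference point L = 1e4 L_sun and Teff = 3500 K both logarithmic terms vanish, so the formula reduces to the normalisation constant.
log_mdot = mass_loss_loon05(1.0e4, 3500.0)
print(log_mdot)          # -5.65 (log10 of Mdot in Msun/yr)
print(10.0 ** log_mdot)  # ~2.2e-6 Msun/yr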
def init_logging(logfile=DEFAULT_LOGNAME, default=None, level=logging.INFO):
"""
Set up logger for capturing stdout/stderr messages.
Must be called prior to writing any messages that you want to log.
"""
if logfile == "INDEF":
if not is_blank(default):
logname = fileutil.buildNewRootname(default, '.log')
else:
logname = DEFAULT_LOGNAME
elif logfile not in [None, "" , " "]:
if logfile.endswith('.log'):
logname = logfile
else:
logname = logfile + '.log'
else:
logname = None
if logname is not None:
logutil.setup_global_logging()
# Don't use logging.basicConfig since it can only be called once in a
# session
# TODO: Would be fine to use logging.config.dictConfig, but it's not
# available in Python 2.5
global _log_file_handler
root_logger = logging.getLogger()
if _log_file_handler:
root_logger.removeHandler(_log_file_handler)
# Default mode is 'a' which is fine
_log_file_handler = logging.FileHandler(logname)
# TODO: Make the default level configurable in the task parameters
_log_file_handler.setLevel(level)
_log_file_handler.setFormatter(
logging.Formatter('[%(levelname)-8s] %(message)s'))
root_logger.setLevel(level)
root_logger.addHandler(_log_file_handler)
print('Setting up logfile : ', logname)
#stdout_logger = logging.getLogger('stsci.tools.logutil.stdout')
# Disable display of prints to stdout from all packages except
# drizzlepac
#stdout_logger.addFilter(logutil.EchoFilter(include=['drizzlepac']))
else:
print('No trailer file created...') | 0.002241 |
def option(self, section, option):
""" Returns the value of the option """
if self.config.has_section(section):
if self.config.has_option(section, option):
return (True, self.config.get(section, option))
return (False, 'Option: ' + option + ' does not exist')
return (False, 'Section: ' + section + ' does not exist') | 0.005249 |
def soft_fail(msg=''):
"""Adds error message to soft errors list if within soft assertions context.
Either just force test failure with the given message."""
global _soft_ctx
if _soft_ctx:
global _soft_err
_soft_err.append('Fail: %s!' % msg if msg else 'Fail!')
return
fail(msg) | 0.006154 |
def module_help(self, module):
"""Describes the key flags of a module.
Args:
module: module|str, the module to describe the key flags for.
Returns:
str, describing the key flags of a module.
"""
helplist = []
self._render_our_module_key_flags(module, helplist)
return '\n'.join(helplist) | 0.00304 |
def waveset(self):
"""Optimal wavelengths for sampling the spectrum or bandpass."""
w = get_waveset(self.model)
if w is not None:
utils.validate_wavelengths(w)
w = w * self._internal_wave_unit
return w | 0.007782 |
def TEST():
""" tests for this module """
grd = Grid(4,4, [2,4])
grd.new_tile()
grd.new_tile()
print(grd)
print("There are ", grd.count_blank_positions(), " blanks in grid 1\n")
grd2 = Grid(5,5, ['A','B'])
grd2.new_tile(26)
print(grd2)
build_board_checkers()
print("There are ", grd2.count_blank_positions(), " blanks in grid 2") | 0.013333 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'user') and self.user is not None:
_dict['user'] = self.user
return _dict | 0.00627 |
def build_url(host, path):
"""
Builds a valid URL from a host and path which may or may not have slashes in the proper place.
Does not conform to `IETF RFC 1808 <https://tools.ietf.org/html/rfc1808.html>`_ but instead joins the host and path as given.
Does not append any additional slashes to the final URL; just joins the host and path properly.
:param str host:
An HTTP host like ``'https://awesome-api.com/v2'``
:param str path:
The path to an endpoint on the host like ``'/some-resource/'``
:return:
The properly-joined URL of host and path, e.g. ``'https://awesome-api.com/v2/some-resource/'``
:rtype:
str
"""
host += "/" if not host.endswith("/") else ""
path = path.lstrip("/")
return parse.urljoin(host, path) | 0.006944 |
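# Hedged usage sketch for build_url above (assumes `parse` is urllib.parse,
# imported at module level): both calls normalize the slashes and produce the
# same joined URL.
from urllib import parse

url_a = build_url("https://awesome-api.com/v2", "some-resource/")
url_b = build_url("https://awesome-api.com/v2/", "/some-resource/")
assert url_a == url_b == "https://awesome-api.com/v2/some-resource/"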
def transaction_effects(self, tx_hash, cursor=None, order='asc', limit=10):
"""This endpoint represents all effects that occurred as a result of a
given transaction.
`GET /transactions/{hash}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-transaction.html>`_
:param str tx_hash: The hex-encoded transaction hash.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:return: A single transaction's effects.
:rtype: dict
"""
endpoint = '/transactions/{tx_hash}/effects'.format(tx_hash=tx_hash)
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params) | 0.003233 |
def isdigit(cls, value):
"""
    digit check for stats
:param value: stats value
:return: True or False
"""
if str(value).replace('.','').replace('-','').isdigit():
return True
return False | 0.01581 |
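# Hedged behaviour sketch of the same check as a standalone function:
# stripping '.' and '-' before str.isdigit() accepts ints, floats and negative
# numbers, but also odd strings such as '1.2.3' or '--5'.
def looks_numeric(value):
    return str(value).replace('.', '').replace('-', '').isdigit()

assert looks_numeric('-3.14') is True
assert looks_numeric('abc') is False
assert looks_numeric('1.2.3') is True   # known limitation of this simple check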
def _processEscapeSequences(replaceText):
"""Replace symbols like \n \\, etc
"""
def _replaceFunc(escapeMatchObject):
char = escapeMatchObject.group(0)[1]
if char in _escapeSequences:
return _escapeSequences[char]
        return escapeMatchObject.group(0)  # no replacement, return the original value
return _seqReplacer.sub(_replaceFunc, replaceText) | 0.005025 |
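# The module-level _escapeSequences dict and _seqReplacer regex are not shown
# above; a minimal self-contained sketch of the same technique, with an
# assumed escape table, could look like this:
import re

_escape_map = {'n': '\n', 't': '\t', '\\': '\\'}
_seq_re = re.compile(r'\\.')

def process_escapes(text):
    # Replace "\n", "\t" and "\\" with their literal characters; leave any
    # unknown sequence (e.g. "\q") untouched.
    def repl(match):
        char = match.group(0)[1]
        return _escape_map.get(char, match.group(0))
    return _seq_re.sub(repl, text)

process_escapes(r'first\nsecond')   # -> 'first\nsecond' with a real newline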
async def set_type_codec(self, typename, *,
schema='public', encoder, decoder,
format='text'):
"""Set an encoder/decoder pair for the specified data type.
:param typename:
Name of the data type the codec is for.
:param schema:
Schema name of the data type the codec is for
(defaults to ``'public'``)
:param format:
The type of the argument received by the *decoder* callback,
and the type of the *encoder* callback return value.
If *format* is ``'text'`` (the default), the exchange datum is a
``str`` instance containing valid text representation of the
data type.
If *format* is ``'binary'``, the exchange datum is a ``bytes``
instance containing valid _binary_ representation of the
data type.
If *format* is ``'tuple'``, the exchange datum is a type-specific
``tuple`` of values. The table below lists supported data
types and their format for this mode.
+-----------------+---------------------------------------------+
| Type | Tuple layout |
+=================+=============================================+
| ``interval`` | (``months``, ``days``, ``microseconds``) |
+-----------------+---------------------------------------------+
| ``date`` | (``date ordinal relative to Jan 1 2000``,) |
| | ``-2^31`` for negative infinity timestamp |
| | ``2^31-1`` for positive infinity timestamp. |
+-----------------+---------------------------------------------+
| ``timestamp`` | (``microseconds relative to Jan 1 2000``,) |
| | ``-2^63`` for negative infinity timestamp |
| | ``2^63-1`` for positive infinity timestamp. |
+-----------------+---------------------------------------------+
| ``timestamp | (``microseconds relative to Jan 1 2000 |
| with time zone``| UTC``,) |
| | ``-2^63`` for negative infinity timestamp |
| | ``2^63-1`` for positive infinity timestamp. |
+-----------------+---------------------------------------------+
| ``time`` | (``microseconds``,) |
+-----------------+---------------------------------------------+
| ``time with | (``microseconds``, |
| time zone`` | ``time zone offset in seconds``) |
+-----------------+---------------------------------------------+
:param encoder:
Callable accepting a Python object as a single argument and
returning a value encoded according to *format*.
:param decoder:
Callable accepting a single argument encoded according to *format*
and returning a decoded Python object.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> import datetime
>>> from dateutil.relativedelta import relativedelta
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... def encoder(delta):
... ndelta = delta.normalized()
... return (ndelta.years * 12 + ndelta.months,
... ndelta.days,
... ((ndelta.hours * 3600 +
... ndelta.minutes * 60 +
... ndelta.seconds) * 1000000 +
... ndelta.microseconds))
... def decoder(tup):
... return relativedelta(months=tup[0], days=tup[1],
... microseconds=tup[2])
... await con.set_type_codec(
... 'interval', schema='pg_catalog', encoder=encoder,
... decoder=decoder, format='tuple')
... result = await con.fetchval(
... "SELECT '2 years 3 mons 1 day'::interval")
... print(result)
... print(datetime.datetime(2002, 1, 1) + result)
...
>>> asyncio.get_event_loop().run_until_complete(run())
relativedelta(years=+2, months=+3, days=+1)
2004-04-02 00:00:00
.. versionadded:: 0.12.0
Added the ``format`` keyword argument and support for 'tuple'
format.
.. versionchanged:: 0.12.0
The ``binary`` keyword argument is deprecated in favor of
``format``.
.. versionchanged:: 0.13.0
The ``binary`` keyword argument was removed in favor of
``format``.
"""
self._check_open()
typeinfo = await self.fetchrow(
introspection.TYPE_BY_NAME, typename, schema)
if not typeinfo:
raise ValueError('unknown type: {}.{}'.format(schema, typename))
if not introspection.is_scalar_type(typeinfo):
raise ValueError(
'cannot use custom codec on non-scalar type {}.{}'.format(
schema, typename))
oid = typeinfo['oid']
self._protocol.get_settings().add_python_codec(
oid, typename, schema, 'scalar',
encoder, decoder, format)
# Statement cache is no longer valid due to codec changes.
self._drop_local_statement_cache() | 0.000687 |
def scgi_request(url, methodname, *params, **kw):
""" Send a XMLRPC request over SCGI to the given URL.
@param url: Endpoint URL.
@param methodname: XMLRPC method name.
@param params: Tuple of simple python objects.
@keyword deserialize: Parse XML result? (default is True)
@return: XMLRPC response, or the equivalent Python data.
"""
xmlreq = xmlrpclib.dumps(params, methodname)
xmlresp = SCGIRequest(url).send(xmlreq)
if kw.get("deserialize", True):
# This fixes a bug with the Python xmlrpclib module
# (has no handler for <i8> in some versions)
xmlresp = xmlresp.replace("<i8>", "<i4>").replace("</i8>", "</i4>")
# Return deserialized data
return xmlrpclib.loads(xmlresp)[0][0]
else:
# Return raw XML
return xmlresp | 0.001188 |
def purchase_vlan(self, vlan_name, debug=False):
"""
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
"""
vlan_name = {'VLanName': vlan_name}
json_scheme = self.gen_def_json_scheme('SetPurchaseVLan', vlan_name)
json_obj = self.call_method_post(method="SetPurchaseVLan", json_scheme=json_scheme)
if debug is True:
self.logger.debug(json_obj)
if json_obj['Success'] is False:
raise Exception("Cannot purchase new vlan.")
vlan = Vlan()
vlan.name = json_obj['Value']['Name']
vlan.resource_id = json_obj['Value']['ResourceId']
vlan.vlan_code = json_obj['Value']['VlanCode']
return vlan | 0.004561 |
def create_or_update(ctx, model, xmlid, values):
""" Create or update a record matching xmlid with values """
if isinstance(model, basestring):
model = ctx.env[model]
record = ctx.env.ref(xmlid, raise_if_not_found=False)
if record:
record.update(values)
else:
record = model.create(values)
add_xmlid(ctx, record, xmlid)
return record | 0.002564 |
def store_meta_data(self, copy_path=None):
"""Save meta data of state model to the file system
This method generates a dictionary of the meta data of the state together with the meta data of all state
elements (data ports, outcomes, etc.) and stores it on the filesystem.
        Make sure that the store meta data method is called after storing the core data, otherwise the
        last_stored_path may be wrong or None.
        The copy path is considered to be a state machine file system path, but not the current one, e.g. that of
        a state machine saved as a copy. The meta data will be stored in the respective relative state folder in the
        state machine hierarchy. This folder has to exist.
        Therefore, the core elements of the state machine have to be stored first.
:param str copy_path: Optional copy path if meta data is not stored to the file system path of state machine
"""
if copy_path:
meta_file_path_json = os.path.join(copy_path, self.state.get_storage_path(), storage.FILE_NAME_META_DATA)
else:
if self.state.file_system_path is None:
logger.error("Meta data of {0} can be stored temporary arbitrary but by default first after the "
"respective state was stored and a file system path is set.".format(self))
return
meta_file_path_json = os.path.join(self.state.file_system_path, storage.FILE_NAME_META_DATA)
meta_data = deepcopy(self.meta)
self._generate_element_meta_data(meta_data)
storage_utils.write_dict_to_json(meta_data, meta_file_path_json) | 0.006671 |
def _split_license(license):
'''Returns all individual licenses in the input'''
return (x.strip() for x in (l for l in _regex.split(license) if l)) | 0.012903 |
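# The module-level _regex splitter is not shown above; a minimal
# self-contained sketch of the same idea, assuming licenses are separated by
# commas, semicolons or the words "and"/"or":
import re

_license_re = re.compile(r'[,;]|\band\b|\bor\b', flags=re.IGNORECASE)

def split_license(license):
    return (x.strip() for x in (l for l in _license_re.split(license) if l))

list(split_license('MIT or Apache-2.0'))   # -> ['MIT', 'Apache-2.0']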
def markdown(text, escape=True, **kwargs):
"""Render markdown formatted text to html.
:param text: markdown formatted text content.
:param escape: if set to False, all html tags will not be escaped.
:param use_xhtml: output with xhtml tags.
:param hard_wrap: if set to True, it will use the GFM line breaks feature.
:param parse_block_html: parse text only in block level html.
:param parse_inline_html: parse text only in inline level html.
"""
return Markdown(escape=escape, **kwargs)(text) | 0.00189 |
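# Hedged usage sketch (assumes the Markdown renderer class defined earlier in
# this module):
html = markdown('Hello **world**')             # raw HTML in the input is escaped by default
raw = markdown('<em>hi</em>', escape=False)    # HTML tags pass through unescaped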
def _execute(self, workdir, with_mpirun=False, exec_args=None):
"""
Execute the executable in a subprocess inside workdir.
Some executables fail if we try to launch them with mpirun.
Use with_mpirun=False to run the binary without it.
"""
qadapter = self.manager.qadapter
if not with_mpirun: qadapter.name = None
if self.verbose:
print("Working in:", workdir)
script = qadapter.get_script_str(
job_name=self.name,
launch_dir=workdir,
executable=self.executable,
qout_path="qout_file.path",
qerr_path="qerr_file.path",
stdin=self.stdin_fname,
stdout=self.stdout_fname,
stderr=self.stderr_fname,
exec_args=exec_args
)
# Write the script.
script_file = os.path.join(workdir, "run" + self.name + ".sh")
with open(script_file, "w") as fh:
fh.write(script)
os.chmod(script_file, 0o740)
qjob, process = qadapter.submit_to_queue(script_file)
self.stdout_data, self.stderr_data = process.communicate()
self.returncode = process.returncode
#raise self.Error("%s returned %s\n cmd_str: %s" % (self, self.returncode, self.cmd_str))
return self.returncode | 0.003745 |
def synthesize_using_websocket(self,
text,
synthesize_callback,
accept=None,
voice=None,
timings=None,
customization_id=None,
http_proxy_host=None,
http_proxy_port=None,
**kwargs):
"""
Synthesizes text to spoken audio using web sockets. It supports the use of
the SSML <mark> element to identify the location of user-specified markers in the audio.
It can also return timing information for all strings of the input text.
        Note: The service processes one request per connection.
:param str text: Provides the text that is to be synthesized. The client can pass plain
text or text that is annotated with the Speech Synthesis Markup Language (SSML). For more
information, see [Specifying input text](https://console.bluemix.net/docs/services/text-to-speech/http.html#input).
SSML input can also include the <mark> element;
see [Specifying an SSML mark](https://console.bluemix.net/docs/services/text-to-speech/word-timing.html#mark).
The client can pass a maximum of 5 KB of text with the request.
:param SynthesizeCallback synthesize_callback: The callback method for the websocket.
:param str accept: Specifies the requested format (MIME type) of the audio. For more information, see [Specifying
an audio format](https://console.bluemix.net/docs/services/text-to-speech/http.html#format). In addition to the
supported specifications, you can use */* to specify the default audio format, audio/ogg;codecs=opus.
:param str voice: The voice to use for synthesis.
:param list[str] timings: Specifies that the service is to return word timing information for all strings of the
input text. The service returns the start and end time of each string of the input. Specify words as the lone element
of the array to request word timings. Specify an empty array or omit the parameter to receive no word timings. For
more information, see [Obtaining word timings](https://console.bluemix.net/docs/services/text-to-speech/word-timing.html#timing).
Not supported for Japanese input text.
:param str customization_id: Specifies the globally unique identifier (GUID) for a custom voice model that is to be used for the
synthesis. A custom voice model is guaranteed to work only if it matches the language of the voice that is used for the synthesis.
If you include a customization ID, you must call the method with the service credentials of the custom model's owner. Omit the
parameter to use the specified voice with no customization. For more information, see [Understanding customization]
(https://console.bluemix.net/docs/services/text-to-speech/custom-intro.html#customIntro).
:param str http_proxy_host: http proxy host name.
:param str http_proxy_port: http proxy port. If not set, set to 80.
:param dict headers: A `dict` containing the request headers
:return: A `dict` containing the `SpeechRecognitionResults` response.
:rtype: dict
"""
if text is None:
raise ValueError('text must be provided')
if synthesize_callback is None:
raise ValueError('synthesize_callback must be provided')
if not isinstance(synthesize_callback, SynthesizeCallback):
raise Exception(
'Callback is not a derived class of SynthesizeCallback')
headers = {}
if self.default_headers is not None:
headers = self.default_headers.copy()
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
if self.token_manager:
access_token = self.token_manager.get_token()
headers['Authorization'] = '{0} {1}'.format(BEARER, access_token)
else:
authstring = "{0}:{1}".format(self.username, self.password)
base64_authorization = base64.b64encode(authstring.encode('utf-8')).decode('utf-8')
headers['Authorization'] = 'Basic {0}'.format(base64_authorization)
url = self.url.replace('https:', 'wss:')
params = {
'voice': voice,
'customization_id': customization_id,
}
params = dict([(k, v) for k, v in params.items() if v is not None])
url += '/v1/synthesize?{0}'.format(urlencode(params))
options = {
'text': text,
'accept': accept,
'timings': timings
}
options = dict([(k, v) for k, v in options.items() if v is not None])
SynthesizeListener(options,
synthesize_callback,
url,
headers,
http_proxy_host,
http_proxy_port,
self.verify) | 0.006005 |
def symmetric_difference(self, other):
r"""Return a new set with elements in either the set or other but not both.
>>> ms = Multiset('aab')
>>> sorted(ms.symmetric_difference('abc'))
['a', 'c']
        You can also use the ``^`` operator for the same effect. However, the operator version
        will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms ^ Multiset('aaac'))
['a', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`symmetric_difference_update`.
Args:
other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting symmetric difference multiset.
"""
other = self._as_multiset(other)
result = self.__class__()
_total = 0
_elements = result._elements
self_elements = self._elements
other_elements = other._elements
dist_elements = set(self_elements.keys()) | set(other_elements.keys())
for element in dist_elements:
multiplicity = self_elements.get(element, 0)
other_multiplicity = other_elements.get(element, 0)
new_multiplicity = (multiplicity - other_multiplicity
if multiplicity > other_multiplicity else other_multiplicity - multiplicity)
_total += new_multiplicity
if new_multiplicity > 0:
_elements[element] = new_multiplicity
result._total = _total
return result | 0.004505 |
def records():
"""Load test data fixture."""
import uuid
from invenio_records.api import Record
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
indexer = RecordIndexer()
index_queue = []
# Record 1 - Live record
with db.session.begin_nested():
rec_uuid = uuid.uuid4()
pid1 = PersistentIdentifier.create(
'recid', '1', object_type='rec', object_uuid=rec_uuid,
status=PIDStatus.REGISTERED)
Record.create({
'title': 'Registered',
'description': 'This is an awesome description',
# "mint" the record as recid minter does
'control_number': '1',
}, id_=rec_uuid)
index_queue.append(pid1.object_uuid)
# Record 2 - Deleted PID with record
rec_uuid = uuid.uuid4()
pid = PersistentIdentifier.create(
'recid', '2', object_type='rec', object_uuid=rec_uuid,
status=PIDStatus.REGISTERED)
Record.create({
'title': 'Live ',
'control_number': '2',
}, id_=rec_uuid)
pid.delete()
# Record 3 - Deleted PID without a record
PersistentIdentifier.create(
'recid', '3', status=PIDStatus.DELETED)
# Record 4 - Registered PID without a record
PersistentIdentifier.create(
'recid', '4', status=PIDStatus.REGISTERED)
# Record 5 - Redirected PID
pid = PersistentIdentifier.create(
'recid', '5', status=PIDStatus.REGISTERED)
pid.redirect(pid1)
# Record 6 - Redirected non existing endpoint
doi = PersistentIdentifier.create(
'doi', '10.1234/foo', status=PIDStatus.REGISTERED)
pid = PersistentIdentifier.create(
'recid', '6', status=PIDStatus.REGISTERED)
pid.redirect(doi)
# Record 7 - Unregistered PID
PersistentIdentifier.create(
'recid', '7', status=PIDStatus.RESERVED)
for rec_idx in range(len(record_examples)):
rec_uuid = uuid.uuid4()
rec_pid = 8 + rec_idx
pid1 = PersistentIdentifier.create(
'recid', str(rec_pid), object_type='rec', object_uuid=rec_uuid,
status=PIDStatus.REGISTERED)
# "mint" the record as recid minter does
record = dict(record_examples[rec_idx])
record['control_number'] = str(rec_pid)
# create the record
Record.create(record, id_=rec_uuid)
index_queue.append(rec_uuid)
db.session.commit()
for i in index_queue:
indexer.index_by_id(i) | 0.000379 |
def configure_app(config_path=None, project=None, default_config_path=None,
default_settings=None, settings_initializer=None,
settings_envvar=None, initializer=None, allow_extras=True,
config_module_name=None, runner_name=None, on_configure=None):
"""
:param project: should represent the canonical name for the project, generally
        the same name it is assigned in distutils.
:param default_config_path: the default location for the configuration file.
    :param default_settings: default settings to load (think inheritance).
:param settings_initializer: a callback function which should return a string
representing the default settings template to generate.
:param initializer: a callback function which will be executed before the command
is executed. It is passed a dictionary of various configuration attributes.
"""
global __configured
project_filename = sanitize_name(project)
if default_config_path is None:
default_config_path = '~/%s/%s.conf.py' % (project_filename, project_filename)
if settings_envvar is None:
settings_envvar = project_filename.upper() + '_CONF'
if config_module_name is None:
config_module_name = project_filename + '_config'
# normalize path
if settings_envvar in os.environ:
default_config_path = os.environ.get(settings_envvar)
else:
default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))
if not config_path:
config_path = default_config_path
config_path = os.path.expanduser(config_path)
if not os.path.exists(config_path):
if runner_name:
raise ValueError("Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,))
raise ValueError("Configuration file does not exist at %r" % (config_path,))
os.environ['DJANGO_SETTINGS_MODULE'] = config_module_name
def settings_callback(settings):
if initializer is None:
return
try:
initializer({
'project': project,
'config_path': config_path,
'settings': settings,
})
except Exception:
# XXX: Django doesn't like various errors in this path
import sys
import traceback
traceback.print_exc()
sys.exit(1)
importer.install(
config_module_name, config_path, default_settings,
allow_extras=allow_extras, callback=settings_callback)
__configured = True
# HACK(dcramer): we need to force access of django.conf.settings to
# ensure we don't hit any import-driven recursive behavior
from django.conf import settings
hasattr(settings, 'INSTALLED_APPS')
if on_configure:
on_configure({
'project': project,
'config_path': config_path,
'settings': settings,
}) | 0.003663 |
def contiguous_slice(in1):
"""
This function unpads an array on the GPU in such a way as to make it contiguous.
INPUTS:
in1 (no default): Array containing data which has been padded.
OUTPUTS:
gpu_out1 Array containing unpadded, contiguous data.
"""
ker = SourceModule("""
__global__ void contiguous_slice_ker(float *in1, float *out1)
{
const int len = gridDim.x*blockDim.x;
const int col = (blockDim.x * blockIdx.x + threadIdx.x);
const int row = (blockDim.y * blockIdx.y + threadIdx.y);
const int tid2 = col + len*row;
const int first_idx = len/4;
const int last_idx = (3*len)/4;
const int out_idx = (col-first_idx)+(row-first_idx)*(len/2);
if (((col>=first_idx)&(row>=first_idx))&((col<last_idx)&(row<last_idx)))
{ out1[out_idx] = in1[tid2]; }
}
""", keep=True)
    gpu_out1 = gpuarray.empty([in1.shape[0] // 2, in1.shape[1] // 2], np.float32)
contiguous_slice_ker = ker.get_function("contiguous_slice_ker")
contiguous_slice_ker(in1, gpu_out1, block=(32,32,1), grid=(int(in1.shape[1]//32), int(in1.shape[0]//32)))
return gpu_out1 | 0.007586 |
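# For reference, a hedged CPU/NumPy equivalent of what the kernel above
# computes: cropping the central half of a padded square array (the GPU
# version additionally assumes the side length is a multiple of 32 so the
# 32x32 blocks tile it exactly).
import numpy as np

def contiguous_slice_cpu(in1):
    rows, cols = in1.shape
    return np.ascontiguousarray(
        in1[rows // 4: 3 * rows // 4, cols // 4: 3 * cols // 4])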
def s_magic(sfile, anisfile="specimens.txt", dir_path=".", atype="AMS",
coord_type="s", sigma=False, samp_con="1", specnum=0,
location="unknown", spec="unknown", sitename="unknown",
user="", data_model_num=3, name_in_file=False, input_dir_path=""):
"""
converts .s format data to measurements format.
Parameters
----------
sfile : str
.s format file, required
anisfile : str
specimen filename, default 'specimens.txt'
dir_path : str
output directory, default "."
atype : str
anisotropy type (AMS, AARM, ATRM, default AMS)
coord_type : str
coordinate system ('s' for specimen, 't' for tilt-corrected,
or 'g' for geographic, default 's')
sigma : bool
if True, last column has sigma, default False
samp_con : str
sample/site naming convention, default '1', see info below
specnum : int
number of characters to designate a specimen, default 0
location : str
location name, default "unknown"
spec : str
specimen name, default "unknown"
sitename : str
site name, default "unknown"
user : str
user name, default ""
data_model_num : int
MagIC data model 2 or 3, default 3
name_in_file : bool
first entry of each line is specimen name, default False
input_dir_path : input directory path IF different from dir_path, default ""
Returns
---------
        Tuple : (True or False indicating if conversion was successful, meas_file name written)
Input format
--------
X11,X22,X33,X12,X23,X13 (.s format file)
X11,X22,X33,X12,X23,X13,sigma (.s format file with -sig option)
SID, X11,X22,X33,X12,X23,X13 (.s format file with -n option)
Info
--------
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
"""
con, Z = "", 1
if samp_con:
samp_con = str(samp_con)
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "option [4] must be in form 4-Z where Z is an integer"
else:
Z = samp_con.split("-")[1]
samp_con = "4"
if samp_con == '6':
print("option [6] is not currently supported")
return
else:
samp_con = con
coord_dict = {'s': '-1', 't': '100', 'g': '0'}
coord = coord_dict.get(coord_type, '-1')
specnum = -specnum
if data_model_num == 2:
specimen_col = "er_specimen_name"
sample_col = "er_sample_name"
site_col = "er_site_name"
loc_col = "er_location_name"
citation_col = "er_citation_names"
analyst_col = "er_analyst_mail_names"
aniso_type_col = "anisotropy_type"
experiment_col = "magic_experiment_names"
sigma_col = "anisotropy_sigma"
unit_col = "anisotropy_unit"
tilt_corr_col = "anisotropy_tilt_correction"
method_col = "magic_method_codes"
outfile_type = "rmag_anisotropy"
else:
specimen_col = "specimen"
sample_col = "sample"
site_col = "site"
loc_col = "location"
citation_col = "citations"
analyst_col = "analysts"
aniso_type_col = "aniso_type"
experiment_col = "experiments"
sigma_col = "aniso_s_sigma"
unit_col = "aniso_s_unit"
tilt_corr_col = "aniso_tilt_correction"
method_col = "method_codes"
outfile_type = "specimens"
# get down to bidness
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
sfile = pmag.resolve_file_name(sfile, input_dir_path)
anisfile = pmag.resolve_file_name(anisfile, dir_path)
try:
with open(sfile, 'r') as f:
lines = f.readlines()
except FileNotFoundError:
return False, "No such file: {}".format(sfile)
AnisRecs = []
citation = "This study"
# read in data
for line in lines:
AnisRec = {}
rec = line.split()
if name_in_file:
k = 1
spec = rec[0]
else:
k = 0
trace = float(rec[k])+float(rec[k+1])+float(rec[k+2])
s1 = '%10.9e' % (float(rec[k]) / trace)
s2 = '%10.9e' % (float(rec[k+1]) / trace)
s3 = '%10.9e' % (float(rec[k+2]) / trace)
s4 = '%10.9e' % (float(rec[k+3]) / trace)
s5 = '%10.9e' % (float(rec[k+4]) / trace)
s6 = '%10.9e' % (float(rec[k+5]) / trace)
AnisRec[citation_col] = citation
AnisRec[specimen_col] = spec
if specnum != 0:
AnisRec[sample_col] = spec[:specnum]
else:
AnisRec[sample_col] = spec
# if samp_con == "6":
# for samp in Samps:
# if samp['er_sample_name'] == AnisRec["er_sample_name"]:
# sitename = samp['er_site_name']
# location = samp['er_location_name']
if samp_con != "":
sitename = pmag.parse_site(AnisRec[sample_col], samp_con, Z)
AnisRec[loc_col] = location
AnisRec[site_col] = sitename
AnisRec[analyst_col] = user
if atype == 'AMS':
AnisRec[aniso_type_col] = "AMS"
AnisRec[experiment_col] = spec+":LP-X"
else:
AnisRec[aniso_type_col] = atype
AnisRec[experiment_col] = spec+":LP-"+atype
if data_model_num != 3:
AnisRec["anisotropy_s1"] = s1
AnisRec["anisotropy_s2"] = s2
AnisRec["anisotropy_s3"] = s3
AnisRec["anisotropy_s4"] = s4
AnisRec["anisotropy_s5"] = s5
AnisRec["anisotropy_s6"] = s6
else:
AnisRec['aniso_s'] = ":".join(
[str(s) for s in [s1, s2, s3, s4, s5, s6]])
if sigma:
AnisRec[sigma_col] = '%10.8e' % (
float(rec[k+6]) / trace)
AnisRec[unit_col] = 'SI'
AnisRec[tilt_corr_col] = coord
AnisRec[method_col] = 'LP-' + atype
AnisRecs.append(AnisRec)
pmag.magic_write(anisfile, AnisRecs, outfile_type)
print('data saved in ', anisfile)
# try to extract location/site/sample info into tables
con = cb.Contribution(dir_path, custom_filenames={"specimens": anisfile})
con.propagate_all_tables_info()
for table in con.tables:
if table in ['samples', 'sites', 'locations']:
# add in location name by hand
if table == 'sites':
con.tables['sites'].df['location'] = location
con.write_table_to_file(table)
return True, anisfile | 0.001093 |
def create(self, vm_, local_master=True):
'''
Create a single VM
'''
output = {}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
alias, driver = vm_['provider'].split(':')
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
vm_['name'], vm_['provider'], driver
)
return
deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
make_master = salt.config.get_cloud_config_value(
'make_master',
vm_,
self.opts
)
if deploy:
if not make_master and 'master' not in minion_dict:
log.warning(
'There\'s no master defined on the \'%s\' VM settings.',
vm_['name']
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['pub_key'] = pub
vm_['priv_key'] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_['pub_key'] = None
vm_['priv_key'] = None
key_id = minion_dict.get('id', vm_['name'])
domain = vm_.get('domain')
if vm_.get('use_fqdn') and domain:
minion_dict['append_domain'] = domain
if 'append_domain' in minion_dict:
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug('Generating the master keys for \'%s\'', vm_['name'])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(
self.opts['pki_dir'], vm_['pub_key'], key_id
)
vm_['os'] = salt.config.get_cloud_config_value(
'script',
vm_,
self.opts
)
try:
vm_['inline_script'] = salt.config.get_cloud_config_value(
'inline_script',
vm_,
self.opts
)
except KeyError:
pass
try:
alias, driver = vm_['provider'].split(':')
func = '{0}.create'.format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and 'sync_after_install' in self.opts:
if self.opts['sync_after_install'] not in (
'all', 'modules', 'states', 'grains'):
log.error('Bad option for sync_after_install')
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.master_config(
os.path.join(conf_path,
'master')
)
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_['name'],
'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
timeout=self.opts['timeout']
)
if ret:
log.info(
six.u('Synchronized the following dynamic modules: '
' {0}').format(ret)
)
break
except KeyError as exc:
log.exception(
'Failed to create VM %s. Configuration value %s needs '
'to be set', vm_['name'], exc
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts['map']
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
self.opts['start_action'],
timeout=self.opts['timeout'] * 60
)
output['ret'] = action_out
return output | 0.001218 |
def create_profile():
"""If this is the user's first login, the create_or_login function
will redirect here so that the user can set up his profile.
"""
if g.user is not None or 'openid' not in session:
return redirect(url_for('index'))
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
if not name:
flash(u'Error: you have to provide a name')
elif '@' not in email:
flash(u'Error: you have to enter a valid email address')
else:
flash(u'Profile successfully created')
User.get_collection().insert(User(name, email, session['openid']))
return redirect(oid.get_next_url())
return render_template('create_profile.html', next_url=oid.get_next_url()) | 0.001227 |
def add(self, rule):
"""Add a new classifier rule to the classifier set. Return a list
containing zero or more rules that were deleted from the classifier
by the algorithm in order to make room for the new rule. The rule
argument should be a ClassifierRule instance. The behavior of this
method depends on whether the rule already exists in the
classifier set. When a rule is already present, the rule's
numerosity is added to that of the version of the rule already
present in the population. Otherwise, the new rule is captured.
Note that this means that for rules already present in the
classifier set, the metadata of the existing rule is not
overwritten by that of the one passed in as an argument.
Usage:
displaced_rules = model.add(rule)
Arguments:
rule: A ClassifierRule instance which is to be added to this
classifier set.
Return:
A possibly empty list of ClassifierRule instances which were
removed altogether from the classifier set (as opposed to
simply having their numerosities decremented) in order to make
room for the newly added rule.
"""
assert isinstance(rule, ClassifierRule)
condition = rule.condition
action = rule.action
# If the rule already exists in the population, then we virtually
# add the rule by incrementing the existing rule's numerosity. This
# prevents redundancy in the rule set. Otherwise we capture the
# new rule.
if condition not in self._population:
self._population[condition] = {}
if action in self._population[condition]:
existing_rule = self._population[condition][action]
existing_rule.numerosity += rule.numerosity
else:
self._population[condition][action] = rule
# Any time we add a rule, we need to call this to keep the
# population size under control.
return self._algorithm.prune(self) | 0.000949 |
def load_labeled_events(filename, delimiter=r'\s+'):
r"""Import labeled time-stamp events from an annotation file. The file should
consist of two columns; the first having numeric values corresponding to
the event times and the second having string labels for each event. This
is primarily useful for processing labeled events which lack duration, such
as beats with metric beat number or onsets with an instrument label.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
labels : list of str
list of labels
"""
# Use our universal function to load in the events
events, labels = load_delimited(filename, [float, str], delimiter)
events = np.array(events)
# Validate them, but throw a warning in place of an error
try:
util.validate_events(events)
except ValueError as error:
warnings.warn(error.args[0])
return events, labels | 0.001718 |
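# Hedged usage sketch: write a tiny two-column annotation file and load it
# back (the file name and contents below are illustrative only).
with open('beats.txt', 'w') as f:
    f.write('0.50 1\n1.02 2\n1.53 3\n')
times, labels = load_labeled_events('beats.txt')
# times -> array([0.5 , 1.02, 1.53]); labels -> ['1', '2', '3']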
def set_field(self, state, field_name, field_type, value):
"""
Sets an instance field.
"""
field_ref = SimSootValue_InstanceFieldRef.get_ref(state=state,
obj_alloc_id=self.heap_alloc_id,
field_class_name=self.type,
field_name=field_name,
field_type=field_type)
# store value in java memory
state.memory.store(field_ref, value) | 0.009901 |