def is_wav(origin, filepath, fileobj, *args, **kwargs):
"""Identify a file as WAV
See `astropy.io.registry` for details on how this function is used.
"""
# pylint: disable=unused-argument
if origin == 'read' and fileobj is not None:
loc = fileobj.tell()
fileobj.seek(0)
try:
riff, _, fmt = struct.unpack('<4sI4s', fileobj.read(12))
if isinstance(riff, bytes):
riff = riff.decode('utf-8')
fmt = fmt.decode('utf-8')
return riff == WAV_SIGNATURE[0] and fmt == WAV_SIGNATURE[1]
except (UnicodeDecodeError, struct.error):
return False
finally:
fileobj.seek(loc)
elif filepath is not None:
return filepath.endswith(('.wav', '.wave'))
else:
try:
wave.open(args[0])
except (wave.Error, AttributeError):
return False
else:
return True
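# Usage sketch (an assumption-labeled example, not part of the original API):
# this builds the 12-byte RIFF/WAVE header is_wav inspects, assuming
# WAV_SIGNATURE = ('RIFF', 'WAVE') as the comparisons above imply.
import io
import struct
WAV_SIGNATURE = ('RIFF', 'WAVE')  # assumed constant, mirrored from the checks above
header = struct.pack('<4sI4s', b'RIFF', 36, b'WAVE')
buf = io.BytesIO(header)
print(is_wav('read', None, buf))         # True; the stream position is restored
print(is_wav('read', 'song.wav', None))  # True, matched by file extension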
def parse_unstruct(unstruct):
"""
Convert an unstructured event JSON to a list containing one Elasticsearch-compatible key-value pair
For example, the JSON
{
"data": {
"data": {
"key": "value"
},
"schema": "iglu:com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-1"
},
"schema": "iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0"
}
would become
[
(
"unstruct_com_snowplowanalytics_snowplow_link_click_1", {
"key": "value"
}
)
]
"""
my_json = json.loads(unstruct)
data = my_json['data']
schema = data['schema']
if 'data' in data:
inner_data = data['data']
else:
raise SnowplowEventTransformationException(["Could not extract inner data field from unstructured event"])
fixed_schema = fix_schema("unstruct_event", schema)
return [(fixed_schema, inner_data)]
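# Hedged round-trip of the docstring's example event (assumes the json import
# and the fix_schema helper used above behave as documented):
event = json.dumps({
    "schema": "iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
    "data": {
        "schema": "iglu:com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-1",
        "data": {"key": "value"},
    },
})
print(parse_unstruct(event))
# [('unstruct_com_snowplowanalytics_snowplow_link_click_1', {'key': 'value'})]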
def flexifunction_buffer_function_encode(self, target_system, target_component, func_index, func_count, data_address, data_size, data):
'''
Flexifunction type and parameters for component at function index from
buffer
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
func_index : Function index (uint16_t)
func_count : Total count of functions (uint16_t)
data_address : Address in the flexifunction data, Set to 0xFFFF to use address in target memory (uint16_t)
data_size : Size of the data (uint16_t)
data : Settings data (int8_t)
'''
return MAVLink_flexifunction_buffer_function_message(target_system, target_component, func_index, func_count, data_address, data_size, data)
def write_file(self, name, path=None):
"""Write the contents of a file from the disk to the XPI."""
if path is None:
path = name
self.zf.write(path, name)
def FetchCompletedResponses(self, session_id, timestamp=None, limit=10000):
"""Fetch only completed requests and responses up to a limit."""
if timestamp is None:
timestamp = (0, self.frozen_timestamp or rdfvalue.RDFDatetime.Now())
completed_requests = collections.deque(
self.FetchCompletedRequests(session_id, timestamp=timestamp))
total_size = 0
while completed_requests:
# Size reported in the status messages may be different from actual
# number of responses read from the database. Example: hunt responses
# may get deleted from the database and then worker may die before
# deleting the request. Then status.response_id will be >0, but no
# responses will be read from the DB.
projected_total_size = total_size
request_list = []
while completed_requests:
request, status = completed_requests.popleft()
# Make sure at least one response is fetched.
request_list.append(request)
# Quit if there are too many responses.
projected_total_size += status.response_id
if projected_total_size > limit:
break
for request, responses in self.data_store.ReadResponses(request_list):
yield (request, responses)
total_size += len(responses)
if total_size > limit:
raise MoreDataException()
def error_response(self, exception):
# type: (Exception) -> Tuple[Callback, Error]
"""Create an Error Response object to signal an error"""
response = Error(id=self.id, message=exception)
log.exception("Exception raised for request %s", self)
return self.callback, response
def oindex(a, selection):
"""Implementation of orthogonal indexing with slices and ints."""
selection = replace_ellipsis(selection, a.shape)
drop_axes = tuple([i for i, s in enumerate(selection) if is_integer(s)])
selection = ix_(selection, a.shape)
result = a[selection]
if drop_axes:
result = result.squeeze(axis=drop_axes)
return result
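# Illustration of the orthogonal-indexing behaviour being wrapped: numpy's
# np.ix_ crosses the index lists instead of zipping them pointwise.
import numpy as np
a = np.arange(12).reshape(3, 4)
print(a[np.ix_([0, 2], [1, 3])])  # rows [0, 2] crossed with columns [1, 3]
# [[ 1  3]
#  [ 9 11]]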
def input(self, **kwargs):
"""
Specify temporary input file extension.
Browserify requires explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469
"""
if self.infile is None and "{infile}" in self.command:
if self.filename is None:
self.infile = NamedTemporaryFile(mode='wb', suffix=self.infile_ext)
self.infile.write(self.content.encode(self.default_encoding))
self.infile.flush()
self.options += (
('infile', self.infile.name),
)
return super(BaseCompiler, self).input(**kwargs)
def _search_stock_info(self, code):
"""
Fetch detailed stock information via the Xueqiu (雪球) API.
:param code: stock code, e.g. 000001
:return: the matched stock, e.g. {u'stock_id': 1000279, u'code': u'SH600325',
u'name': u'华发股份', u'ind_color': u'#d9633b', u'chg': -1.09,
u'ind_id': 100014, u'percent': -9.31, u'current': 10.62,
u'hasexist': None, u'flag': 1, u'ind_name': u'房地产', u'type': None,
u'enName': None}
** flag : not yet listed (0), trading normally (1), suspended (2), at limit up/down (3), delisted (4)
"""
data = {
"code": str(code),
"size": "300",
"key": "47bce5c74f",
"market": self.account_config["portfolio_market"],
}
r = self.s.get(self.config["search_stock_url"], params=data)
stocks = json.loads(r.text)
stocks = stocks["stocks"]
stock = None
if len(stocks) > 0:
stock = stocks[0]
return stock
def sub_base_uri(self):
""" This will return the sub_base_uri parsed from the base_uri
:return: str of the sub_base_uri
"""
return self._base_uri and \
self._base_uri.split('://')[-1].split('.')[0] \
or self._base_uri
def create_xml_file_from_string(self, content, destination=None):
"""
Creates XML file from text.
:param content: C++ source code
:type content: str
:param destination: file name for xml file
:type destination: str
:rtype: returns file name of xml file
"""
header_file = utils.create_temp_file_name(suffix='.h')
try:
with open(header_file, "w+") as header:
header.write(content)
xml_file = self.create_xml_file(header_file, destination)
finally:
utils.remove_file_no_raise(header_file, self.__config)
return xml_file
def obj_from_file(filename='annotation.yaml', filetype='auto'):
''' Read object from file '''
if filetype == 'auto':
_, ext = os.path.splitext(filename)
filetype = ext[1:]
if filetype in ('yaml', 'yml'):
from ruamel.yaml import YAML
yaml = YAML(typ="unsafe")
with open(filename, encoding="utf-8") as f:
obj = yaml.load(f)
if obj is None:
obj = {}
# import yaml
# with open(filename, encoding="utf-8") as f:
# intext = f.read()
# obj = yaml.load(intext)
elif filetype in ('pickle', 'pkl', 'pklz', 'picklezip'):
fcontent = read_pkl_and_pklz(filename)
# import pickle
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
# import sPickle as pickle
if sys.version_info.major == 2:
obj = pickle.loads(fcontent)
else:
obj = pickle.loads(fcontent, encoding="latin1")
else:
logger.error('Unknown filetype ' + filetype)
return obj
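# Hypothetical usage; the file names are illustrative only.
cfg = obj_from_file('annotation.yaml')          # filetype inferred from extension
data = obj_from_file('dump.pklz', 'picklezip')  # filetype given explicitly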
def namespace(self, name=None, function=None, recursive=None):
"""
Returns reference to namespace declaration that matches
a defined criteria.
"""
return (
self._find_single(
scopedef.scopedef_t._impl_matchers[namespace_t.namespace],
name=name,
function=function,
recursive=recursive)
)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = logging.currentframe()
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == logging._srcfile or filename == self._srcfile:
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
break
return rv
def _normalize(schema, allow_none=True):
"""
Normalize a schema.
"""
if allow_none and schema is None:
return schema
if isinstance(schema, CommonSchema):
return schema
if isinstance(schema, StreamSchema):
return schema
if isinstance(schema, basestring):
return StreamSchema(schema)
py_types = {
_spl_object: CommonSchema.Python,
_spl_str: CommonSchema.String,
json: CommonSchema.Json,
}
if schema in py_types:
return py_types[schema]
# With Python 3 allow a named tuple with type hints
# to be used as a schema definition
if sys.version_info.major == 3:
import typing
if isinstance(schema, type) and issubclass(schema, tuple):
if hasattr(schema, '_fields') and hasattr(schema, '_field_types'):
return _from_named_tuple(schema)
raise ValueError("Unknown stream schema type:" + str(schema))
def download_file(url, dst_path):
"""Download a file from a url"""
request = requests.get(url, stream=True)
with open(dst_path, 'wb') as downloaded_file:
request.raw.decode_content = True
shutil.copyfileobj(request.raw, downloaded_file)
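# Usage sketch (placeholder URL and path). stream=True plus decode_content=True
# lets shutil.copyfileobj spool the decompressed body straight to disk.
download_file('https://example.com/data/archive.zip', '/tmp/archive.zip')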
def read_list(self, request):
"""
Implements the List read (get a list of objects)
maps to GET /api/objects/ in rest semantics
:param request: rip.Request
:return: rip.Response
"""
pipeline = crud_pipeline_factory.read_list_pipeline(
configuration=self.configuration)
return pipeline(request=request)
def simplify(self, options=None):
"""
provide a simple representation of this change as a dictionary
"""
# TODO: we might want to get rid of this method and just move
# it into the JSONEncoder in report.py
simple = super(GenericChange, self).simplify(options)
ld = self.pretty_ldata()
if ld is not None:
simple["old_data"] = ld
rd = self.pretty_rdata()
if rd is not None:
simple["new_data"] = rd
return simple
def load_xml(self, xmlfile, **kwargs):
"""Load sources from an XML file."""
extdir = kwargs.get('extdir', self.extdir)
coordsys = kwargs.get('coordsys', 'CEL')
if not os.path.isfile(xmlfile):
xmlfile = os.path.join(fermipy.PACKAGE_DATA, 'catalogs', xmlfile)
root = ElementTree.ElementTree(file=xmlfile).getroot()
diffuse_srcs = []
srcs = []
ra, dec = [], []
for s in root.findall('source'):
src = Source.create_from_xml(s, extdir=extdir)
if src.diffuse:
diffuse_srcs += [src]
else:
srcs += [src]
ra += [src['RAJ2000']]
dec += [src['DEJ2000']]
src_skydir = SkyCoord(ra=np.array(ra) * u.deg,
dec=np.array(dec) * u.deg)
radec = np.vstack((src_skydir.ra.deg, src_skydir.dec.deg)).T
glonlat = np.vstack((src_skydir.galactic.l.deg,
src_skydir.galactic.b.deg)).T
offset = self.skydir.separation(src_skydir).deg
offset_cel = wcs_utils.sky_to_offset(self.skydir,
radec[:, 0], radec[:, 1], 'CEL')
offset_gal = wcs_utils.sky_to_offset(self.skydir,
glonlat[:, 0], glonlat[:, 1], 'GAL')
m0 = get_skydir_distance_mask(src_skydir, self.skydir,
self.config['src_radius'])
m1 = get_skydir_distance_mask(src_skydir, self.skydir,
self.config['src_radius_roi'],
square=True, coordsys=coordsys)
m = (m0 & m1)
srcs = np.array(srcs)[m]
for i, s in enumerate(srcs):
s.data['offset'] = offset[m][i]
s.data['offset_ra'] = offset_cel[:, 0][m][i]
s.data['offset_dec'] = offset_cel[:, 1][m][i]
s.data['offset_glon'] = offset_gal[:, 0][m][i]
s.data['offset_glat'] = offset_gal[:, 1][m][i]
self.load_source(s, False,
merge_sources=self.config['merge_sources'])
for i, s in enumerate(diffuse_srcs):
self.load_source(s, False,
merge_sources=self.config['merge_sources'])
self._build_src_index()
return srcs
def rnd_date(start=date(1970, 1, 1), end=None, **kwargs):
"""
Generate a random date between ``start`` and ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
Randomly generates a date between ``start`` and ``end``.
"""
if end is None:
end = date.today()
start = parser.parse_date(start)
end = parser.parse_date(end)
_assert_correct_start_end(start, end)
return _rnd_date(start, end)
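# Illustrative calls, assuming parser.parse_date accepts ISO-formatted strings:
rnd_date()                                      # between 1970-01-01 and today
rnd_date('2020-01-01', '2020-12-31')            # string bounds
rnd_date(date(2020, 1, 1), date(2020, 12, 31))  # date bounds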
def country_field(key='country'):
"""Provides a select box for country selection"""
country_list = list(countries)
title_map = []
for item in country_list:
title_map.append({'value': item.alpha_3, 'name': item.name})
widget = {
'key': key,
'type': 'uiselect',
'titleMap': title_map
}
return widget
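# Shape of the returned Angular Schema Form widget (the exact titleMap entries
# depend on the installed countries package; the values below are examples):
widget = country_field('country')
widget['type']         # 'uiselect'
widget['titleMap'][0]  # e.g. {'value': 'ABW', 'name': 'Aruba'}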
def sort_rows(self, rows, section):
"""Sort the rows, as appropriate for the section.
:param rows: List of tuples (all same length, same values in each position)
:param section: Name of section, should match const in Differ class
:return: None; rows are sorted in-place
"""
#print("@@ SORT ROWS:\n{}".format(rows))
# Section-specific determination of sort key
if section.lower() == Differ.CHANGED.lower():
sort_key = Differ.CHANGED_DELTA
else:
sort_key = None
if sort_key is not None:
rows.sort(key=itemgetter(sort_key))
def eaMuPlusLambda(population, toolbox, mu, lambda_, cxpb, mutpb, ngen, pbar,
stats=None, halloffame=None, verbose=0, per_generation_function=None):
"""This is the :math:`(\mu + \lambda)` evolutionary algorithm.
:param population: A list of individuals.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param mu: The number of individuals to select for the next generation.
:param lambda\_: The number of children to produce at each generation.
:param cxpb: The probability that an offspring is produced by crossover.
:param mutpb: The probability that an offspring is produced by mutation.
:param ngen: The number of generation.
:param pbar: processing bar
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace, optional.
:param halloffame: A :class:`~deap.tools.HallOfFame` object that will
contain the best individuals, optional.
:param verbose: Whether or not to log the statistics.
:param per_generation_function: if supplied, call this function before each generation
used by tpot to save best pipeline before each new generation
:returns: The final population
:returns: A class:`~deap.tools.Logbook` with the statistics of the
evolution.
The algorithm takes in a population and evolves it in place using the
:func:`varOr` function. It returns the optimized population and a
:class:`~deap.tools.Logbook` with the statistics of the evolution. The
logbook will contain the generation number, the number of evaluations for
each generation and the statistics if a :class:`~deap.tools.Statistics` is
given as argument. The *cxpb* and *mutpb* arguments are passed to the
:func:`varOr` function. The pseudocode goes as follows::
evaluate(population)
for g in range(ngen):
offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
evaluate(offspring)
population = select(population + offspring, mu)
First, the individuals having an invalid fitness are evaluated. Second,
the evolutionary loop begins by producing *lambda_* offspring from the
population, the offspring are generated by the :func:`varOr` function. The
offspring are then evaluated and the next generation population is
selected from both the offspring **and** the population. Finally, when
*ngen* generations are done, the algorithm returns a tuple with the final
population and a :class:`~deap.tools.Logbook` of the evolution.
This function expects :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
:meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
registered in the toolbox. This algorithm uses the :func:`varOr`
variation.
"""
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
# Initialize statistics dict for the individuals in the population, to keep track of mutation/crossover operations and predecessor relations
for ind in population:
initialize_stats_dict(ind)
population = toolbox.evaluate(population)
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=0, nevals=len(population), **record)
# Begin the generational process
for gen in range(1, ngen + 1):
# after each population save a periodic pipeline
if per_generation_function is not None:
per_generation_function(gen)
# Vary the population
offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
# Update generation statistic for all individuals which have invalid 'generation' stats
# This hold for individuals that have been altered in the varOr function
for ind in population:
if ind.statistics['generation'] == 'INVALID':
ind.statistics['generation'] = gen
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
offspring = toolbox.evaluate(offspring)
# Select the next generation population
population[:] = toolbox.select(population + offspring, mu)
# pbar process
if not pbar.disable:
# Print only the best individual fitness
if verbose == 2:
high_score = max([halloffame.keys[x].wvalues[1] for x in range(len(halloffame.keys))])
pbar.write('Generation {0} - Current best internal CV score: {1}'.format(gen, high_score))
# Print the entire Pareto front
elif verbose == 3:
pbar.write('Generation {} - Current Pareto front scores:'.format(gen))
for pipeline, pipeline_scores in zip(halloffame.items, reversed(halloffame.keys)):
pbar.write('{}\t{}\t{}'.format(
int(pipeline_scores.wvalues[0]),
pipeline_scores.wvalues[1],
pipeline
)
)
pbar.write('')
# after each population save a periodic pipeline
if per_generation_function is not None:
per_generation_function(gen)
# Update the statistics with the new population
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
return population, logbook
def to_yaml(self, ignore_none: bool=True, ignore_empty: bool=False) -> str:
"""From instance to yaml string
:param ignore_none: Properties which is None are excluded if True
:param ignore_empty: Properties which is empty are excluded if True
:return: Yaml string
Usage:
>>> from owlmixin.samples import Human
>>> human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... })
>>> print(human.to_yaml())
favorites:
- name: Apple
names_by_lang:
de: Apfel
en: Apple
- name: Orange
id: 1
name: Tom
<BLANKLINE>
"""
return util.dump_yaml(traverse(self, ignore_none, force_value=True, ignore_empty=ignore_empty))
def do(self, arg):
".exchain - Show the SEH chain"
thread = self.get_thread_from_prefix()
print "Exception handlers for thread %d" % thread.get_tid()
print
table = Table()
table.addRow("Block", "Function")
bits = thread.get_bits()
for (seh, seh_func) in thread.get_seh_chain():
if seh is not None:
seh = HexDump.address(seh, bits)
if seh_func is not None:
seh_func = HexDump.address(seh_func, bits)
table.addRow(seh, seh_func)
print(table.getOutput())
def subdevicenames(self) -> Tuple[str, ...]:
"""A |tuple| containing the (sub)device names.
Property |NetCDFVariableFlat.subdevicenames| clarifies which
row of |NetCDFVariableAgg.array| contains which time series.
For 0-dimensional series like |lland_inputs.Nied|, the plain
device names are returned:
>>> from hydpy.core.examples import prepare_io_example_1
>>> nodes, elements = prepare_io_example_1()
>>> from hydpy.core.netcdftools import NetCDFVariableFlat
>>> ncvar = NetCDFVariableFlat('input_nied', isolate=False, timeaxis=1)
>>> for element in elements:
... nied1 = element.model.sequences.inputs.nied
... ncvar.log(nied1, nied1.series)
>>> ncvar.subdevicenames
('element1', 'element2', 'element3')
For higher dimensional sequences like |lland_fluxes.NKor|, an
additional suffix defines the index of the respective subdevice.
For example, the third row of |NetCDFVariableAgg.array| contains
the time series of the first hydrological response unit of the
second element:
>>> ncvar = NetCDFVariableFlat('flux_nkor', isolate=False, timeaxis=1)
>>> for element in elements:
... nkor1 = element.model.sequences.fluxes.nkor
... ncvar.log(nkor1, nkor1.series)
>>> ncvar.subdevicenames[1:3]
('element2_0', 'element2_1')
"""
stats: List[str] = collections.deque()
for devicename, seq in self.sequences.items():
if seq.NDIM:
temp = devicename + '_'
for prod in self._product(seq.shape):
stats.append(temp + '_'.join(str(idx) for idx in prod))
else:
stats.append(devicename)
return tuple(stats)
async def download_media(self, message, file=None,
*, thumb=None, progress_callback=None):
"""
Downloads the given media, or the media from a specified Message.
Note that if the download is too slow, you should consider installing
``cryptg`` (through ``pip install cryptg``) so that decrypting the
received data is done in C instead of Python (much faster).
message (`Message <telethon.tl.custom.message.Message>` | :tl:`Media`):
The media or message containing the media that will be downloaded.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If file is the type `bytes`, it will be downloaded in-memory
as a bytestring (e.g. ``file=bytes``).
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(received bytes, total)``.
thumb (`int` | :tl:`PhotoSize`, optional):
Which thumbnail size from the document or photo to download,
instead of downloading the document or photo itself.
If it's specified but the file does not have a thumbnail,
this method will return ``None``.
The parameter should be an integer index between ``0`` and
``len(sizes)``. ``0`` will download the smallest thumbnail,
and ``len(sizes) - 1`` will download the largest thumbnail.
You can also use negative indices.
You can also pass the :tl:`PhotoSize` instance to use.
In short, use ``thumb=0`` if you want the smallest thumbnail
and ``thumb=-1`` if you want the largest thumbnail.
Returns:
``None`` if no media was provided, or if it was Empty. On success
the file path is returned since it may differ from the one given.
"""
# TODO This won't work for messageService
if isinstance(message, types.Message):
date = message.date
media = message.media
else:
date = datetime.datetime.now()
media = message
if isinstance(media, str):
media = utils.resolve_bot_file_id(media)
if isinstance(media, types.MessageMediaWebPage):
if isinstance(media.webpage, types.WebPage):
media = media.webpage.document or media.webpage.photo
if isinstance(media, (types.MessageMediaPhoto, types.Photo)):
return await self._download_photo(
media, file, date, thumb, progress_callback
)
elif isinstance(media, (types.MessageMediaDocument, types.Document)):
return await self._download_document(
media, file, date, thumb, progress_callback
)
elif isinstance(media, types.MessageMediaContact) and thumb is None:
return self._download_contact(
media, file
)
elif isinstance(media, (types.WebDocument, types.WebDocumentNoProxy)) and thumb is None:
return await self._download_web_document(
media, file, progress_callback
)
def add_attributes(self, data, type):
""" add required attributes """
for attr, ancestry in type.attributes():
name = '_%s' % attr.name
value = attr.get_default()
setattr(data, name, value)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64')
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype='int64'))
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def map_words(self, start, end):
"""Return a memory-map of the elements `start` through `end`.
The memory map will offer the 8-byte double-precision floats
("elements") in the file from index `start` through to the index
`end`, inclusive, both counting the first float as element 1.
Memory maps must begin on a page boundary, so `skip` returns the
number of extra bytes at the beginning of the return value.
"""
i, j = 8 * start - 8, 8 * end
try:
fileno = self.file.fileno()
except (AttributeError, io.UnsupportedOperation):
fileno = None
if fileno is None:
skip = 0
self.file.seek(i)
m = self.file.read(j - i)
else:
skip = i % mmap.ALLOCATIONGRANULARITY
r = mmap.ACCESS_READ
m = mmap.mmap(fileno, length=j-i+skip, access=r, offset=i-skip)
if sys.version_info > (3,):
m = memoryview(m) # so further slicing can return views
return m, skip
def get_task_filelist(cls, task_factory, courseid, taskid):
""" Returns a flattened version of all the files inside the task directory, excluding the files task.* and hidden files.
It returns a list of tuples, of the type (Integer Level, Boolean IsDirectory, String Name, String CompleteName)
"""
task_fs = task_factory.get_task_fs(courseid, taskid)
if not task_fs.exists():
return []
tmp_out = {}
entries = task_fs.list(True, True, True)
for entry in entries:
if os.path.splitext(entry)[0] == "task" and os.path.splitext(entry)[1][1:] in task_factory.get_available_task_file_extensions():
continue
data = entry.split("/")
is_directory = False
if data[-1] == "":
is_directory = True
data = data[0:len(data)-1]
cur_pos = 0
tree_pos = tmp_out
while cur_pos != len(data):
if data[cur_pos] not in tree_pos:
tree_pos[data[cur_pos]] = {} if is_directory or cur_pos != len(data) - 1 else None
tree_pos = tree_pos[data[cur_pos]]
cur_pos += 1
def recur_print(current, level, current_name):
iteritems = sorted(current.items())
# First, the files
recur_print.flattened += [(level, False, f, current_name+"/"+f) for f, t in iteritems if t is None]
# Then, the dirs
for name, sub in iteritems:
if sub is not None:
recur_print.flattened.append((level, True, name, current_name+"/"+name+"/"))
recur_print(sub, level + 1, current_name + "/" + name)
recur_print.flattened = []
recur_print(tmp_out, 0, '')
return recur_print.flattened
def fit(self, x0=None, distribution='lognormal', n=None, **kwargs):
'''Incomplete method to fit experimental values to a curve. It is very
hard to get good initial guesses, which are really required for this.
Differential evolution is promising. This API is likely to change in
the future.
'''
dist = {'lognormal': PSDLognormal,
'GGS': PSDGatesGaudinSchuhman,
'RR': PSDRosinRammler}[distribution]
if distribution == 'lognormal':
if x0 is None:
d_characteristic = sum([fi*di for fi, di in zip(self.fractions, self.Dis)])
s = 0.4
x0 = [d_characteristic, s]
elif distribution == 'GGS':
if x0 is None:
d_characteristic = sum([fi*di for fi, di in zip(self.fractions, self.Dis)])
m = 1.5
x0 = [d_characteristic, m]
elif distribution == 'RR':
if x0 is None:
x0 = [5E-6, 1e-2]
from scipy.optimize import minimize
return minimize(self._fit_obj_function, x0, args=(dist, n), **kwargs)
def get_deck(self):
"""
Returns parent :Deck: of a :Placeable:
"""
trace = self.get_trace()
# Find decks in trace, prepend with [None] in case nothing was found
res = [None] + [item for item in trace if isinstance(item, Deck)]
# Pop last (and hopefully only Deck) or None if there is no deck
return res.pop()
def groupcountdistinctvalues(table, key, value):
"""Group by the `key` field then count the number of distinct values in the
`value` field."""
s1 = cut(table, key, value)
s2 = distinct(s1)
s3 = aggregate(s2, key, len)
return s3
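# Small example, assuming cut/distinct/aggregate are petl's table transforms
# as the calls above suggest:
table = [['foo', 'bar'],
         ['a', 1],
         ['a', 1],
         ['a', 2],
         ['b', 3]]
groupcountdistinctvalues(table, 'foo', 'bar')
# -> rows ('a', 2) and ('b', 1): 'a' has two distinct bar values, 'b' has one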
def decode_schedule(string):
"""Decodes a string into a schedule tuple.
Args:
string: The string encoding of a schedule tuple.
Returns:
A schedule tuple, see encode_schedule for details.
"""
splits = string.split()
steps = [int(x[1:]) for x in splits[1:] if x[0] == '@']
pmfs = np.reshape(
[float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1])
return splits[0], tuplize(steps), tuplize(pmfs)
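# Hedged example of the encoding implied by the parsing above: tokens starting
# with '@' are step boundaries, the rest are pmf entries reshaped to one row
# per step (tuplize is assumed to convert nested sequences to tuples).
decode_schedule('linear @10 0.5 0.5 @20 0.2 0.8')
# -> ('linear', (10, 20), ((0.5, 0.5), (0.2, 0.8)))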
def np_lst_sq(vecMdl, aryFuncChnk):
"""Least squares fitting in numpy without cross-validation.
Notes
-----
This is just a wrapper function for np.linalg.lstsq to keep piping
consistent.
"""
aryTmpBts, vecTmpRes = np.linalg.lstsq(vecMdl,
aryFuncChnk,
rcond=-1)[:2]
return aryTmpBts, vecTmpRes
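# Shape conventions, sketched with random data: one column of betas and one
# summed squared residual per data column.
import numpy as np
vecMdl = np.random.rand(100, 3)       # design matrix: 100 samples, 3 predictors
aryFuncChnk = np.random.rand(100, 5)  # data chunk: 100 samples, 5 columns
betas, res = np_lst_sq(vecMdl, aryFuncChnk)
print(betas.shape, res.shape)         # (3, 5) (5,)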
def _recover_cfg(self, start=None, end=None, symbols=None, callback=None):
"""Recover CFG
"""
# Retrieve symbol name in case it is available.
if symbols and start in symbols:
name = symbols[start][0]
size = symbols[start][1] - 1 if symbols[start][1] != 0 else 0
else:
name = "sub_{:x}".format(start)
size = 0
# Compute start and end address.
start_addr = start if start else self.binary.ea_start
end_addr = end if end else self.binary.ea_end
# Set callback.
if callback:
callback(start, name, size)
# Recover basic blocks.
bbs, calls = self.bb_builder.build(start_addr, end_addr, symbols)
# Build CFG.
cfg = ControlFlowGraph(bbs, name=name)
return cfg, calls
def min_pulse_sp(self):
"""
Used to set the pulse size in milliseconds for the signal that tells the
servo to drive to the minimum (counter-clockwise) position_sp. Default value
is 600. Valid values are 300 to 700. You must write to the position_sp
attribute for changes to this attribute to take effect.
"""
self._min_pulse_sp, value = self.get_attr_int(self._min_pulse_sp, 'min_pulse_sp')
return value
def parseCmdline(self, requestData):
"""
Parse the request command string.
Input:
Self with request filled in.
Output:
Request Handle updated with the parsed information so that
it is accessible via key/value pairs for later processing.
Return code - 0: successful, non-zero: error
"""
self.printSysLog("Enter ReqHandle.parseCmdline")
# Save the request data based on the type of operand.
if isinstance(requestData, list):
self.requestString = ' '.join(requestData) # Request as a string
self.request = requestData # Request as a list
elif isinstance(requestData, string_types):
self.requestString = requestData # Request as a string
self.request = shlex.split(requestData) # Request as a list
else:
# Request data type is not supported.
msg = msgs.msg['0012'][1] % (modId, type(requestData))
self.printLn("ES", msg)
self.updateResults(msgs.msg['0012'][0])
return self.results
self.totalParms = len(self.request) # Number of parms in the cmd
# Handle the request, parse it or return an error.
if self.totalParms == 0:
# Too few arguments.
msg = msgs.msg['0009'][1] % modId
self.printLn("ES", msg)
self.updateResults(msgs.msg['0009'][0])
elif self.totalParms == 1:
self.function = self.request[0].upper()
if self.function == 'HELP' or self.function == 'VERSION':
pass
else:
# Function is not HELP or VERSION.
msg = msgs.msg['0008'][1] % (modId, self.function)
self.printLn("ES", msg)
self.updateResults(msgs.msg['0008'][0])
else:
# Process based on the function operand.
self.function = self.request[0].upper()
if self.request[0] == 'HELP' or self.request[0] == 'VERSION':
pass
else:
# Handle the function related parms by calling the function
# parser.
if self.function in ReqHandle.funcHandler:
self.funcHandler[self.function][2](self)
else:
# Unrecognized function
msg = msgs.msg['0007'][1] % (modId, self.function)
self.printLn("ES", msg)
self.updateResults(msgs.msg['0007'][0])
self.printSysLog("Exit ReqHandle.parseCmdline, rc: " +
str(self.results['overallRC']))
return self.results
def get(self, what):
"""
Get the ANSI code for 'what'
Returns an empty string if disabled/not found
"""
if self.enabled:
if what in self.colors:
return self.colors[what]
return ''
def get_version(self, dependency):
"""Return the installed version parsing the output of 'pip show'."""
logger.debug("getting installed version for %s", dependency)
stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)])
version = [line for line in stdout if line.startswith('Version:')]
if len(version) == 1:
version = version[0].strip().split()[1]
logger.debug("Installed version of %s is: %s", dependency, version)
return version
else:
logger.error('Fades is having problems getting the installed version. '
'Run with -v or check the logs for details')
return ''
def wash_for_utf8(text, correct=True):
"""Return UTF-8 encoded binary string with incorrect characters washed away.
:param text: input string to wash (can be either a binary or Unicode string)
:param correct: whether to correct bad characters or throw exception
"""
if isinstance(text, unicode):
return text.encode('utf-8')
errors = "ignore" if correct else "strict"
return text.decode("utf-8", errors).encode("utf-8", errors)
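# Behaviour sketch (this snippet is Python 2, where unicode and byte strings
# are distinct types):
wash_for_utf8(u'caf\xe9')                    # -> 'caf\xc3\xa9' (UTF-8 bytes)
wash_for_utf8('bad\xffbyte')                 # -> 'badbyte', invalid byte dropped
wash_for_utf8('bad\xffbyte', correct=False)  # raises UnicodeDecodeError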
def _render_border_line(self, t, settings):
"""
Render box border line.
"""
s = self._es(settings, self.SETTING_WIDTH, self.SETTING_MARGIN, self.SETTING_MARGIN_LEFT, self.SETTING_MARGIN_RIGHT)
w = self.calculate_width_widget(**s)
s = self._es(settings, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)
border_line = self.fmt_border(w, t, **s)
s = self._es(settings, self.SETTING_MARGIN, self.SETTING_MARGIN_LEFT, self.SETTING_MARGIN_RIGHT, self.SETTING_MARGIN_CHAR)
border_line = self.fmt_margin(border_line, **s)
return border_line
def get_ruptures_within(dstore, bbox):
"""
Extract the ruptures within the given bounding box, a string
minlon,minlat,maxlon,maxlat.
Example:
http://127.0.0.1:8800/v1/calc/30/extract/ruptures_with/8,44,10,46
"""
minlon, minlat, maxlon, maxlat = map(float, bbox.split(','))
hypo = dstore['ruptures']['hypo'].T # shape (3, N)
mask = ((minlon <= hypo[0]) * (minlat <= hypo[1]) *
(maxlon >= hypo[0]) * (maxlat >= hypo[1]))
return dstore['ruptures'][mask]
def getboolean_config(section, option, default=False):
'''
Get data from configs which store boolean records
'''
try:
return config.getboolean(section, option) or default
except ConfigParser.NoSectionError:
return default
def get_metadata_or_fail(metadata_key):
"""
Call get_metadata; halt with fail() if it raises an exception
"""
try:
return http_get_metadata(metadata_key)
except IOError as error:
fail("Exception in http_get_metadata {} {}".format(metadata_key, repr(error))) | 0.018182 |
def main(conf_file, overwrite, logger):
"""
Create configuration and log file. Restart the daemon when configuration
is done.
Args:
conf_file (str): Path to the configuration file.
overwrite (bool): Overwrite the configuration file with `clean` config?
"""
uid = pwd.getpwnam(get_username()).pw_uid
# stop the daemon
logger.info("Stopping the daemon.")
sh.service(get_service_name(), "stop")
# create files
logger.info("Creating config file.")
create_config(
cnf_file=conf_file,
uid=uid,
overwrite=overwrite
)
logger.info("Creating log file.")
create_log(
log_file=REQUIRED_SETTINGS["LogFile"],
uid=uid
)
# start the daemon
logger.info("Starting the daemon..")
sh.service(get_service_name(), "start")
def data(self, ctx=None):
"""Returns a copy of this parameter on one context. Must have been
initialized on this context before.
Parameters
----------
ctx : Context
Desired context.
Returns
-------
NDArray on ctx
"""
d = self._check_and_get(self._data, ctx)
if self._rate:
d = nd.Dropout(d, self._rate, self._mode, self._axes)
return d
def snippetWithLink(self, url):
""" This method will try to return the first
<p> or <div> that contains an <a> tag linking to
the given URL.
"""
link = self.soup.find("a", attrs={'href': url})
if link:
for p in link.parents:
if p.name in ('p', 'div'):
return ' '.join(p.text.split()[0:30])
return None
def file_describe(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /file-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Files#API-method%3A-%2Ffile-xxxx%2Fdescribe
"""
return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs)
def validate_schema(cls, tx):
"""Validate the validator election vote transaction. Since `VOTE` extends `TRANSFER`
transaction, all the validations for `CREATE` transaction should be inherited
"""
_validate_schema(TX_SCHEMA_COMMON, tx)
_validate_schema(TX_SCHEMA_TRANSFER, tx)
_validate_schema(cls.TX_SCHEMA_CUSTOM, tx)
def add_table(self, dataframe, isStyled = False):
"""This method stores plain html string."""
if isStyled :
table_string = dataframe.render()
else :
table_string = dataframe.style.render()
table_string = table_string.replace("\n", "").replace("<table", """<table class = "table table-sm table-hover" """).replace("<thead>", """<thead class="thead-inverse">""")
self.table = table_string
def send_html(self, html, body=None, msgtype="m.text"):
"""Send an html formatted message.
Args:
html (str): The html formatted message to be sent.
body (str): The unformatted body of the message to be sent.
"""
return self.client.api.send_message_event(
self.room_id, "m.room.message", self.get_html_content(html, body, msgtype))
def status_raw(name=None, user=None, conf_file=None, bin_env=None):
'''
Display the raw output of status
user
user to run supervisorctl as
conf_file
path to supervisord config file
bin_env
path to supervisorctl bin or path to virtualenv with supervisor
installed
CLI Example:
.. code-block:: bash
salt '*' supervisord.status_raw
'''
ret = __salt__['cmd.run_all'](
_ctl_cmd('status', name, conf_file, bin_env),
runas=user,
python_shell=False,
)
return _get_return(ret)
def getIRThreshold(self):
"""Returns the IR temperature threshold in degrees Celcius, or 0 if no Threshold is set"""
command = '$GO'
threshold = self.sendCommand(command)
if threshold[0] == 'NK':
return 0
else:
return float(threshold[2])/10
def build_keyjar(key_conf, kid_template="", keyjar=None, owner=''):
"""
Builds a :py:class:`oidcmsg.key_jar.KeyJar` instance or adds keys to
an existing KeyJar based on a key specification.
An example of such a specification::
keys = [
{"type": "RSA", "key": "cp_keys/key.pem", "use": ["enc", "sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"},
{"type": "EC", "crv": "P-256", "use": ["enc"], "kid": "ec.2"}
]
Keys in this specification are:
type
The type of key. Presently only 'RSA' and 'EC' are supported.
key
A name of a file where a key can be found. Only works with PEM encoded
RSA keys
use
What the key should be used for
crv
The elliptic curve that should be used. Only applies to elliptic curve
keys :-)
kid
Key ID, can only be used with one usage type is specified. If there
are more the one usage type specified 'kid' will just be ignored.
:param key_conf: The key configuration
:param kid_template: A template by which to build the key IDs. If no
kid_template is given then the built-in function add_kid() will be used.
:param keyjar: If an KeyJar instance the new keys are added to this key jar.
:param owner: The default owner of the keys in the key jar.
:return: A KeyJar instance
"""
if keyjar is None:
keyjar = KeyJar()
tot_kb = build_key_bundle(key_conf, kid_template)
keyjar.add_kb(owner, tot_kb)
return keyjar
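# Usage sketch based on the specification documented above. Whether keys are
# generated when no "key" file is given depends on build_key_bundle, which is
# assumed here; the owner URL is a placeholder.
key_conf = [
    {"type": "RSA", "use": ["sig"]},
    {"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.sig"},
]
keyjar = build_keyjar(key_conf, owner='https://op.example.com')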
def correlation(P, obs1, obs2=None, times=[1], k=None):
r"""Time-correlation for equilibrium experiment.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvectors and eigenvalues to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times
"""
M = P.shape[0]
T = np.asarray(times).max()
if T < M:
return correlation_matvec(P, obs1, obs2=obs2, times=times)
else:
return correlation_decomp(P, obs1, obs2=obs2, times=times, k=k)
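# Minimal sketch on a two-state chain (relies on the correlation_matvec /
# correlation_decomp helpers referenced above):
import numpy as np
P = np.array([[0.9, 0.1],
              [0.2, 0.8]])   # row-stochastic transition matrix
obs = np.array([1.0, -1.0])  # observable on the two states
corr = correlation(P, obs, times=[1, 10, 100])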
def getPk(self):
'''
getPk - @see ForeignLinkData.getPk
'''
if not self.pk or None in self.pk:
for i in range( len(self.pk) ):
if self.pk[i]:
continue
if self.obj[i] and self.obj[i]._id:
self.pk[i] = self.obj[i]._id
return self.pk
def first_or_create(cls, parent=None, **attributes):
"""
Attempts to find the first resource with the same attributes, creates the resource if no matches are found.
This will trigger an api GET request and a POST request if the resource does not exist.
:param parent ResourceBase: the parent of the resource - used for nesting the request url, optional
:param **attributes: any number of keyword arguments as attributes to search/create the resource
:returns: a resource instance - the existing one if found, otherwise the newly created one
"""
existing_resource = cls.find_by(parent, **attributes)
if existing_resource:
return existing_resource
return cls(**attributes).create()
def still_optimizing(self):
"""True unless converged or maximum iterations/time exceeded."""
# Check if we need to give up on optimizing.
if (self.iterations > self.max_iter) or (self.time_elapsed() > self.max_time):
return False
# Always optimize for at least 'min_iter' iterations.
elif not hasattr(self, 'improvement') or (self.iterations < self.min_iter):
return True
# Check convergence.
else:
self.converged = self.improvement < self.tol
return False if self.converged else True
def _get_nonce(self, url):
"""
Get a nonce to use in a request, removing it from the nonces on hand.
"""
action = LOG_JWS_GET_NONCE()
if len(self._nonces) > 0:
with action:
nonce = self._nonces.pop()
action.add_success_fields(nonce=nonce)
return succeed(nonce)
else:
with action.context():
return (
DeferredContext(self.head(url))
.addCallback(self._add_nonce)
.addCallback(lambda _: self._nonces.pop())
.addCallback(tap(
lambda nonce: action.add_success_fields(nonce=nonce)))
.addActionFinish())
def get_relative_positions_of_waypoints(transition_v):
"""This method takes the waypoints of a connection and returns all relative positions of these waypoints.
:param transition_v: Transition view to extract all relative waypoint positions
:return: List with all relative positions of the given transition
"""
handles_list = transition_v.handles()
rel_pos_list = []
for handle in handles_list:
if handle in transition_v.end_handles(include_waypoints=True):
continue
rel_pos = transition_v.canvas.get_matrix_i2i(transition_v, transition_v.parent).transform_point(*handle.pos)
rel_pos_list.append(rel_pos)
return rel_pos_list
def sign_bitcoin(self, message, compressed=False):
""" Signs a message using this private key such that it
is compatible with bitcoind, bx, and other Bitcoin
clients/nodes/utilities.
Note:
0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is
prepended to the message before signing.
Args:
message (bytes or str): Message to be signed.
compressed (bool): True if the corresponding public key will be
used in compressed format. False if the uncompressed version
is used.
Returns:
bytes: A Base64-encoded byte string of the signed message.
The first byte of the encoded message contains information
about how to recover the public key. In bitcoind parlance,
this is the magic number containing the recovery ID and
whether or not the key was compressed or not.
"""
if isinstance(message, str):
msg_in = bytes(message, 'ascii')
elif isinstance(message, bytes):
msg_in = message
else:
raise TypeError("message must be either str or bytes!")
msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in
msg_hash = hashlib.sha256(msg).digest()
sig = self.sign(msg_hash)
comp_adder = 4 if compressed else 0
magic = 27 + sig.recovery_id + comp_adder
return base64.b64encode(bytes([magic]) + bytes(sig))
def copy(self):
"""
Returns a copy of the datamat.
"""
return self.filter(np.ones(self._num_fix).astype(bool))
def vcs_virtual_ip_address_inband_interface_ve(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcs = ET.SubElement(config, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
virtual = ET.SubElement(vcs, "virtual")
ip = ET.SubElement(virtual, "ip")
address = ET.SubElement(ip, "address")
address_key = ET.SubElement(address, "address")
address_key.text = kwargs.pop('address')
inband = ET.SubElement(address, "inband")
interface = ET.SubElement(inband, "interface")
ve = ET.SubElement(interface, "ve")
ve.text = kwargs.pop('ve')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def _add_run_info(self, idx, name='', timestamp=42.0, finish_timestamp=1.337,
runtime='forever and ever', time='>>Maybe time`s gone on strike',
completed=0, parameter_summary='Not yet my friend!',
short_environment_hexsha='N/A'):
"""Adds a new run to the `_run_information` dict."""
if idx in self._single_run_ids:
# Delete old entries, they might be replaced by a new name
old_name = self._single_run_ids[idx]
del self._single_run_ids[old_name]
del self._single_run_ids[idx]
del self._run_information[old_name]
if name == '':
name = self.f_wildcard('$', idx)
# The `_single_run_ids` dict is bidirectional and maps indices to run names and vice versa
self._single_run_ids[name] = idx
self._single_run_ids[idx] = name
info_dict = {'idx': idx,
'timestamp': timestamp,
'finish_timestamp': finish_timestamp,
'runtime': runtime,
'time': time,
'completed': completed,
'name': name,
'parameter_summary': parameter_summary,
'short_environment_hexsha': short_environment_hexsha}
self._run_information[name] = info_dict
self._length = len(self._run_information)
def mouseDoubleClickEvent(self, event):
"""
Overloads when a mouse double-click occurs. If in editable mode, and the
click occurs on a selected index, then the editor will be created
and no selection change will occur.
:param event | <QMousePressEvent>
"""
item = self.itemAt(event.pos())
column = self.columnAt(event.pos().x())
mid_button = event.button() == QtCore.Qt.MidButton
ctrl_click = event.button() == QtCore.Qt.LeftButton and \
event.modifiers() == QtCore.Qt.ControlModifier
if mid_button or ctrl_click:
self.itemMiddleDoubleClicked.emit(item, column)
elif event.button() == QtCore.Qt.RightButton:
self.itemRightDoubleClicked.emit(item, column)
else:
super(XTreeWidget, self).mouseDoubleClickEvent(event)
def DAVIDgetGeneAttribute(x,df,refCol="ensembl_gene_id",fieldTOretrieve="gene_name"):
"""
Returns a list of gene names for given gene ids.
:param x: a string with the list of IDs separated by ', '
:param df: a dataframe with the reference column and the column to retrieve
:param refCol: the header of the column containing the identifiers
:param fieldTOretrieve: the field to retrieve from parsedGTF eg. 'gene_name'
:returns: list of fieldTOretrieve separated by ', ' in the same order as given in x
"""
l=x.split(", ")
l=[ s.upper() for s in l ]
tmpdf=pd.DataFrame({refCol:l},index=range(len(l)))
df_fix=df[[refCol,fieldTOretrieve]].drop_duplicates()
df_fix[refCol]=df_fix[refCol].apply(lambda x: x.upper())
ids=pd.merge(tmpdf,df_fix,how="left",on=[refCol])
ids=ids[fieldTOretrieve].tolist()
ids=[ str(s) for s in ids ]
ids=", ".join(ids)
return ids
def horz_offset(self, offset):
"""
Set the value of ./c:x@val to *offset* and ./c:xMode@val to "factor".
"""
self.get_or_add_xMode().val = ST_LayoutMode.FACTOR
self.get_or_add_x().val = offset
def OnClose(self, event):
"""Program exit event handler"""
# If changes have taken place, ask the user whether to save the old grid
if undo.stack().haschanged():
save_choice = self.interfaces.get_save_request_from_user()
if save_choice is None:
# Cancelled close operation
return
elif save_choice:
# User wants to save content
post_command_event(self.main_window, self.main_window.SaveMsg)
# Save the AUI state
config["window_layout"] = repr(self.main_window._mgr.SavePerspective())
# Uninit the AUI stuff
self.main_window._mgr.UnInit()
# Save config
config.save()
# Close main_window
self.main_window.Destroy()
# Set file mode to 600 to protect GPG passwd a bit
sp = wx.StandardPaths.Get()
pyspreadrc_path = sp.GetUserConfigDir() + "/." + config.config_filename
try:
os.chmod(pyspreadrc_path, 0o600)
except OSError:
dummyfile = open(pyspreadrc_path, "w")
dummyfile.close()
os.chmod(pyspreadrc_path, 0o600)
def from_spec(spec):
"""
Creates an exploration object from a specification dict.
"""
exploration = util.get_object(
obj=spec,
predefined_objects=tensorforce.core.explorations.explorations
)
assert isinstance(exploration, Exploration)
return exploration
def destroy(self):
""" Destroy the underlying QWidget object.
"""
self._teardown_features()
focus_registry.unregister(self.widget)
widget = self.widget
if widget is not None:
del self.widget
super(QtGraphicsItem, self).destroy()
# If a QWidgetAction was created for this widget, then it has
# taken ownership of the widget and the widget will be deleted
# when the QWidgetAction is garbage collected. This means the
# superclass destroy() method must run before the reference to
# the QWidgetAction is dropped.
del self._widget_action
def pin_chat_message(self, chat_id, message_id, disable_notification=False):
"""
Use this method to pin a message in a supergroup.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Returns True on success.
:param chat_id: Int or Str: Unique identifier for the target chat or username of the target channel
(in the format @channelusername)
:param message_id: Int: Identifier of a message to pin
:param disable_notification: Bool: Pass True, if it is not necessary to send a notification
to all group members about the new pinned message
:return:
"""
return apihelper.pin_chat_message(self.token, chat_id, message_id, disable_notification)
def draw_label(self, layout_info, ax):
"""
Draw facet label onto the axes.
This function will only draw labels if they are needed.
Parameters
----------
layout_info : dict-like
facet information
ax : axes
Axes to label
"""
label_info = layout_info[list(self.vars)]
label_info._meta = {'dimension': 'cols'}
label_info = self.labeller(label_info)
self.draw_strip_text(label_info, 'top', ax)
def water(target, temperature='pore.temperature', salinity='pore.salinity'):
r"""
Calculates thermal conductivity of pure water or seawater at atmospheric
pressure using the correlation given by Jamieson and Tudhope. Values at
temperatures higher than the normal boiling temperature are calculated at the
saturation pressure.
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
temperature : string
The dictionary key containing the temperature values. Temperature must
be in Kelvin for this emperical equation to work
salinity : string
The dictionary key containing the salinity values. Salinity must be
expressed in g of salt per kg of solution (ppt).
Returns
-------
The thermal conductivity of water/seawater in [W/m.K]
Notes
-----
T must be in K, and S in g of salt per kg of phase, or ppt (parts per
thousand)
VALIDITY: 273 < T < 453 K; 0 < S < 160 g/kg;
ACCURACY: 3 %
References
----------
D. T. Jamieson, and J. S. Tudhope, Desalination, 8, 393-401, 1970.
"""
T = target[temperature]
if salinity in target.keys():
S = target[salinity]
else:
S = 0
T68 = 1.00024*T # convert from T_90 to T_68
SP = S/1.00472 # convert from S to S_P
k_sw = 0.001*(10**(sp.log10(240+0.0002*SP) +
0.434*(2.3-(343.5+0.037*SP)/T68) *
((1-T68/(647.3+0.03*SP)))**(1/3)))
value = k_sw
return value
def normalize_shape(shape):
"""Convenience function to normalize the `shape` argument."""
if shape is None:
raise TypeError('shape is None')
# handle 1D convenience form
if isinstance(shape, numbers.Integral):
shape = (int(shape),)
# normalize
shape = tuple(int(s) for s in shape)
return shape
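# Behaviour sketch:
normalize_shape(100)       # -> (100,)
normalize_shape([10, 20])  # -> (10, 20)
normalize_shape(None)      # raises TypeError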
def read(cls, proto):
"""
Reads deserialized data from proto object.
:param proto: (DynamicStructBuilder) Proto object
:returns: (:class:TemporalMemory) TemporalMemory instance
"""
tm = object.__new__(cls)
# capnp fails to save a tuple, so proto.columnDimensions was forced to
# serialize as a list. We prefer a tuple, however, because columnDimensions
# should be regarded as immutable.
tm.columnDimensions = tuple(proto.columnDimensions)
tm.cellsPerColumn = int(proto.cellsPerColumn)
tm.activationThreshold = int(proto.activationThreshold)
tm.initialPermanence = round(proto.initialPermanence, EPSILON_ROUND)
tm.connectedPermanence = round(proto.connectedPermanence, EPSILON_ROUND)
tm.minThreshold = int(proto.minThreshold)
tm.maxNewSynapseCount = int(proto.maxNewSynapseCount)
tm.permanenceIncrement = round(proto.permanenceIncrement, EPSILON_ROUND)
tm.permanenceDecrement = round(proto.permanenceDecrement, EPSILON_ROUND)
tm.predictedSegmentDecrement = round(proto.predictedSegmentDecrement,
EPSILON_ROUND)
tm.maxSegmentsPerCell = int(proto.maxSegmentsPerCell)
tm.maxSynapsesPerSegment = int(proto.maxSynapsesPerSegment)
tm.connections = Connections.read(proto.connections)
#pylint: disable=W0212
tm._random = Random()
tm._random.read(proto.random)
#pylint: enable=W0212
tm.activeCells = [int(x) for x in proto.activeCells]
tm.winnerCells = [int(x) for x in proto.winnerCells]
flatListLength = tm.connections.segmentFlatListLength()
tm.numActiveConnectedSynapsesForSegment = [0] * flatListLength
tm.numActivePotentialSynapsesForSegment = [0] * flatListLength
tm.lastUsedIterationForSegment = [0] * flatListLength
tm.activeSegments = []
tm.matchingSegments = []
for protoSegment in proto.activeSegments:
tm.activeSegments.append(
tm.connections.getSegment(protoSegment.cell,
protoSegment.idxOnCell))
for protoSegment in proto.matchingSegments:
tm.matchingSegments.append(
tm.connections.getSegment(protoSegment.cell,
protoSegment.idxOnCell))
for protoSegment in proto.numActivePotentialSynapsesForSegment:
segment = tm.connections.getSegment(protoSegment.cell,
protoSegment.idxOnCell)
tm.numActivePotentialSynapsesForSegment[segment.flatIdx] = (
int(protoSegment.number))
tm.iteration = long(proto.iteration)
for protoSegment in proto.lastUsedIterationForSegment:
segment = tm.connections.getSegment(protoSegment.cell,
protoSegment.idxOnCell)
tm.lastUsedIterationForSegment[segment.flatIdx] = (
long(protoSegment.number))
return tm
def show_ring(devname):
'''
Queries the specified network device for rx/tx ring parameter information
CLI Example:
.. code-block:: bash
salt '*' ethtool.show_ring <devname>
'''
try:
ring = ethtool.get_ringparam(devname)
except IOError:
log.error('Ring parameters not supported on %s', devname)
return 'Not supported'
ret = {}
for key, value in ring.items():
ret[ethtool_ring_remap[key]] = ring[key]
return ret
def uid(self, p_todo):
"""
Returns the unique text-based ID for a todo item.
"""
try:
return self._todo_id_map[p_todo]
except KeyError as ex:
raise InvalidTodoException from ex
def _determine_termination_policies(termination_policies, termination_policies_from_pillar):
'''
helper method for present. ensure that termination_policies are set
'''
pillar_termination_policies = copy.deepcopy(
__salt__['config.option'](termination_policies_from_pillar, [])
)
if not termination_policies and pillar_termination_policies:
termination_policies = pillar_termination_policies
return termination_policies
def class_balancing_oversample(X_train=None, y_train=None, printable=True):
"""Input the features and labels, return the features and labels after oversampling.
Parameters
----------
X_train : numpy.array
The inputs.
y_train : numpy.array
The targets.
Examples
--------
One X
>>> X_train, y_train = class_balancing_oversample(X_train, y_train, printable=True)
Two X
>>> X, y = tl.utils.class_balancing_oversample(X_train=np.hstack((X1, X2)), y_train=y, printable=False)
>>> X1 = X[:, 0:5]
>>> X2 = X[:, 5:]
"""
# ======== Classes balancing
if printable:
tl.logging.info("Classes balancing for training examples...")
c = Counter(y_train)
if printable:
tl.logging.info('the occurrence number of each stage: %s' % c.most_common())
tl.logging.info('the least stage is Label %s have %s instances' % c.most_common()[-1])
tl.logging.info('the most stage is Label %s have %s instances' % c.most_common(1)[0])
most_num = c.most_common(1)[0][1]
if printable:
tl.logging.info('most num is %d, all classes tend to be this num' % most_num)
locations = {}
number = {}
for lab, num in c.most_common(): # find the index from y_train
number[lab] = num
locations[lab] = np.where(np.array(y_train) == lab)[0]
if printable:
tl.logging.info('convert list(np.array) to dict format')
X = {} # convert list to dict
for lab, num in number.items():
X[lab] = X_train[locations[lab]]
# oversampling
if printable:
tl.logging.info('start oversampling')
for key in X:
temp = X[key]
while True:
if len(X[key]) >= most_num:
break
X[key] = np.vstack((X[key], temp))
if printable:
        tl.logging.info('feature dimension (taken from label 0): %d' % len(X[0][0]))
tl.logging.info('the occurrence num of each stage after oversampling')
for key in X:
tl.logging.info("%s %d" % (key, len(X[key])))
if printable:
tl.logging.info('make each stage have same num of instances')
for key in X:
X[key] = X[key][0:most_num, :]
tl.logging.info("%s %d" % (key, len(X[key])))
# convert dict to list
if printable:
tl.logging.info('convert from dict to list format')
y_train = []
    X_train = np.empty(shape=(0, len(X[0][0])))  # note: assumes a class labelled 0 exists
for key in X:
X_train = np.vstack((X_train, X[key]))
y_train.extend([key for i in range(len(X[key]))])
# tl.logging.info(len(X_train), len(y_train))
c = Counter(y_train)
if printable:
tl.logging.info('the occurrence number of each stage after oversampling: %s' % c.most_common())
# ================ End of Classes balancing
return X_train, y_train | 0.003188 |
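A minimal, self-contained sketch of the same oversampling idea using only numpy and Counter (the tl.logging calls are omitted and the toy data below is made up for illustration):

import numpy as np
from collections import Counter

# Hypothetical toy data: 6 samples of class 0, 2 of class 1.
X = np.arange(16).reshape(8, 2)
y = [0, 0, 0, 0, 0, 0, 1, 1]

counts = Counter(y)
most_num = counts.most_common(1)[0][1]  # size of the largest class

parts_X, parts_y = [], []
for label in counts:
    rows = X[np.where(np.array(y) == label)[0]]
    # Stack copies of the class until it reaches most_num, then trim.
    reps = int(np.ceil(most_num / float(len(rows))))
    rows = np.vstack([rows] * reps)[:most_num]
    parts_X.append(rows)
    parts_y.extend([label] * most_num)

X_bal = np.vstack(parts_X)
print(Counter(parts_y))  # Counter({0: 6, 1: 6})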
def get_system_info():
'''
Get system information.
Returns:
dict: Dictionary containing information about the system to include
name, description, version, etc...
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_system_info
'''
def byte_calc(val):
val = float(val)
if val < 2**10:
return '{0:.3f}B'.format(val)
elif val < 2**20:
return '{0:.3f}KB'.format(val / 2**10)
elif val < 2**30:
return '{0:.3f}MB'.format(val / 2**20)
elif val < 2**40:
return '{0:.3f}GB'.format(val / 2**30)
else:
return '{0:.3f}TB'.format(val / 2**40)
# Lookup dicts for Win32_OperatingSystem
os_type = {1: 'Work Station',
2: 'Domain Controller',
3: 'Server'}
# lookup dicts for Win32_ComputerSystem
domain_role = {0: 'Standalone Workstation',
1: 'Member Workstation',
2: 'Standalone Server',
3: 'Member Server',
4: 'Backup Domain Controller',
5: 'Primary Domain Controller'}
warning_states = {1: 'Other',
2: 'Unknown',
3: 'Safe',
4: 'Warning',
5: 'Critical',
6: 'Non-recoverable'}
pc_system_types = {0: 'Unspecified',
1: 'Desktop',
2: 'Mobile',
3: 'Workstation',
4: 'Enterprise Server',
5: 'SOHO Server',
6: 'Appliance PC',
7: 'Performance Server',
8: 'Maximum'}
# Connect to WMI
with salt.utils.winapi.Com():
conn = wmi.WMI()
system = conn.Win32_OperatingSystem()[0]
ret = {'name': get_computer_name(),
'description': system.Description,
'install_date': system.InstallDate,
'last_boot': system.LastBootUpTime,
'os_manufacturer': system.Manufacturer,
'os_name': system.Caption,
'users': system.NumberOfUsers,
'organization': system.Organization,
'os_architecture': system.OSArchitecture,
'primary': system.Primary,
'os_type': os_type[system.ProductType],
'registered_user': system.RegisteredUser,
'system_directory': system.SystemDirectory,
'system_drive': system.SystemDrive,
'os_version': system.Version,
'windows_directory': system.WindowsDirectory}
system = conn.Win32_ComputerSystem()[0]
# Get pc_system_type depending on Windows version
if platform.release() in ['Vista', '7', '8']:
# Types for Vista, 7, and 8
pc_system_type = pc_system_types[system.PCSystemType]
else:
# New types were added with 8.1 and newer
pc_system_types.update({8: 'Slate', 9: 'Maximum'})
pc_system_type = pc_system_types[system.PCSystemType]
ret.update({
'bootup_state': system.BootupState,
'caption': system.Caption,
'chassis_bootup_state': warning_states[system.ChassisBootupState],
'chassis_sku_number': system.ChassisSKUNumber,
'dns_hostname': system.DNSHostname,
'domain': system.Domain,
'domain_role': domain_role[system.DomainRole],
'hardware_manufacturer': system.Manufacturer,
'hardware_model': system.Model,
'network_server_mode_enabled': system.NetworkServerModeEnabled,
'part_of_domain': system.PartOfDomain,
'pc_system_type': pc_system_type,
'power_state': system.PowerState,
'status': system.Status,
'system_type': system.SystemType,
'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
'total_physical_memory_raw': system.TotalPhysicalMemory,
'thermal_state': warning_states[system.ThermalState],
'workgroup': system.Workgroup
})
# Get processor information
processors = conn.Win32_Processor()
ret['processors'] = 0
ret['processors_logical'] = 0
ret['processor_cores'] = 0
ret['processor_cores_enabled'] = 0
ret['processor_manufacturer'] = processors[0].Manufacturer
ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
for processor in processors:
ret['processors'] += 1
ret['processors_logical'] += processor.NumberOfLogicalProcessors
ret['processor_cores'] += processor.NumberOfCores
ret['processor_cores_enabled'] += processor.NumberOfEnabledCore
bios = conn.Win32_BIOS()[0]
ret.update({'hardware_serial': bios.SerialNumber,
'bios_manufacturer': bios.Manufacturer,
'bios_version': bios.Version,
'bios_details': bios.BIOSVersion,
'bios_caption': bios.Caption,
'bios_description': bios.Description})
ret['install_date'] = _convert_date_time_string(ret['install_date'])
ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
return ret | 0.000368 |
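The cascaded thresholds in byte_calc can equivalently be written table-driven; a standalone sketch of the same scaling with made-up sample values:

for raw in (512, 8 * 2**10, 4 * 2**20, 16 * 2**30, 3 * 2**40):
    val = float(raw)
    for unit, scale in (('B', 1), ('KB', 2**10), ('MB', 2**20),
                        ('GB', 2**30), ('TB', 2**40)):
        # Same cutoffs as byte_calc: switch units at each power of 1024.
        if val < scale * 2**10 or unit == 'TB':
            print('{0:.3f}{1}'.format(val / scale, unit))
            break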
def rec_load_all(self, zone):
"""
Lists all DNS records for the given domain
:param zone: the domain for which records are being retrieved
:type zone: str
:return:
:rtype: generator
"""
has_more = True
current_count = 0
while has_more:
records = self._request({
'a': 'rec_load_all',
'o': current_count,
'z': zone
})
try:
has_more = records['response']['recs']['has_more']
current_count += records['response']['recs']['count']
for record in records['response']['recs']['objs']:
yield record
except KeyError:
has_more = False | 0.002554 |
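The has_more/offset loop above is a generic cursor-pagination pattern; a minimal sketch with a stubbed fetch function standing in for the CloudFlare request (the page shape below is a simplified assumption):

def paginate(fetch):
    """Yield items from a paged API until has_more is False."""
    offset, has_more = 0, True
    while has_more:
        page = fetch(offset)
        has_more = page['has_more']
        offset += page['count']
        for item in page['objs']:
            yield item

# Stub standing in for the rec_load_all request.
data = list(range(7))
def fetch(offset, size=3):
    chunk = data[offset:offset + size]
    return {'objs': chunk, 'count': len(chunk),
            'has_more': offset + size < len(data)}

print(list(paginate(fetch)))  # [0, 1, 2, 3, 4, 5, 6]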
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
conn = self._get_db()
if args:
fmt = '@_{0}_%d=%s'.format(procname)
self._query('SET %s' % ','.join(fmt % (index, conn.escape(arg))
for index, arg in enumerate(args)))
self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range_type(len(args))]))
self._query(q)
self._executed = q
return args | 0.001058 |
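A hedged usage sketch of the OUT-parameter retrieval the docstring describes; the connection details and the get_totals procedure are assumptions, while the @_procname_n variable names follow the convention stated above:

import pymysql

conn = pymysql.connect(host='localhost', user='user',
                       password='secret', db='test')  # hypothetical DSN
with conn.cursor() as cur:
    cur.callproc('get_totals', (42, 0))   # second arg used as an OUT param
    while cur.nextset():                  # drain all result sets first
        pass
    cur.execute('SELECT @_get_totals_0, @_get_totals_1')
    print(cur.fetchone())                 # OUT/INOUT values, if any
conn.close()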
def plot_zt_mu(self, temp=600, output='eig', relaxation_time=1e-14,
xlim=None):
"""
Plot the ZT in function of Fermi level.
        Args:
            temp: the temperature
            output: with 'eig' the three eigenvalues of the ZT tensor are
                plotted (labelled ZT$_1$, ZT$_2$, ZT$_3$)
            relaxation_time: a relaxation time in s, 1e-14 by default
            xlim: a list of min and max fermi energy; by default
                (-0.5, band gap + 0.5)
        Returns:
            a matplotlib object
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
zt = self._bz.get_zt(relaxation_time=relaxation_time, output=output,
doping_levels=False)[temp]
plt.plot(self._bz.mu_steps, zt, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['ZT$_1$', 'ZT$_2$', 'ZT$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylabel("ZT", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt | 0.00249 |
def hamiltonian_monte_carlo(
hmc_state: HamiltonianMonteCarloState,
target_log_prob_fn: PotentialFn,
step_size: Any,
num_leapfrog_steps: IntTensor,
momentum: State = None,
kinetic_energy_fn: PotentialFn = None,
momentum_sample_fn: MomentumSampleFn = None,
leapfrog_trace_fn: Callable[[LeapFrogStepState, LeapFrogStepExtras],
TensorNest] = lambda *args: (),
seed=None,
) -> Tuple[HamiltonianMonteCarloState, HamiltonianMonteCarloExtra]:
"""Hamiltonian Monte Carlo `TransitionOperator`.
#### Example
```python
step_size = 0.2
num_steps = 2000
num_leapfrog_steps = 10
state = tf.ones([16, 2])
base_mean = [1., 0]
base_cov = [[1, 0.5], [0.5, 1]]
bijector = tfb.Softplus()
base_dist = tfd.MultivariateNormalFullCovariance(
loc=base_mean, covariance_matrix=base_cov)
target_dist = bijector(base_dist)
def orig_target_log_prob_fn(x):
return target_dist.log_prob(x), ()
target_log_prob_fn, state = fun_mcmc.transform_log_prob_fn(
orig_target_log_prob_fn, bijector, state)
kernel = tf.function(lambda state: fun_mcmc.hamiltonian_monte_carlo(
state,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps,
target_log_prob_fn=target_log_prob_fn,
seed=tfp_test_util.test_seed()))
_, chain = fun_mcmc.trace(
state=fun_mcmc.HamiltonianMonteCarloState(
state=state,
state_grads=None,
target_log_prob=None,
state_extra=None),
fn=kernel,
num_steps=num_steps,
trace_fn=lambda state, extra: state.state_extra[0])
```
Args:
hmc_state: HamiltonianMonteCarloState.
target_log_prob_fn: Target log prob fn.
step_size: Step size, structure broadcastable to the `target_log_prob_fn`
state.
num_leapfrog_steps: Number of leapfrog steps to take.
momentum: Initial momentum, passed to `momentum_sample_fn`. Default: zeroes.
kinetic_energy_fn: Kinetic energy function.
momentum_sample_fn: Sampler for the momentum.
leapfrog_trace_fn: Trace function for the leapfrog integrator.
seed: For reproducibility.
Returns:
hmc_state: HamiltonianMonteCarloState
hmc_extra: HamiltonianMonteCarloExtra
"""
state = hmc_state.state
state_grads = hmc_state.state_grads
target_log_prob = hmc_state.target_log_prob
state_extra = hmc_state.state_extra
if kinetic_energy_fn is None:
# pylint: disable=function-redefined
def kinetic_energy_fn(*momentum):
return tf.add_n([
tf.reduce_sum(input_tensor=tf.square(x), axis=-1) / 2.
for x in tf.nest.flatten(momentum)
]), ()
if momentum_sample_fn is None:
# pylint: disable=function-redefined
def momentum_sample_fn(*momentum):
ret = tf.nest.map_structure(
lambda x: tf.random.normal(tf.shape(input=x), dtype=x.dtype),
momentum)
if len(ret) == 1:
return ret[0]
else:
return ret
if momentum is None:
momentum = call_fn(momentum_sample_fn,
tf.nest.map_structure(tf.zeros_like, state))
if target_log_prob is None:
target_log_prob, state_extra, state_grads = call_and_grads(
target_log_prob_fn, state)
kinetic_energy, _ = call_fn(kinetic_energy_fn, momentum)
current_energy = -target_log_prob + kinetic_energy
current_state = HamiltonianMonteCarloState(
state=state,
state_grads=state_grads,
state_extra=state_extra,
target_log_prob=target_log_prob)
def leapfrog_wrapper(leapfrog_state, target_log_prob, state_extra):
"""Leapfrog wrapper that tracks extra state."""
del target_log_prob
del state_extra
leapfrog_state, leapfrog_extra = leapfrog_step(
leapfrog_state,
step_size=step_size,
target_log_prob_fn=target_log_prob_fn,
kinetic_energy_fn=kinetic_energy_fn)
return [
leapfrog_state, leapfrog_extra.target_log_prob,
leapfrog_extra.state_extra
], leapfrog_extra
def leapfrog_trace_wrapper_fn(args, leapfrog_extra):
return leapfrog_trace_fn(args[0], leapfrog_extra)
leapfrog_wrapper_state = (LeapFrogStepState(state, state_grads, momentum),
target_log_prob, state_extra)
[[leapfrog_state, target_log_prob, state_extra], _], leapfrog_trace = trace(
leapfrog_wrapper_state,
leapfrog_wrapper,
num_leapfrog_steps,
trace_fn=leapfrog_trace_wrapper_fn)
kinetic_energy, _ = call_fn(kinetic_energy_fn, leapfrog_state.momentum)
proposed_energy = -target_log_prob + kinetic_energy
proposed_state = HamiltonianMonteCarloState(
state=leapfrog_state.state,
state_grads=leapfrog_state.state_grads,
target_log_prob=target_log_prob,
state_extra=state_extra)
energy_change = proposed_energy - current_energy
hmc_state, is_accepted, _ = metropolis_hastings_step(
current_state, proposed_state, energy_change, seed=seed)
hmc_state = hmc_state # type: HamiltonianMonteCarloState
return hmc_state, HamiltonianMonteCarloExtra(
is_accepted=is_accepted,
proposed_hmc_state=proposed_state,
log_accept_ratio=-energy_change,
leapfrog_trace=leapfrog_trace) | 0.005567 |
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if _isArrayLike(ids):
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]] | 0.005495 |
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
r2= R**2.+z**2.
r= nu.sqrt(r2)
return (1./r2*(1.-R**2./r2*(3.*self._a2+2.*r2)/(self._a2+r2))\
+self._a/r2/r*(3.*R**2./r2-1.)*nu.arctan(r/self._a))\
/self._a | 0.019787 |
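The potential class itself is not shown here, but an analytic second derivative like this one can be spot-checked against a central finite difference; a generic sketch (the test function below is an arbitrary stand-in, not this potential):

import numpy as np

def second_deriv_fd(f, x, h=1e-4):
    """Central-difference approximation of f''(x)."""
    return (f(x + h) - 2.0 * f(x) + f(x - h)) / h**2

# Arbitrary stand-in: f(R) = arctan(R/a) with a = 2, checked at R = 3.
f = lambda R: np.arctan(R / 2.0)
exact = -2.0 * 2.0 * 3.0 / (2.0**2 + 3.0**2) ** 2  # analytic f''(3) = -12/169
print(second_deriv_fd(f, 3.0), exact)  # both approx -0.0710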
def bestLabel(self, prefLanguage="en", qname_allowed=True, quotes=True):
"""
        Facility for extracting the best available label for an entity.
        This checks RDFS.label, then SKOS.prefLabel, and finally falls
        back to the qname local component.
"""
test = self.getValuesForProperty(rdflib.RDFS.label)
out = ""
if test:
out = firstEnglishStringInList(test)
else:
test = self.getValuesForProperty(rdflib.namespace.SKOS.prefLabel)
if test:
out = firstEnglishStringInList(test)
else:
if qname_allowed:
out = self.locale
if quotes and out:
return addQuotes(out)
else:
return out | 0.003979 |
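A small sketch of the same fallback chain against a plain rdflib graph (the URI and literal below are made up for illustration):

import rdflib
from rdflib.namespace import RDFS, SKOS

g = rdflib.Graph()
s = rdflib.URIRef('http://example.org/thing')  # hypothetical entity
g.add((s, SKOS.prefLabel, rdflib.Literal('Thing', lang='en')))

# Try RDFS.label first, then SKOS.prefLabel, then the local name.
labels = list(g.objects(s, RDFS.label)) or list(g.objects(s, SKOS.prefLabel))
print(labels[0] if labels else s.split('/')[-1])  # 'Thing'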
def pull_request(self, number):
"""Get the pull request indicated by ``number``.
:param int number: (required), number of the pull request.
:returns: :class:`PullRequest <github3.pulls.PullRequest>`
"""
json = None
if int(number) > 0:
url = self._build_url('pulls', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return PullRequest(json, self) if json else None | 0.004283 |
def _process_fields(self):
"""Default info massage to appropiate format/style.
This processing is called on preprocess and postprocess, AKA
before and after conversion of fields to appropiate
format/style.
Perfect example: custom fields on certain objects is a mess
(IMHO) when retrieved from Mambu, so some easiness is
implemented here to access them. See some of this objects
modules and pydocs for further info.
Tasks done here:
- Each custom field is given a 'name' key that holds the field
name, and for each keyed name, the value of the custom field is
assigned. Each pair of custom field name/value is entered as a
new property on the main dictionary, allowing an easy access to
them, not nested inside a pretty dark 'customInformation/Values'
list.
- Every item on the attrs dictionary gets stripped from trailing
spaces (useful when users make typos).
PLEASE REMEMBER! whenever you call postprocess on inherited
classes you should call this method too, or else you lose the
effect of the tasks done here.
"""
try:
try:
if self.has_key(self.customFieldName):
self[self.customFieldName] = [ c for c in self[self.customFieldName] if c['customField']['state']!="DEACTIVATED" ]
for custom in self[self.customFieldName]:
field_name = custom['customField']['name']
field_id = custom['customField']['id']
if custom['customFieldSetGroupIndex'] != -1:
field_name += '_'+str(custom['customFieldSetGroupIndex'])
field_id += '_'+str(custom['customFieldSetGroupIndex'])
custom['name'] = field_name
custom['id'] = field_id
try:
self[field_name] = custom['value']
self[field_id] = custom['value']
except KeyError:
self[field_name] = custom['linkedEntityKeyValue']
self[field_id] = custom['linkedEntityKeyValue']
custom['value'] = custom['linkedEntityKeyValue']
# in case you don't have any customFieldName, don't do anything here
except (AttributeError, TypeError):
pass
for k,v in self.items():
try:
self[k] = v.strip()
except Exception:
pass
except NotImplementedError:
pass | 0.004378 |
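The effect of that custom-field flattening, in isolation; the field names and payload shape below are a simplified assumption of the Mambu custom-field structure:

entity = {
    'customFieldValues': [
        {'customField': {'name': 'Branch Code', 'id': 'branchCode',
                         'state': 'ACTIVE'},
         'customFieldSetGroupIndex': -1,
         'value': 'BR-7'},
    ]
}

for custom in entity['customFieldValues']:
    name = custom['customField']['name']
    if custom['customFieldSetGroupIndex'] != -1:
        name += '_' + str(custom['customFieldSetGroupIndex'])
    entity[name] = custom['value']        # promote to a top-level key

print(entity['Branch Code'])  # 'BR-7'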
def _resolve_requirements(self, requirements):
"""
Internal method for resolving requirements into resource configurations.
:param requirements: Resource requirements from test case configuration as dictionary.
:return: Empty list if dut_count cannot be resolved, or nothing
"""
try:
dut_count = requirements["duts"]["*"]["count"]
except KeyError:
return []
default_values = {
"type": "hardware",
"allowed_platforms": [],
"nick": None,
}
default_values.update(requirements["duts"]["*"])
del default_values["count"]
dut_keys = list(default_values.keys())
dut_keys.extend(["application", "location", "subtype"])
dut_requirements = self.__generate_indexed_requirements(dut_count,
default_values,
requirements)
# Match groups of duts defined with 1..40 notation.
for key in requirements["duts"].keys():
if not isinstance(key, string_types):
continue
match = re.search(r'([\d]{1,})\.\.([\d]{1,})', key)
if match:
first_dut_idx = int(match.group(1))
last_dut_idx = int(match.group(2))
for i in range(first_dut_idx, last_dut_idx+1):
for k in dut_keys:
if k in requirements["duts"][key]:
dut_requirements[i-1].set(k, copy.copy(requirements["duts"][key][k]))
for idx, req in enumerate(dut_requirements):
if isinstance(req.get("nick"), string_types):
nick = req.get("nick")
req.set("nick", ResourceConfig.__replace_base_variables(nick,
len(dut_requirements),
idx))
self._solve_location(req, len(dut_requirements), idx)
self._dut_requirements = dut_requirements
return None | 0.002751 |
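The 1..40 group notation is matched with a plain regex; a standalone sketch of the expansion (the keys below are illustrative):

import re

keys = {'1..3': {'type': 'process'}, '5': {}, '*': {}}
for key in keys:
    match = re.search(r'([\d]{1,})\.\.([\d]{1,})', key)
    if match:
        first, last = int(match.group(1)), int(match.group(2))
        # '1..3' expands to dut indices 1, 2 and 3.
        print(key, '->', list(range(first, last + 1)))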
def create_dockwidget(self):
"""Add to parent QMainWindow as a dock widget"""
# Creating dock widget
dock = SpyderDockWidget(self.get_plugin_title(), self.main)
# Set properties
dock.setObjectName(self.__class__.__name__+"_dw")
dock.setAllowedAreas(self.ALLOWED_AREAS)
dock.setFeatures(self.FEATURES)
dock.setWidget(self)
self.update_margins()
dock.visibilityChanged.connect(self.visibility_changed)
dock.topLevelChanged.connect(self.on_top_level_changed)
dock.sig_plugin_closed.connect(self.plugin_closed)
self.dockwidget = dock
if self.shortcut is not None:
sc = QShortcut(QKeySequence(self.shortcut), self.main,
self.switch_to_plugin)
self.register_shortcut(sc, "_", "Switch to %s" % self.CONF_SECTION)
return (dock, self.LOCATION) | 0.003308 |
def read_packet(self, timeout=3.0):
"""read one packet, timeout if one packet is not available in the timeout period"""
try:
return self.queue.get(timeout=timeout)
except Empty:
raise InternalTimeoutError("Timeout waiting for packet in AsyncPacketBuffer") | 0.013158 |
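The underlying pattern is a blocking Queue.get with a timeout translated into a domain-specific error; a minimal standalone sketch (InternalTimeoutError here is a stand-in definition):

from queue import Queue, Empty  # Python 3 module name

class InternalTimeoutError(Exception):
    pass

def read_packet(q, timeout=3.0):
    try:
        return q.get(timeout=timeout)
    except Empty:
        raise InternalTimeoutError('Timeout waiting for packet')

q = Queue()
q.put(b'\x01\x02')
print(read_packet(q, timeout=0.1))  # b'\x01\x02'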
def setmin(self, window_name, object_name):
"""
Set min value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
object_handle = self._get_object_handle(window_name, object_name)
object_handle.AXValue = 0
return 1 | 0.003478 |
def un_camel_case(text):
r"""
Splits apart words that are written in CamelCase.
Bugs:
- Non-ASCII characters are treated as lowercase letters, even if they are
actually capital letters.
Examples:
>>> un_camel_case('1984ZXSpectrumGames')
'1984 ZX Spectrum Games'
>>> un_camel_case('aaAa aaAaA 0aA AAAa!AAA')
'aa Aa aa Aa A 0a A AA Aa! AAA'
>>> un_camel_case('MotörHead')
'Mot\xf6r Head'
>>> un_camel_case('MSWindows3.11ForWorkgroups')
'MS Windows 3.11 For Workgroups'
This should not significantly affect text that is not camel-cased:
>>> un_camel_case('ACM_Computing_Classification_System')
'ACM Computing Classification System'
>>> un_camel_case('Anne_Blunt,_15th_Baroness_Wentworth')
'Anne Blunt, 15th Baroness Wentworth'
>>> un_camel_case('Hindi-Urdu')
'Hindi-Urdu'
"""
revtext = text[::-1]
pieces = []
while revtext:
match = CAMEL_RE.match(revtext)
if match:
pieces.append(match.group(1))
revtext = revtext[match.end():]
else:
pieces.append(revtext)
revtext = ''
revstr = ' '.join(piece.strip(' _') for piece in pieces
if piece.strip(' _'))
return revstr[::-1].replace('- ', '-') | 0.00077 |
def forward(self, is_train=False):
"""Perform a forward pass on each executor."""
for texec in self.train_execs:
texec.forward(is_train=is_train) | 0.011561 |
def parse_config_path(args=sys.argv):
"""
Preprocess sys.argv and extract --config argument.
"""
config = CONFIG_PATH
if '--config' in args:
idx = args.index('--config')
if len(args) > idx + 1:
config = args.pop(idx + 1)
args.pop(idx)
return config | 0.003236 |
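A quick usage sketch, assuming the function above is in scope and that CONFIG_PATH is the module default (the paths below are made up):

CONFIG_PATH = '/etc/app/config.yml'  # assumed module default

argv = ['prog', '--verbose', '--config', '/tmp/alt.yml', 'run']
print(parse_config_path(argv))  # /tmp/alt.yml
print(argv)                     # ['prog', '--verbose', 'run'] -- flag and value removed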
def _handle_results(self, success, result, expected_failures=tuple()):
"""
Given a bool and a ResultSet (the form returned per result from
Connection.wait_for_responses), return a dictionary containing the
results. Used to process results from asynchronous queries to system
tables.
``expected_failures`` will usually be used to allow callers to ignore
``InvalidRequest`` errors caused by a missing system keyspace. For
example, some DSE versions report a 4.X server version, but do not have
virtual tables. Thus, running against 4.X servers, SchemaParserV4 uses
expected_failures to make a best-effort attempt to read those
keyspaces, but treat them as empty if they're not found.
:param success: A boolean representing whether or not the query
succeeded
:param result: The resultset in question.
        :param expected_failures: An Exception class or an iterable thereof. If the
query failed, but raised an instance of an expected failure class, this
will ignore the failure and return an empty list.
"""
if not success and isinstance(result, expected_failures):
return []
elif success:
return dict_factory(*result.results) if result else []
else:
raise result | 0.001476 |
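The expected-failures idiom generalizes to any "treat these exceptions as empty" helper; a minimal sketch:

def result_or_empty(success, result, expected_failures=()):
    # Swallow anticipated failures, re-raise everything else.
    if not success and isinstance(result, expected_failures):
        return []
    if success:
        return result
    raise result

print(result_or_empty(False, KeyError('missing'), (KeyError,)))  # []
print(result_or_empty(True, [1, 2]))                             # [1, 2]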
def ReadArtifactDefinitionValues(self, artifact_definition_values):
"""Reads an artifact definition from a dictionary.
Args:
artifact_definition_values (dict[str, object]): artifact definition
values.
Returns:
ArtifactDefinition: an artifact definition.
Raises:
FormatError: if the format of the artifact definition is not set
or incorrect.
"""
if not artifact_definition_values:
raise errors.FormatError('Missing artifact definition values.')
different_keys = (
set(artifact_definition_values) - definitions.TOP_LEVEL_KEYS)
if different_keys:
different_keys = ', '.join(different_keys)
raise errors.FormatError('Undefined keys: {0:s}'.format(different_keys))
name = artifact_definition_values.get('name', None)
if not name:
raise errors.FormatError('Invalid artifact definition missing name.')
# The description is assumed to be mandatory.
description = artifact_definition_values.get('doc', None)
if not description:
raise errors.FormatError(
'Invalid artifact definition: {0:s} missing description.'.format(
name))
artifact_definition = artifact.ArtifactDefinition(
name, description=description)
if artifact_definition_values.get('collectors', []):
raise errors.FormatError(
'Invalid artifact definition: {0:s} still uses collectors.'.format(
name))
urls = artifact_definition_values.get('urls', [])
if not isinstance(urls, list):
raise errors.FormatError(
'Invalid artifact definition: {0:s} urls is not a list.'.format(
name))
# TODO: check conditions.
artifact_definition.conditions = artifact_definition_values.get(
'conditions', [])
artifact_definition.provides = artifact_definition_values.get(
'provides', [])
self._ReadLabels(artifact_definition_values, artifact_definition, name)
self._ReadSupportedOS(artifact_definition_values, artifact_definition, name)
artifact_definition.urls = urls
self._ReadSources(artifact_definition_values, artifact_definition, name)
return artifact_definition | 0.004098 |