text (stringlengths 78-104k) | score (float64, 0-0.18)
---|---|
def parse_genemap2(lines):
"""Parse the omim source file called genemap2.txt
Explanation of Phenotype field:
Brackets, "[ ]", indicate "nondiseases," mainly genetic variations that
lead to apparently abnormal laboratory test values.
Braces, "{ }", indicate mutations that contribute to susceptibility to
multifactorial disorders (e.g., diabetes, asthma) or to susceptibility
to infection (e.g., malaria).
A question mark, "?", before the phenotype name indicates that the
relationship between the phenotype and gene is provisional.
More details about this relationship are provided in the comment
field of the map and in the gene and phenotype OMIM entries.
The number in parentheses after the name of each disorder indicates
the following:
(1) the disorder was positioned by mapping of the wildtype gene;
(2) the disease phenotype itself was mapped;
(3) the molecular basis of the disorder is known;
(4) the disorder is a chromosome deletion or duplication syndrome.
Args:
lines(iterable(str))
Yields:
parsed_entry(dict)
"""
LOG.info("Parsing the omim genemap2")
header = []
for i,line in enumerate(lines):
line = line.rstrip()
if line.startswith('#'):
if i < 10:
if line.startswith('# Chromosome'):
header = line[2:].split('\t')
continue
if len(line) < 5:
continue
parsed_entry = parse_omim_line(line, header)
parsed_entry['mim_number'] = int(parsed_entry['Mim Number'])
parsed_entry['raw'] = line
# This is the approved symbol for the entry
hgnc_symbol = parsed_entry.get("Approved Symbol")
# If no approved symbol could be found choose the first of
# the gene symbols
gene_symbols = []
if parsed_entry.get('Gene Symbols'):
gene_symbols = [symbol.strip() for symbol in parsed_entry['Gene Symbols'].split(',')]
parsed_entry['hgnc_symbols'] = gene_symbols
if not hgnc_symbol and gene_symbols:
hgnc_symbol = gene_symbols[0]
parsed_entry['hgnc_symbol'] = hgnc_symbol
# Gene inheritance is a construct. It is the union of all inheritance
# patterns found in the associated phenotypes
gene_inheritance = set()
parsed_phenotypes = []
# Information about the related phenotypes
# Each related phenotype is separated by ';'
for phenotype_info in parsed_entry.get('Phenotypes', '').split(';'):
if not phenotype_info:
continue
phenotype_info = phenotype_info.lstrip()
# First symbol in description indicates phenotype status
# If no special symbol is used the phenotype is 'established'
phenotype_status = OMIM_STATUS_MAP.get(phenotype_info[0], 'established')
# Skip phenotype entries that are not associated with a disease
if phenotype_status == 'nondisease':
continue
phenotype_description = ""
# We will try to save the description
splitted_info = phenotype_info.split(',')
for i, text in enumerate(splitted_info):
# Everything before ([1,2,3])
# We check if we are in the part where the mim number exists
match = entry_pattern.search(text)
if not match:
phenotype_description += text
else:
# If we find the end of the entry
mimnr_match = mimnr_pattern.search(phenotype_info)
# Then, if the entry has a MIM number, we choose that
if mimnr_match:
phenotype_mim = int(mimnr_match.group())
else:
phenotype_mim = parsed_entry['mim_number']
phenotype_description += text[:-4]
break
# Find the inheritance
inheritance = set()
inheritance_text = ','.join(splitted_info[i:])
for term in mim_inheritance_terms:
if term in inheritance_text:
inheritance.add(TERMS_MAPPER[term])
gene_inheritance.add(TERMS_MAPPER[term])
parsed_phenotypes.append(
{
'mim_number':phenotype_mim,
'inheritance': inheritance,
'description': phenotype_description.strip('?{}'),
'status': phenotype_status,
}
)
parsed_entry['phenotypes'] = parsed_phenotypes
parsed_entry['inheritance'] = gene_inheritance
yield parsed_entry | 0.007225 |
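A minimal, self-contained sketch of the phenotype-field conventions the loop above relies on. entry_pattern, mimnr_pattern and OMIM_STATUS_MAP are module-level names that are not shown here, so the definitions below are illustrative assumptions rather than the project's actual ones:

import re

entry_pattern = re.compile(r'\([1-4]\)')   # trailing "(1)"-"(4)" phenotype mapping key
mimnr_pattern = re.compile(r'\d{6}')       # a six-digit phenotype MIM number
OMIM_STATUS_MAP = {'?': 'provisional', '[': 'nondisease', '{': 'susceptibility'}

phenotype_info = '{Diabetes mellitus, type II}, 125853 (3), Autosomal dominant'
print(OMIM_STATUS_MAP.get(phenotype_info[0], 'established'))  # 'susceptibility'
print(mimnr_pattern.search(phenotype_info).group())           # '125853'
print(bool(entry_pattern.search(' 125853 (3)')))              # True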
def root_item_selected(self, item):
"""Root item has been selected: expanding it and collapsing others"""
if self.show_all_files:
return
for root_item in self.get_top_level_items():
if root_item is item:
self.expandItem(root_item)
else:
self.collapseItem(root_item) | 0.005479 |
def format(self, record):
"""
Formats a given log record to include the timestamp, log level, thread
ID and message. Colorized if coloring is available.
"""
if not self.is_tty:
return super(CLIHandler, self).format(record)
level_abbrev = record.levelname[0]
time_and_level = color_string(
color_for_level(record.levelno),
"[%(asctime)s " + level_abbrev + "]"
)
thread = color_string(
color_for_thread(record.thread),
"[%(threadName)s]"
)
formatter = logging.Formatter(
time_and_level + thread + " %(message)s", "%Y-%m-%d %H:%M:%S"
)
return formatter.format(record) | 0.002688 |
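A hypothetical wiring of this handler, assuming CLIHandler subclasses logging.StreamHandler and determines is_tty itself:

import logging

logger = logging.getLogger('cli')
logger.setLevel(logging.INFO)
logger.addHandler(CLIHandler())
logger.info('sync started')  # e.g. "[2024-01-01 12:00:00 I][MainThread] sync started"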
def _from_p(self, mode):
"""Convert the image from P or PA to RGB or RGBA."""
self._check_modes(("P", "PA"))
if not self.palette:
raise RuntimeError("Can't convert palettized image, missing palette.")
pal = np.array(self.palette)
pal = da.from_array(pal, chunks=pal.shape)
if pal.shape[1] == 4:
# colormap's alpha overrides data alpha
mode = "RGBA"
alpha = None
elif self.mode.endswith("A"):
# add a new/fake 'bands' dimension to the end
alpha = self.data.sel(bands="A").data[..., None]
mode = mode + "A" if not mode.endswith("A") else mode
else:
alpha = None
flat_indexes = self.data.sel(bands='P').data.ravel().astype('int64')
dim_sizes = ((key, val) for key, val in self.data.sizes.items() if key != 'bands')
dims, new_shape = zip(*dim_sizes)
dims = dims + ('bands',)
new_shape = new_shape + (pal.shape[1],)
new_data = pal[flat_indexes].reshape(new_shape)
coords = dict(self.data.coords)
coords["bands"] = list(mode)
if alpha is not None:
new_arr = da.concatenate((new_data, alpha), axis=-1)
data = xr.DataArray(new_arr, coords=coords, attrs=self.data.attrs, dims=dims)
else:
data = xr.DataArray(new_data, coords=coords, attrs=self.data.attrs, dims=dims)
return data | 0.004115 |
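The palette expansion above boils down to fancy indexing followed by a reshape; a minimal NumPy-only sketch of that step (the real method keeps the data lazy via dask and xarray):

import numpy as np

pal = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])  # 3-colour RGB palette
p_band = np.array([[0, 1], [2, 0]])                      # 2x2 palettized image
rgb = pal[p_band.ravel()].reshape(p_band.shape + (pal.shape[1],))
print(rgb.shape)  # (2, 2, 3)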
async def delete(self):
"""Delete boot source selection."""
await self._handler.delete(
boot_source_id=self.boot_source.id, id=self.id) | 0.01227 |
def _sliced_shape(self, shapes, i, major_axis):
"""Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : list of (str, tuple)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
"""
sliced_shapes = []
for desc, axis in zip(shapes, major_axis):
shape = list(desc.shape)
if axis >= 0:
shape[axis] = self.slices[i].stop - self.slices[i].start
sliced_shapes.append(DataDesc(desc.name, tuple(shape), desc.dtype, desc.layout))
return sliced_shapes | 0.004702 |
def pkt_check(*args, func=None):
"""Check if arguments are valid packets."""
func = func or inspect.stack()[2][3]
for var in args:
dict_check(var, func=func)
dict_check(var.get('frame'), func=func)
enum_check(var.get('protocol'), func=func)
real_check(var.get('timestamp'), func=func)
ip_check(var.get('src'), var.get('dst'), func=func)
bool_check(var.get('syn'), var.get('fin'), func=func)
int_check(var.get('srcport'), var.get('dstport'), var.get('index'), func=func) | 0.003724 |
def maybeReceiveAck(self, ackPacket):
"""
Receive an L{ack} or L{synAck} input from the given packet.
"""
ackPredicate = self.ackPredicate
self.ackPredicate = lambda packet: False
if ackPacket.syn:
# New SYN packets are always news.
self.synAck()
return
if ackPredicate(ackPacket):
self.ack() | 0.005051 |
def DiffArrayObjects(self, oldObj, newObj, isElementLinks=False):
"""Method which deligates the diffing of arrays based on the type"""
if oldObj == newObj:
return True
if not oldObj or not newObj:
return False
if len(oldObj) != len(newObj):
__Log__.debug('DiffArrayObjects: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
firstObj = oldObj[0]
if IsPrimitiveType(firstObj):
return self.DiffPrimitiveArrays(oldObj, newObj)
elif isinstance(firstObj, types.ManagedObject):
return self.DiffAnyArrays(oldObj, newObj, isElementLinks)
elif isinstance(firstObj, types.DataObject):
return self.DiffDoArrays(oldObj, newObj, isElementLinks)
else:
raise TypeError("Unknown type: %s" % oldObj.__class__) | 0.023283 |
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj) | 0.00491 |
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function.
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
# we may need to actually resample as if we are timestamps
if self.kind == 'timestamp':
return super()._downsample(how, **kwargs)
how = self._is_cython_func(how) or how
ax = self.ax
if is_subperiod(ax.freq, self.freq):
# Downsampling
return self._groupby_and_aggregate(how, grouper=self.grouper,
**kwargs)
elif is_superperiod(ax.freq, self.freq):
if how == 'ohlc':
# GH #13083
# upsampling to subperiods is handled as an asfreq, which works
# for pure aggregating/reducing methods
# OHLC reduces along the time dimension, but creates multiple
# values for each period -> handle by _groupby_and_aggregate()
return self._groupby_and_aggregate(how, grouper=self.grouper)
return self.asfreq()
elif ax.freq == self.freq:
return self.asfreq()
raise IncompatibleFrequency(
'Frequency {} cannot be resampled to {}, as they are not '
'sub or super periods'.format(ax.freq, self.freq)) | 0.001402 |
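The sub/super-period distinction can be illustrated with the public pandas API; this is only a sketch, since the method above is pandas-internal:

import pandas as pd

s = pd.Series(range(12), index=pd.period_range('2020-01', periods=12, freq='M'))
quarterly = s.resample('Q').sum()    # 'M' is a subperiod of 'Q': grouped and aggregated
monthly = s.resample('M').asfreq()   # same frequency: falls through to asfreq()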
def create_queue(self, vhost, name, **kwargs):
"""
Create a queue. The API documentation specifies that all of the body
elements are optional, so this method only requires arguments needed
to form the URI
:param string vhost: The vhost to create the queue in.
:param string name: The name of the queue
More on these operations can be found at:
http://www.rabbitmq.com/amqp-0-9-1-reference.html
"""
vhost = quote(vhost, '')
name = quote(name, '')
path = Client.urls['queues_by_name'] % (vhost, name)
body = json.dumps(kwargs)
return self._call(path,
'PUT',
body,
headers=Client.json_headers) | 0.003717 |
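A hypothetical call, assuming `client` is an authenticated Client instance; any extra keyword arguments become the JSON body of the PUT request:

client.create_queue('/', 'task-queue', durable=True, auto_delete=False)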
def run_ppm_server(pdb_file, outfile, force_rerun=False):
"""Run the PPM server from OPM to predict transmembrane residues.
Args:
pdb_file (str): Path to PDB file
outfile (str): Path to output HTML results file
force_rerun (bool): Flag to rerun PPM if HTML results file already exists
Returns:
dict: Dictionary of information from the PPM run, including a link to download the membrane protein file
"""
if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):
url = 'http://sunshine.phar.umich.edu/upload_file.php'
files = {'userfile': open(pdb_file, 'rb')}
r = requests.post(url, files=files)
info = r.text
# Save results in raw HTML format
with open(outfile, 'w') as f:
f.write(info)
else:
# Utilize existing saved results
with open(outfile, 'r') as f:
info = f.read()
# Clean up the HTML stuff
t = info.replace('\n', '')
tt = t.replace('\r', '')
ttt = tt.replace('\t', '')
soup = BeautifulSoup(ttt, "lxml")
# Find all tables in the HTML code
tables = soup.find_all("table", attrs={"class": "data"})
info_dict = {}
# There are multiple tables with information
table_index = 0
for t in tables:
data_index = 0
# "row1" contains data
for data in t.find_all('tr', attrs={"class": "row1"}):
data_list = list(data.strings)
if table_index == 0:
info_dict['Depth/Hydrophobic Thickness'] = data_list[0]
info_dict['deltaG_transfer'] = data_list[2]
info_dict['Tilt Angle'] = data_list[3]
if table_index == 1 and data_index == 0:
info_dict['Embedded_residues_Tilt'] = data_list[0]
info_dict['Embedded_residues'] = data_list[1]
if table_index == 1 and data_index == 1:
info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0]
info_dict['Transmembrane_secondary_structure_segments'] = data_list[1]
if table_index == 2:
info_dict['Output Messages'] = data_list[1]
if table_index == 3:
baseurl = 'http://sunshine.phar.umich.edu/'
a = data.find('a', href=True)
download_url = baseurl + a['href'].replace('./', '')
info_dict['Output file download link'] = download_url
data_index += 1
table_index += 1
return info_dict | 0.001965 |
def add_mea(mea_yaml_path):
'''Adds the MEA design defined by the yaml file to the install folder
Parameters
----------
mea_yaml_path : str
Path to the yaml file describing the MEA
Returns
-------
'''
path = os.path.abspath(mea_yaml_path)
if (path.endswith('.yaml') or path.endswith('.yml')) and os.path.isfile(path):
with open(path, 'r') as meafile:
if use_loader:
elinfo = yaml.load(meafile, Loader=yaml.FullLoader)
else:
elinfo = yaml.load(meafile)
if 'pos' not in elinfo.keys():
if 'dim' in elinfo.keys():
if elinfo['dim'] != 1 and 'pitch' not in elinfo.keys():
raise AttributeError("The yaml file should contin either a list of 3d or 2d positions 'pos' or "
"intormation about dimension and pitch ('dim' and 'pitch')")
else:
raise AttributeError("The yaml file should contin either a list of 3d or 2d positions 'pos' or "
"intormation about dimension and pitch ('dim' and 'pitch') - unless dim=1")
this_dir, this_filename = os.path.split(__file__)
shutil.copy(path, os.path.join(this_dir, 'electrodes'))
if path.endswith('.yaml'):
electrodes = [f[:-5] for f in os.listdir(os.path.join(this_dir, "electrodes"))]
elif path.endswith('.yml'):
electrodes = [f[:-4] for f in os.listdir(os.path.join(this_dir, "electrodes"))]
print('Available MEA: \n', electrodes)
return | 0.005028 |
def change_type(self, bucket, key, storage_type):
"""修改文件的存储类型
修改文件的存储类型为普通存储或者是低频存储,参考文档:
https://developer.qiniu.com/kodo/api/3710/modify-the-file-type
Args:
bucket: 待操作资源所在空间
key: 待操作资源文件名
storage_type: 待操作资源存储类型,0为普通存储,1为低频存储
"""
resource = entry(bucket, key)
return self.__rs_do('chtype', resource, 'type/{0}'.format(storage_type)) | 0.006652 |
def join(self, other, on=None, how='left', lsuffix=None, rsuffix=None,
algorithm='merge', is_on_sorted=True, is_on_unique=True):
"""Database-like join this DataFrame with the other DataFrame.
Currently assumes the `on` columns are sorted and the on-column(s) values are unique;
handling the other cases is future work.
Note there's no automatic cast if the type of the on columns differs.
Check DataFrame.merge() for more details.
Parameters
----------
other : DataFrame
With which to merge.
on : str or list or None, optional
The columns from both DataFrames on which to join.
If None, will join on the index if it has the same name.
how : {'inner', 'left', 'right', 'outer'}, optional
Which kind of join to do.
lsuffix : str, optional
Suffix to use on columns that overlap from self.
rsuffix : str, optional
Suffix to use on columns that overlap from other.
algorithm : {'merge', 'hash'}, optional
Which algorithm to use. Note that for 'hash', the `other` DataFrame is the one hashed.
is_on_sorted : bool, optional
If we know that the on columns are already sorted, can employ faster algorithm.
is_on_unique : bool, optional
If we know that the values are unique, can employ faster algorithm.
Returns
-------
DataFrame
DataFrame containing the merge result, with the `on` columns as index.
"""
check_type(lsuffix, str)
check_type(rsuffix, str)
self_names = self._gather_column_names()
other_names = other._gather_column_names()
common_names = set(self_names).intersection(set(other_names))
if len(common_names) > 0 and lsuffix is None and rsuffix is None:
raise ValueError('Columns overlap but no suffixes supplied')
# need to ensure that some str suffixes are passed to merge
lsuffix = '' if lsuffix is None else lsuffix
rsuffix = '' if rsuffix is None else rsuffix
# TODO: pandas is more flexible, e.g. allows the index names to be different when joining on index
# TODO i.e. df(ind + a, b) join df(ind2 + b, c) does work and the index is now called ind
return self.merge(other, how, on, (lsuffix, rsuffix), algorithm, is_on_sorted, is_on_unique) | 0.004087 |
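A hypothetical call, assuming `orders` and `customers` are instances of this DataFrame class that share a sorted, unique 'id' column and both carry a column named 'total':

joined = orders.join(customers, on='id', how='inner',
                     lsuffix='_order', rsuffix='_customer')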
def update(name=None,
pkgs=None,
refresh=True,
skip_verify=False,
normalize=True,
minimal=False,
obsoletes=False,
**kwargs):
'''
.. versionadded:: 2019.2.0
Calls :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` with
``obsoletes=False``. Mirrors the CLI behavior of ``yum update``.
See :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` for
further documentation.
.. code-block:: bash
salt '*' pkg.update
'''
return upgrade(name, pkgs, refresh, skip_verify, normalize, minimal, obsoletes, **kwargs) | 0.00319 |
def add_tileset(self, tileset):
""" Add a tileset to the map
:param tileset: TiledTileset
"""
assert (isinstance(tileset, TiledTileset))
self.tilesets.append(tileset) | 0.009662 |
def convert_elementwise_sub(
params, w_name, scope_name, inputs, layers, weights, names
):
"""
Convert elementwise subtraction.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting elementwise_sub ...')
model0 = layers[inputs[0]]
model1 = layers[inputs[1]]
if names == 'short':
tf_name = 'S' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
sub = keras.layers.Subtract(name=tf_name)
layers[scope_name] = sub([model0, model1]) | 0.001205 |
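For reference, the core Keras call used above in isolation, sketched with the tf.keras API; the converter itself works against whichever Keras the surrounding package imports:

from tensorflow import keras

a = keras.Input(shape=(4,))
b = keras.Input(shape=(4,))
diff = keras.layers.Subtract(name='S_example')([a, b])
model = keras.Model(inputs=[a, b], outputs=diff)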
def get_live_weather(lat, lon, writer):
"""Gets the live weather via lat and long"""
requrl = FORECAST_BASE_URL+forecast_api_token+'/'+str(lat)+','+str(lon)
req = requests.get(requrl)
if req.status_code == requests.codes.ok:
weather = req.json()
if not weather['currently']:
click.secho("No live weather currently", fg="red", bold=True)
return
writer.live_weather(weather)
else:
click.secho("There was problem getting live weather", fg="red", bold=True) | 0.003781 |
def _get_queries(self, migration, method):
"""
Get all of the queries that would be run for a migration.
:param migration: The migration
:type migration: eloquent.migrations.migration.Migration
:param method: The method to execute
:type method: str
:rtype: list
"""
connection = migration.get_connection()
db = self.resolve_connection(connection)
logger = logging.getLogger('eloquent.connection.queries')
level = logger.level
logger.setLevel(logging.DEBUG)
handler = MigratorHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
db.pretend(lambda _: getattr(migration, method)())
logger.removeHandler(handler)
logger.setLevel(level)
return handler.queries | 0.002387 |
def lookup(self, request_class: Request) -> Callable[[Request], BrightsideMessage]:
"""
Looks up the message mapper function associated with this class. Function should take in a Request derived class
and return a BrightsideMessage derived class, for sending on the wire
:param request_class:
:return:
"""
key = request_class.__class__.__name__
if key not in self._registry:
raise ConfigurationException("There is no message mapper associated with this key; we require a mapper")
else:
return self._registry[key] | 0.00821 |
def main(ctx, config, debug): # pragma: no cover
""" gilt - A GIT layering tool. """
ctx.obj = {}
ctx.obj['args'] = {}
ctx.obj['args']['debug'] = debug
ctx.obj['args']['config'] = config | 0.004831 |
def check_par(chrom, pos):
"""Check if a coordinate is in the PAR region
Args:
chrom(str)
pos(int)
Returns:
par(bool)
"""
par = False
for interval in PAR.get(chrom,[]):
if (pos >= interval[0] and pos <= interval[1]):
par = True
return par | 0.017143 |
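A minimal sketch of how this is exercised, assuming PAR maps chromosome names to lists of (start, end) tuples; the coordinates below are illustrative stand-ins, not the module's actual values:

PAR = {'X': [(60001, 2699520), (154931044, 155260560)]}

print(check_par('X', 61000))     # True
print(check_par('X', 5000000))   # False
print(check_par('2', 61000))     # False: no PAR intervals on autosomes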
def evaluate(self, scope, local_vars, block=None):
''' Execute the compiled template and return the result string. Template
evaluation is guaranteed to be performed in the scope object with the
locals specified and with support for yielding to the block.
This method is only used by source generating templates. Subclasses that
override render() may not support all features.
'''
method = self.compiled_method(local_vars.keys())
setattr(scope, 'compiled', method)
return scope.compiled(local_vars, block=block)
def save_settings(cls, project=None, user=None, settings=None):
"""
Save settings for a user without first fetching their preferences.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
- **settings** is a :py:class:`dict` containing the settings to be
saved.
"""
if (isinstance(settings, dict)):
_to_update = settings
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
cls.http_post(
'update_settings',
json={
'project_preferences': {
'user_id': _user_id,
'project_id': _project_id,
'settings': _to_update,
}
}
)
else:
raise TypeError | 0.001457 |
def registerSimulator(self, name=None, hdl=None, analyze_cmd=None, elaborate_cmd=None, simulate_cmd=None):
''' Registers an HDL simulator
name - str, user defined name, used to identify this simulator record
hdl - str, case insensitive, (verilog, vhdl), the HDL to which the simulated MyHDL code will be converted
analyze_cmd - str, system command that will be run to analyze the generated HDL
elaborate_cmd - str, optional, system command that will be run after the analyze phase
simulate_cmd - str, system command that will be run to simulate the analyzed and elaborated design
Before execution of a command string the following substitutions take place:
{topname} is substituted with the name of the simulated MyHDL function
'''
if not isinstance(name, str) or (name.strip() == ""):
raise ValueError("Invalid _simulator name")
if hdl.lower() not in ("vhdl", "verilog"):
raise ValueError("Invalid hdl {}".format(hdl))
if not isinstance(analyze_cmd, str) or (analyze_cmd.strip() == ""):
raise ValueError("Invalid analyzer command")
if elaborate_cmd is not None:
if not isinstance(elaborate_cmd, str) or (elaborate_cmd.strip() == ""):
raise ValueError("Invalid elaborate_cmd command")
if not isinstance(simulate_cmd, str) or (simulate_cmd.strip() == ""):
raise ValueError("Invalid _simulator command")
self.sim_reg[name] = (hdl.lower(), analyze_cmd, elaborate_cmd, simulate_cmd) | 0.00733 |
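An illustrative registration, assuming Icarus Verilog is installed and `cosim` is whatever object exposes this method; the command strings are examples chosen for this sketch, not values mandated by the API:

cosim.registerSimulator(
    name='icarus',
    hdl='verilog',
    analyze_cmd='iverilog -o {topname}.o {topname}.v',
    simulate_cmd='vvp {topname}.o',
)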
def _format_list(result):
"""Format list responses into a table."""
if not result:
return result
if isinstance(result[0], dict):
return _format_list_objects(result)
table = Table(['value'])
for item in result:
table.add_row([iter_to_table(item)])
return table | 0.003226 |
def _getPayload(self, record):
"""
The data that will be sent to loggly.
"""
payload = super(LogglyHandler, self)._getPayload(record)
payload['tags'] = self._implodeTags()
return payload | 0.008511 |
def GetMessages(self, formatter_mediator, event):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
visit_type = event_values.get('visit_type', 0)
transition = self._URL_TRANSITIONS.get(visit_type, None)
if transition:
transition_str = 'Transition: {0!s}'.format(transition)
extra = event_values.get('extra', None)
if extra:
if transition:
extra.append(transition_str)
event_values['extra_string'] = ' '.join(extra)
elif transition:
event_values['extra_string'] = transition_str
return self._ConditionalFormatMessages(event_values) | 0.00495 |
def escape_tags(value, valid_tags):
"""
Escapes all HTML in the given string, then re-enables the tags
listed in valid_tags.
This isn't perfect. Someone could put javascript in here:
<a onClick="alert('hi');">test</a>
So if you use valid_tags, you still need to trust your data entry.
Or we could try:
- only escape the non matching bits
- use BeautifulSoup to understand the elements, escape everything
else and remove potentially harmful attributes (onClick).
- Remove this feature entirely. Half-escaping things securely is
very difficult, developers should not be lured into a false
sense of security.
"""
# 1. escape everything
value = conditional_escape(value)
# 2. Reenable certain tags
if valid_tags:
# TODO: precompile somewhere once?
tag_re = re.compile(r'&lt;(\s*/?\s*(%s))(.*?\s*)&gt;' %
'|'.join(re.escape(tag) for tag in valid_tags))
value = tag_re.sub(_replace_quot, value)
# Allow comments to be hidden
value = value.replace("<!--", "<!--").replace("-->", "-->")
return mark_safe(value) | 0.000796 |
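A self-contained sketch of the re-enabling step, adapted to run on already-escaped text and using a simplified replacement in place of _replace_quot (which is not shown above and also restores quotes inside attributes):

import re

valid_tags = ['b', 'a']
tag_re = re.compile(r'&lt;(\s*/?\s*(%s))(.*?\s*)&gt;' %
                    '|'.join(re.escape(tag) for tag in valid_tags))
escaped = '&lt;b&gt;bold&lt;/b&gt; &lt;script&gt;x&lt;/script&gt;'
print(tag_re.sub(r'<\1\3>', escaped))
# <b>bold</b> &lt;script&gt;x&lt;/script&gt;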
def configure (command = None, condition = None, options = None):
"""
Configures a new resource compilation command specific to a condition,
usually a toolset selection condition. The possible options are:
* <rc-type>(rc|windres) - Indicates the type of options the command
accepts.
Even though the arguments are all optional, only when a command, condition,
and at minimum the rc-type option are given will the command be configured.
This is so that callers don't have to check auto-configuration values
before calling this. And still get the functionality of build failures when
the resource compiler can't be found.
"""
rc_type = feature.get_values('<rc-type>', options)
if rc_type:
assert(len(rc_type) == 1)
rc_type = rc_type[0]
if command and condition and rc_type:
flags('rc.compile.resource', '.RC', condition, command)
flags('rc.compile.resource', '.RC_TYPE', condition, [rc_type.lower()])
flags('rc.compile.resource', 'DEFINES', [], ['<define>'])
flags('rc.compile.resource', 'INCLUDES', [], ['<include>'])
if debug():
print 'notice: using rc compiler ::', condition, '::', command | 0.008709 |
def generate(self, mA=1, age=9.6, feh=0.0, n=1e5, ichrone='mist',
orbpop=None, bands=None, **kwargs):
"""
Generates population.
Called if :class:`MultipleStarPopulation` is initialized without
providing ``stars``, and if ``mA`` is provided.
"""
ichrone = get_ichrone(ichrone, bands=bands)
n = int(n)
#star with m1 orbits (m2+m3). So mA (most massive)
# will correspond to either m1 or m2.
m1, m2, m3 = self.multmass_fn(mA, f_binary=self.f_binary,
f_triple=self.f_triple,
qmin=self.qmin, minmass=self.minmass,
n=n)
#reset n if need be
n = len(m1)
feh = np.ascontiguousarray(np.atleast_1d(feh))
age = np.ascontiguousarray(age)
#generate stellar properties
primary = ichrone(np.ascontiguousarray(m1), age, feh,
bands=bands)
secondary = ichrone(np.ascontiguousarray(m2),age,feh,
bands=bands)
tertiary = ichrone(np.ascontiguousarray(m3),age,feh,
bands=bands)
#clean up columns that become nan when called with mass=0
# Remember, we want mass=0 and mags=inf when something doesn't exist
no_secondary = (m2==0)
no_tertiary = (m3==0)
for c in secondary.columns: #
if re.search('_mag',c):
secondary[c][no_secondary] = np.inf
tertiary[c][no_tertiary] = np.inf
secondary['mass'][no_secondary] = 0
tertiary['mass'][no_tertiary] = 0
if kwargs['period_short'] is None:
if kwargs['period_long'] is None:
period_1 = self.period_long_fn(n)
period_2 = self.period_short_fn(n)
kwargs['period_short'] = np.minimum(period_1, period_2)
kwargs['period_long'] = np.maximum(period_1, period_2)
else:
kwargs['period_short'] = self.period_short_fn(n)
#correct any short periods that are longer than period_long
bad = kwargs['period_short'] > kwargs['period_long']
n_bad = bad.sum()
good_inds = np.where(~bad)[0]
inds = np.random.randint(len(good_inds),size=n_bad)
kwargs['period_short'][bad] = \
kwargs['period_short'][good_inds[inds]]
else:
if kwargs['period_long'] is None:
kwargs['period_long'] = self.period_long_fn(n)
#correct any long periods that are shorter than period_short
bad = kwargs['period_long'] < kwargs['period_short']
n_bad = bad.sum()
good_inds = np.where(~bad)[0]
inds = np.random.randint(len(good_inds),size=n_bad)
kwargs['period_long'][bad] = \
kwargs['period_long'][good_inds[inds]]
if 'ecc_short' not in kwargs:
kwargs['ecc_short'] = self.ecc_fn(n, kwargs['period_short'])
if 'ecc_long' not in kwargs:
kwargs['ecc_long'] = self.ecc_fn(n, kwargs['period_long'])
TriplePopulation.__init__(self, primary=primary,
secondary=secondary, tertiary=tertiary,
orbpop=orbpop, **kwargs)
return self | 0.005496 |
def _get_attr_by_name_and_dimension(name, dimension_id):
"""
Search for an attribute with the given name and dimension_id.
If such an attribute does not exist, create one.
"""
attr = db.DBSession.query(Attr).filter(Attr.name==name, Attr.dimension_id==dimension_id).first()
if attr is None:
# In this case the attr does not exists so we must create it
attr = Attr()
attr.dimension_id = dimension_id
attr.name = name
log.debug("Attribute not found, creating new attribute: name:%s, dimen:%s",
attr.name, attr.dimension_id)
db.DBSession.add(attr)
return attr | 0.011887 |
def Brkic_2011_1(Re, eD):
r'''Calculates Darcy friction factor using the method in Brkic
(2011) [2]_ as shown in [1]_.
.. math::
f_d = [-2\log(10^{-0.4343\beta} + \frac{\epsilon}{3.71D})]^{-2}
.. math::
\beta = \ln \frac{Re}{1.816\ln\left(\frac{1.1Re}{\ln(1+1.1Re)}\right)}
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
No range of validity specified for this equation.
Examples
--------
>>> Brkic_2011_1(1E5, 1E-4)
0.01812455874141297
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Brkic, Dejan."Review of Explicit Approximations to the Colebrook
Relation for Flow Friction." Journal of Petroleum Science and
Engineering 77, no. 1 (April 2011): 34-48.
doi:10.1016/j.petrol.2011.02.006.
'''
beta = log(Re/(1.816*log(1.1*Re/log(1+1.1*Re))))
return (-2*log10(10**(-0.4343*beta)+eD/3.71))**-2 | 0.000773 |
def filter_by_months_per_hour(self, months_per_hour):
"""Filter the Data Collection based on a list of months per hour (as strings).
Args:
months_per_hour: A list of tuples representing months per hour.
Each tuple should possess two values: the first is the month
and the second is the hour. (eg. (12, 23) = December at 11 PM)
Return:
A new Data Collection with filtered data
"""
_filt_values = []
_filt_datetimes = []
for i, d in enumerate(self.datetimes):
if d in months_per_hour:
_filt_datetimes.append(d)
_filt_values.append(self._values[i])
return MonthlyPerHourCollection(
self.header.duplicate(), _filt_values, _filt_datetimes) | 0.003708 |
def get_option_int(self, name, section=None, vars=None, expect=None):
"""Just like ``get_option`` but parse as an integer."""
val = self.get_option(name, section, vars, expect)
if val:
return int(val) | 0.008475 |
def write_ln(self, *text, sep=' '):
"""
Write line
:param text:
:param sep:
:return:
"""
if self.text and self.text[-1] != '\n':
self.text += '\n'
self.text += markdown.text(*text, sep) + '\n'
return self | 0.00692 |
def run(self, host: str="localhost", port: int=8000, debug: bool=False):
"""
start the http server
:param host: The listening host
:param port: The listening port
:param debug: whether it runs in debug mode or not
"""
self.debug = debug
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(self.start_server(host, port))
loop.run_forever()
except KeyboardInterrupt:
loop.run_until_complete(self.signal_manager.activate('before_close'))
loop.run_until_complete(self.close_server_async())
loop.run_until_complete(self.signal_manager.activate('after_close'))
loop.run_until_complete(asyncio.gather(*asyncio.Task.all_tasks()))
loop.close() | 0.012422 |
def _ParseValueData(self, parser_mediator, registry_key, registry_value):
"""Extracts event objects from a Explorer ProgramsCache value data.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Raises:
ParseError: if the value data could not be parsed.
"""
value_data = registry_value.data
value_data_size = len(value_data)
if value_data_size < 4:
return
header_map = self._GetDataTypeMap('programscache_header')
try:
header = self._ReadStructureFromByteStream(
value_data, 0, header_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse header value with error: {0!s}'.format(
exception))
return
if header.format_version not in (1, 9, 12, 19):
parser_mediator.ProduceExtractionWarning(
'unsupported format version: {0:d}'.format(header.format_version))
return
known_folder_identifier = None
if header.format_version == 1:
value_data_offset = 8
elif header.format_version == 9:
value_data_offset = 6
elif header.format_version in (12, 19):
known_folder_identifier = uuid.UUID(bytes_le=value_data[4:20])
value_data_offset = 20
entry_header_map = self._GetDataTypeMap('programscache_entry_header')
entry_footer_map = self._GetDataTypeMap('programscache_entry_footer')
sentinel = 0
if header.format_version != 9:
try:
entry_footer = self._ReadStructureFromByteStream(
value_data[value_data_offset:], value_data_offset, entry_footer_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse sentinel at offset: 0x{0:08x} '
'with error: {1!s}').format(value_data_offset, exception))
return
value_data_offset += entry_footer_map.GetByteSize()
sentinel = entry_footer.sentinel
link_targets = []
while sentinel in (0x00, 0x01):
if value_data_offset >= value_data_size:
break
try:
entry_header = self._ReadStructureFromByteStream(
value_data[value_data_offset:], value_data_offset, entry_header_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse entry header at offset: 0x{0:08x} '
'with error: {1!s}').format(value_data_offset, exception))
break
value_data_offset += entry_header_map.GetByteSize()
display_name = '{0:s} {1:s}'.format(
registry_key.path, registry_value.name)
shell_items_parser = shell_items.ShellItemsParser(display_name)
shell_items_parser.ParseByteStream(
parser_mediator, value_data[value_data_offset:],
codepage=parser_mediator.codepage)
link_target = shell_items_parser.CopyToPath()
link_targets.append(link_target)
value_data_offset += entry_header.data_size
try:
entry_footer = self._ReadStructureFromByteStream(
value_data[value_data_offset:], value_data_offset, entry_footer_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse entry footer at offset: 0x{0:08x} '
'with error: {1!s}').format(value_data_offset, exception))
return
value_data_offset += entry_footer_map.GetByteSize()
sentinel = entry_footer.sentinel
# TODO: recover remaining items.
if known_folder_identifier:
known_folder_identifier = '{0!s}'.format(known_folder_identifier)
event_data = windows_events.WindowsRegistryListEventData()
event_data.key_path = registry_key.path
event_data.known_folder_identifier = known_folder_identifier
event_data.list_name = registry_value.name
event_data.list_values = ' '.join([
'{0:d}: {1:s}'.format(index, link_target)
for index, link_target in enumerate(link_targets)])
event_data.value_name = registry_value.name
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | 0.007312 |
def report(usaf):
"""generate report for usaf base"""
fig = plt.figure()
ax = fig.add_subplot(111)
station_info = geo.station_info(usaf)
y = {}
for i in range(1991, 2011):
monthData = monthly(usaf, i)
t = sum(monthData)
y[i] = t
print t
tmy3tot = tmy3.total(usaf)
average = sum([v for k, v in y.items()])/20.
s = sorted(y.items(), key=lambda t: t[1])
o = sorted(y.items(), key=lambda t: t[0])
twohigh = s[-1][1] + s[-2][1]
twolow = s[0][1] + s[1][1]
mintol = 1-twolow/2./average
plustol = twohigh/2./average-1
txt = ""
txt += "%s\n" % station_info['Site Name']
txt += 'TMY3/hist: %s/' % int(round(tmy3tot))
txt += '%s\n' % int(round(average))
txt += "high/low av: %s/" % int(round(twohigh/2.))
txt += "%s\n" % int(round(twolow/2.))
txt += "+%s/-%s%% " % (round(plustol*100, 0), round(mintol*100, 0))
txt += "(-%s%% of TMY3)" % round((1-twolow/2./tmy3tot)*100, 0)
print txt
x = np.array([k for k, v in o])
y = np.array([v for k, v in o])
rx = x[1:]
ry = [(v + y[i+1])/2 for i, v in enumerate(y[:-1])]
fit = pylab.polyfit(x, y, 3)
fit_fn = pylab.poly1d(fit)
f = interp1d(x, y, kind='cubic')
f2 = interp1d(rx, ry, kind='cubic')
xnew = np.linspace(min(x), max(x), 200)
x2 = np.linspace(min(rx), max(rx), 200)
# ax.plot(x,y)
ax.plot(xnew, f(xnew), label="Annual GHI")
ax.plot(xnew, fit_fn(xnew), label='trendline')
ax.plot(x2, f2(x2), label='2 Year Ave')
ax.plot([min(x), max(x)], [tmy3tot, tmy3tot], linestyle='--')
leg = plt.legend(title=txt, loc=4, fancybox=True)
leg.get_frame().set_alpha(0.5)
# fig.text(min(x),min(y)-min(y)*.1,txt)
# fig.text(.1,.1,txt)
plt.tight_layout()
fig.savefig('%s_annual_GHI.pdf' % (usaf), format='pdf') | 0.000544 |
def angle(self, other):
"""Return the angle to the vector other"""
return math.acos(self.dot(other) / (self.magnitude() * other.magnitude())) | 0.019108 |
def check_sim_in(self):
'''check for FDM packets from runsim'''
try:
pkt = self.sim_in.recv(17*8 + 4)
except socket.error as e:
if not e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]:
raise
return
if len(pkt) != 17*8 + 4:
# wrong size, discard it
print("wrong size %u" % len(pkt))
return
(latitude, longitude, altitude, heading, v_north, v_east, v_down,
ax, ay, az,
phidot, thetadot, psidot,
roll, pitch, yaw,
vcas, check) = struct.unpack('<17dI', pkt)
(p, q, r) = self.convert_body_frame(radians(roll), radians(pitch), radians(phidot), radians(thetadot), radians(psidot))
try:
self.hil_state_msg = self.master.mav.hil_state_encode(int(time.time()*1e6),
radians(roll),
radians(pitch),
radians(yaw),
p,
q,
r,
int(latitude*1.0e7),
int(longitude*1.0e7),
int(altitude*1.0e3),
int(v_north*100),
int(v_east*100),
0,
int(ax*1000/9.81),
int(ay*1000/9.81),
int(az*1000/9.81))
except Exception:
return | 0.015117 |
def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
original dataset the clusterer was trained on. This function is more
efficient by making use of the fact that all points are already in the
condensed tree, and processing in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result | 0.000962 |
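A typical call against the public hdbscan API; prediction data has to be generated at fit time for this to work:

import hdbscan
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
clusterer = hdbscan.HDBSCAN(min_cluster_size=10, prediction_data=True).fit(X)
membership = hdbscan.all_points_membership_vectors(clusterer)
print(membership.shape)  # (200, n_clusters)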
def _add_res(line):
'''
Analyse a local resource line from ``drbdadm status``
'''
global resource
fields = line.strip().split()
if resource:
ret.append(resource)
resource = {}
resource["resource name"] = fields[0]
resource["local role"] = fields[1].split(":")[1]
resource["local volumes"] = []
resource["peer nodes"] = [] | 0.002618 |
def search(self):
"""Search for a url by returning the value from the first callback that
returns a non-None value"""
for cb in SearchUrl.search_callbacks:
try:
v = cb(self)
if v is not None:
return v
except Exception as e:
raise | 0.005764 |
def iter_chunks(l, size):
"""
Returns a generator yielding chunks of size *size* from a list, integer or generator *l*. A *size*
smaller than 1 results in no chunking at all.
"""
if isinstance(l, six.integer_types):
l = six.moves.range(l)
if is_lazy_iterable(l):
if size < 1:
yield list(l)
else:
chunk = []
for elem in l:
if len(chunk) < size:
chunk.append(elem)
else:
yield chunk
chunk = [elem]
else:
if chunk:
yield chunk
else:
if size < 1:
yield l
else:
for i in six.moves.range(0, len(l), size):
yield l[i:i + size] | 0.005 |
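Example behaviour, assuming is_lazy_iterable treats generators as lazy (which its name suggests):

list(iter_chunks([1, 2, 3, 4, 5], 2))            # [[1, 2], [3, 4], [5]]
list(iter_chunks((x * x for x in range(5)), 3))  # [[0, 1, 4], [9, 16]]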
def get_processed_events(self) -> List[Event]:
"""Get all processed events.
This method is intended to be used to recover events stuck in the
processed state which could happen if an event handling processing
an processed event goes down before completing the event processing.
Returns:
list[Events], list of event objects.
"""
event_ids = DB.get_list(self._processed_key)
events = []
for event_id in event_ids:
event_str = DB.get_hash_value(self._data_key, event_id)
event_dict = ast.literal_eval(event_str)
event_dict['id'] = event_id
event_dict['subscriber'] = self._subscriber
events.append(Event.from_config(event_dict))
return events | 0.002519 |
def maskname(mask):
"""
Returns the event name associated to mask. IN_ISDIR is appended to
the result when appropriate. Note: only one event is returned, because
only one event can be raised at a given time.
@param mask: mask.
@type mask: int
@return: event name.
@rtype: str
"""
ms = mask
name = '%s'
if mask & IN_ISDIR:
ms = mask - IN_ISDIR
name = '%s|IN_ISDIR'
return name % EventsCodes.ALL_VALUES[ms] | 0.003752 |
def split_sentences(text):
'''
The regular expression matches all sentence ending punctuation and splits the string at those points.
At this point in the code, the list looks like this ["Hello, world", "!" ... ]. The punctuation and all quotation marks
are separated from the actual text. The first s_iter line turns each group of two items in the list into a tuple,
excluding the last item in the list (the last item in the list does not need to have this performed on it). Then,
the second s_iter line combines each tuple in the list into a single item and removes any whitespace at the beginning
of the line. Now, the s_iter list is formatted correctly but it is missing the last item of the sentences list. The
second to last line adds this item to the s_iter list and the last line returns the full list.
'''
sentences = regex_split(u'(?<![A-ZА-ЯЁ])([.!?]"?)(?=\s+\"?[A-ZА-ЯЁ])', text, flags=REGEX_UNICODE)
s_iter = zip(*[iter(sentences[:-1])] * 2)
s_iter = [''.join(map(unicode,y)).lstrip() for y in s_iter]
s_iter.append(sentences[-1])
return s_iter | 0.010743 |
def task_table(self, task_id=None):
"""Fetch and parse the task table information for one or more task IDs.
Args:
task_id: A hex string of the task ID to fetch information about. If
this is None, then the task object table is fetched.
Returns:
Information from the task table.
"""
self._check_connected()
if task_id is not None:
task_id = ray.TaskID(hex_to_binary(task_id))
return self._task_table(task_id)
else:
task_table_keys = self._keys(
ray.gcs_utils.TablePrefix_RAYLET_TASK_string + "*")
task_ids_binary = [
key[len(ray.gcs_utils.TablePrefix_RAYLET_TASK_string):]
for key in task_table_keys
]
results = {}
for task_id_binary in task_ids_binary:
results[binary_to_hex(task_id_binary)] = self._task_table(
ray.TaskID(task_id_binary))
return results | 0.001938 |
def alignment_chart(data):
"""Make the HighCharts HTML to plot the alignment rates """
keys = OrderedDict()
keys['reads_mapped'] = {'color': '#437bb1', 'name': 'Mapped'}
keys['reads_unmapped'] = {'color': '#b1084c', 'name': 'Unmapped'}
# Config for the plot
plot_conf = {
'id': 'samtools_alignment_plot',
'title': 'Samtools stats: Alignment Scores',
'ylab': '# Reads',
'cpswitch_counts_label': 'Number of Reads'
}
return bargraph.plot(data, keys, plot_conf) | 0.001916 |
def defer_entity_syncing(wrapped, instance, args, kwargs):
"""
A decorator that can be used to defer the syncing of entities until after the method has been run
This is being introduced to help avoid deadlocks in the meantime as we attempt to better understand
why they are happening
"""
# Defer entity syncing while we run our method
sync_entities.defer = True
# Run the method
try:
return wrapped(*args, **kwargs)
# After we run the method disable the deferred syncing
# and sync all the entities that have been buffered to be synced
finally:
# Enable entity syncing again
sync_entities.defer = False
# Get the models that need to be synced
model_objs = list(sync_entities.buffer.values())
# If none is in the model objects we need to sync all
if None in sync_entities.buffer:
model_objs = list()
# Sync the entities that were deferred if any
if len(sync_entities.buffer):
sync_entities(*model_objs)
# Clear the buffer
sync_entities.buffer = {} | 0.002695 |
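A hypothetical application, assuming the function above is turned into a decorator via wrapt.decorator, as its wrapped/instance/args/kwargs signature suggests; Account is a made-up Django model for illustration:

@defer_entity_syncing
def bulk_import(rows):
    # Each save below would normally trigger entity syncing; deferring batches
    # everything into a single sync_entities() call when bulk_import returns.
    for row in rows:
        Account.objects.create(**row)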
def print_app_tb_only(self, file):
"NOT_RPYTHON"
tb = self._application_traceback
if tb:
import linecache
print >> file, "Traceback (application-level):"
while tb is not None:
co = tb.frame.pycode
lineno = tb.get_lineno()
fname = co.co_filename
if fname.startswith('<inline>\n'):
lines = fname.split('\n')
fname = lines[0].strip()
try:
l = lines[lineno]
except IndexError:
l = ''
else:
l = linecache.getline(fname, lineno)
print >> file, " File \"%s\"," % fname,
print >> file, "line", lineno, "in", co.co_name
if l:
if l.endswith('\n'):
l = l[:-1]
l = " " + l.lstrip()
print >> file, l
tb = tb.next | 0.006737 |
def get_events(self):
"""Get events from the cloud node."""
to_send = {'limit': 50}
response = self._send_data('POST', 'admin', 'get-events', to_send)
output = {'message': ""}
for event in response['events']:
desc = "Source IP: {ip}\n"
desc += "Datetime: {time}\n"
desc += "Indicator: {match}\n"
desc += "Method: {method}\n"
desc += "URL: {url}\n"
desc += "Request Type: {type}\n"
desc += "User-Agent: {userAgent}\n"
desc += "Contact: {contact}\n"
desc += "\n"
output['message'] += desc.format(**event)
return output | 0.002915 |
def _get_basilisp_bytecode(
fullname: str, mtime: int, source_size: int, cache_data: bytes
) -> List[types.CodeType]:
"""Unmarshal the bytes from a Basilisp bytecode cache file, validating the
file header prior to returning. If the file header does not match, throw
an exception."""
exc_details = {"name": fullname}
magic = cache_data[:4]
raw_timestamp = cache_data[4:8]
raw_size = cache_data[8:12]
if magic != MAGIC_NUMBER:
message = (
f"Incorrect magic number ({magic}) in {fullname}; expected {MAGIC_NUMBER}"
)
logger.debug(message)
raise ImportError(message, **exc_details) # type: ignore
elif len(raw_timestamp) != 4:
message = f"Reached EOF while reading timestamp in {fullname}"
logger.debug(message)
raise EOFError(message)
elif _r_long(raw_timestamp) != mtime:
message = f"Non-matching timestamp ({_r_long(raw_timestamp)}) in {fullname} bytecode cache; expected {mtime}"
logger.debug(message)
raise ImportError(message, **exc_details) # type: ignore
elif len(raw_size) != 4:
message = f"Reached EOF while reading size of source in {fullname}"
logger.debug(message)
raise EOFError(message)
elif _r_long(raw_size) != source_size:
message = f"Non-matching filesize ({_r_long(raw_size)}) in {fullname} bytecode cache; expected {source_size}"
logger.debug(message)
raise ImportError(message, **exc_details) # type: ignore
return marshal.loads(cache_data[12:]) | 0.002559 |
def event_env_updated(app, env):
"""Called by Sphinx during phase 3 (resolving).
* Find Imgur IDs that need to be queried.
* Query the Imgur API for new/outdated albums/images.
:param sphinx.application.Sphinx app: Sphinx application object.
:param sphinx.environment.BuildEnvironment env: Sphinx build environment.
"""
client_id = app.config['imgur_client_id']
ttl = app.config['imgur_api_cache_ttl']
album_cache = app.builder.env.imgur_album_cache
image_cache = app.builder.env.imgur_image_cache
album_whitelist = {v.imgur_id for v in album_cache.values() if v.mod_time == 0}
image_whitelist = {v.imgur_id for v in image_cache.values() if v.mod_time == 0}
# Build whitelist of Imgur IDs in just new/updated docs.
for doctree in (env.get_doctree(n) for n in app.builder.get_outdated_docs()):
for node in (n for c in (ImgurTextNode, ImgurImageNode) for n in doctree.traverse(c)):
if node.album:
album_whitelist.add(node.imgur_id)
else:
image_whitelist.add(node.imgur_id)
# Update the cache only if an added/changed doc has an Imgur album/image.
if album_whitelist or image_whitelist:
update_cache(album_cache, image_cache, app, client_id, ttl, album_whitelist, image_whitelist)
prune_cache(album_cache, image_cache, app) | 0.004386 |
def CsvToTable(self, buf, header=True, separator=","):
"""Parses buffer into tabular format.
Strips off comments (preceded by '#').
Optionally parses and indexes by first line (header).
Args:
buf: String file buffer containing CSV data.
header: Is the first line of buffer a header.
separator: String that CSV is separated by.
Returns:
int, the size of the table created.
Raises:
TableError: A parsing error occurred.
"""
self.Reset()
header_row = self.row_class()
if header:
line = buf.readline()
header_str = ""
while not header_str:
# Remove comments.
header_str = line.split("#")[0].strip()
if not header_str:
line = buf.readline()
header_list = header_str.split(separator)
header_length = len(header_list)
for entry in header_list:
entry = entry.strip()
if entry in header_row:
raise TableError("Duplicate header entry %r." % entry)
header_row[entry] = entry
header_row.row = 0
self._table[0] = header_row
# xreadlines would be better but not supported by StringIO for testing.
for line in buf:
# Support commented lines, provide '#' is first character of line.
if line.startswith("#"):
continue
lst = line.split(separator)
lst = [l.strip() for l in lst]
if header and len(lst) != header_length:
# Silently drop illegal line entries
continue
if not header:
header_row = self.row_class()
header_length = len(lst)
header_row.values = dict(
zip(range(header_length), range(header_length))
)
self._table[0] = header_row
header = True
continue
new_row = self.NewRow()
new_row.values = lst
header_row.row = self.size + 1
self._table.append(new_row)
return self.size | 0.001353 |
def hash(self):
'''
:rtype: int
:return: hash of the container
'''
hashed = super(Repeat, self).hash()
return khash(hashed, self._min_times, self._max_times, self._step, self._repeats) | 0.012931 |
def average_last_builds(connection, package, limit=5):
"""
Find the average duration time for the last couple of builds.
:param connection: txkoji.Connection
:param package: package name
:returns: deferred that when fired returns a datetime.timedelta object, or
None if there were no previous builds for this package.
"""
# TODO: take branches (targets, or tags, etc) into account when estimating
# a package's build time.
state = build_states.COMPLETE
opts = {'limit': limit, 'order': '-completion_time'}
builds = yield connection.listBuilds(package, state=state, queryOpts=opts)
if not builds:
defer.returnValue(None)
durations = [build.duration for build in builds]
average = sum(durations, timedelta()) / len(durations)
# print('average duration for %s is %s' % (package, average))
defer.returnValue(average) | 0.001112 |
def wrann(self, write_fs=False, write_dir=''):
"""
Write a WFDB annotation file from this object.
Parameters
----------
write_fs : bool, optional
Whether to write the `fs` attribute to the file.
"""
for field in ['record_name', 'extension']:
if getattr(self, field) is None:
raise Exception('Missing required field for writing annotation file: ',field)
present_label_fields = self.get_label_fields()
if not present_label_fields:
raise Exception('At least one annotation label field is required to write the annotation: ', ann_label_fields)
# Check the validity of individual fields
self.check_fields()
# Standardize the format of the custom_labels field
self.standardize_custom_labels()
# Create the label map used in this annotaion
self.create_label_map()
# Check the cohesion of fields
self.check_field_cohesion(present_label_fields)
# Calculate the label_store field if necessary
if 'label_store' not in present_label_fields:
self.convert_label_attribute(source_field=present_label_fields[0],
target_field='label_store')
# Write the header file using the specified fields
self.wr_ann_file(write_fs=write_fs, write_dir=write_dir)
return | 0.003499 |
def tree_is_in_collection(collection, study_id=None, tree_id=None):
"""Takes a collection object (or a filepath to collection object), returns
True if it includes a decision to include the specified tree
"""
included = collection_to_included_trees(collection)
study_id = study_id.strip()
tree_id = tree_id.strip()
for decision in included:
if decision['studyID'] == study_id and decision['treeID'] == tree_id:
return True
return False | 0.002058 |
def _perform_update_steps(
self, observ, action, old_policy_params, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
iterations. We need to return the summary of a single iteration, and thus
choose the one from halfway through the iterations.
Args:
observ: Sequences of observations.
action: Sequences of actions.
old_policy_params: Parameters of the behavioral policy.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
return_ = utility.discounted_return(
reward, length, self._config.discount)
value = self._network(observ, length).value
if self._config.gae_lambda:
advantage = utility.lambda_advantage(
reward, value, length, self._config.discount,
self._config.gae_lambda)
else:
advantage = return_ - value
mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8)
advantage = tf.Print(
advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)],
'return and value: ')
advantage = tf.Print(
advantage, [tf.reduce_mean(advantage)],
'normalized advantage: ')
episodes = (observ, action, old_policy_params, reward, advantage)
value_loss, policy_loss, summary = parts.iterate_sequences(
self._update_step, [0., 0., ''], episodes, length,
self._config.chunk_length,
self._config.batch_size,
self._config.update_epochs,
padding_value=1)
print_losses = tf.group(
tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '),
tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: '))
with tf.control_dependencies([value_loss, policy_loss, print_losses]):
return summary[self._config.update_epochs // 2] | 0.002041 |
def Sum(idx, *args, **kwargs):
"""Instantiator for an arbitrary indexed sum.
This returns a function that instantiates the appropriate
:class:`QuantumIndexedSum` subclass for a given term expression. It is the
preferred way to "manually" create indexed sum expressions, closely
resembling the normal mathematical notation for sums.
Args:
idx (IdxSym): The index symbol over which the sum runs
args: arguments that describe the values over which `idx` runs,
kwargs: keyword-arguments, used in addition to `args`
Returns:
callable: an instantiator function that takes a
arbitrary `term` that should generally contain the `idx` symbol, and
returns an indexed sum over that `term` with the index range specified
by the original `args` and `kwargs`.
There is considerable flexibility to specify concise `args` for a variety
of index ranges.
Assume the following setup::
>>> i = IdxSym('i'); j = IdxSym('j')
>>> ket_i = BasisKet(FockIndex(i), hs=0)
>>> ket_j = BasisKet(FockIndex(j), hs=0)
>>> hs0 = LocalSpace('0')
Giving `i` as the only argument will sum over the indices of the basis
states of the Hilbert space of `term`::
>>> s = Sum(i)(ket_i)
>>> unicode(s)
'∑_{i ∈ ℌ₀} |i⟩⁽⁰⁾'
You may also specify a Hilbert space manually::
>>> Sum(i, hs0)(ket_i) == Sum(i, hs=hs0)(ket_i) == s
True
Note that using :func:`Sum` is vastly more readable than the equivalent
"manual" instantiation::
>>> s == KetIndexedSum.create(ket_i, IndexOverFockSpace(i, hs=hs0))
True
By nesting calls to `Sum`, you can instantiate sums running over multiple
indices::
>>> unicode( Sum(i)(Sum(j)(ket_i * ket_j.dag())) )
'∑_{i,j ∈ ℌ₀} |i⟩⟨j|⁽⁰⁾'
Giving two integers in addition to the index `i` in `args`, the index will
run between the two values::
>>> unicode( Sum(i, 1, 10)(ket_i) )
'∑_{i=1}^{10} |i⟩⁽⁰⁾'
>>> Sum(i, 1, 10)(ket_i) == Sum(i, 1, to=10)(ket_i)
True
You may also include an optional step width, either as a third integer or
using the `step` keyword argument.
>>> #unicode( Sum(i, 1, 10, step=2)(ket_i) ) # TODO
Lastly, by passing a tuple or list of values, the index will run over all
the elements in that tuple or list::
>>> unicode( Sum(i, (1, 2, 3))(ket_i))
'∑_{i ∈ {1,2,3}} |i⟩⁽⁰⁾'
"""
from qnet.algebra.core.hilbert_space_algebra import LocalSpace
from qnet.algebra.core.scalar_algebra import ScalarValue
from qnet.algebra.library.spin_algebra import SpinSpace
dispatch_table = {
tuple(): _sum_over_fockspace,
(LocalSpace, ): _sum_over_fockspace,
(SpinSpace, ): _sum_over_fockspace,
(list, ): _sum_over_list,
(tuple, ): _sum_over_list,
(int, ): _sum_over_range,
(int, int): _sum_over_range,
(int, int, int): _sum_over_range,
}
key = tuple((type(arg) for arg in args))
try:
idx_range_func = dispatch_table[key]
except KeyError:
raise TypeError("No implementation for args of type %s" % str(key))
def sum(term):
if isinstance(term, ScalarValue._val_types):
term = ScalarValue.create(term)
idx_range = idx_range_func(term, idx, *args, **kwargs)
return term._indexed_sum_cls.create(term, idx_range)
return sum | 0.000287 |
def process_nxml(nxml_filename, pmid=None, extra_annotations=None,
cleanup=True, add_grounding=True):
"""Process an NXML file using the ISI reader
First converts NXML to plain text and preprocesses it, then runs the ISI
reader, and processes the output to extract INDRA Statements.
Parameters
----------
nxml_filename : str
nxml file to process
pmid : Optional[str]
pmid of this nxml file, to be added to the Evidence object of the
extracted INDRA statements
extra_annotations : Optional[dict]
Additional annotations to add to the Evidence object of all extracted
INDRA statements. Extra annotations called 'interaction' are ignored
since this is used by the processor to store the corresponding
raw ISI output.
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing extracted Statements
"""
if extra_annotations is None:
extra_annotations = {}
    # Create a temporary directory to store the preprocessed input
    pp_dir = tempfile.mkdtemp('indra_isi_pp_output')
    pp = IsiPreprocessor(pp_dir)
pp.preprocess_nxml_file(nxml_filename, pmid, extra_annotations)
# Run the ISI reader and extract statements
ip = process_preprocessed(pp)
if add_grounding:
ip.add_grounding()
if cleanup:
# Remove temporary directory with processed input
shutil.rmtree(pp_dir)
else:
logger.info('Not cleaning up %s' % pp_dir)
return ip | 0.000546 |
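A hypothetical usage sketch for the function above (the import path and the `statements` attribute are assumptions based on the docstring, and the NXML path is a placeholder):
from indra.sources.isi.api import process_nxml  # import path assumed
ip = process_nxml('example_article.nxml', pmid='12345678',
                  extra_annotations={'pipeline': 'demo'})
if ip is not None:
    for stmt in ip.statements:  # attribute name assumed
        print(stmt)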
def get_kernel_ports(self, kernel_id):
"""Return a dictionary of ports for a kernel.
Parameters
==========
kernel_id : uuid
The id of the kernel.
Returns
=======
port_dict : dict
            A dict of key, value pairs where the keys are the channel
            names (shell_port, iopub_port, stdin_port, hb_port) and the
            values are the integer port numbers for those channels.
"""
# this will raise a KeyError if not found:
km = self.get_kernel(kernel_id)
return dict(shell_port=km.shell_port,
iopub_port=km.iopub_port,
stdin_port=km.stdin_port,
hb_port=km.hb_port,
) | 0.00411 |
def to_long_time_string(self) -> str:
""" Return the iso time string only """
hour = self.time.hour
minute = self.time.minute
second = self.time.second
return f"{hour:02}:{minute:02}:{second:02}" | 0.008511 |
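The zero-padded formatting used above can be checked with a plain `datetime.time` (a standalone sketch, not the class this method belongs to):
import datetime
t = datetime.time(9, 5, 7)
print(f"{t.hour:02}:{t.minute:02}:{t.second:02}")  # -> 09:05:07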
def list_objects(self, bucket_name, prefix='', recursive=False):
"""
List objects in the given bucket.
Examples:
objects = minio.list_objects('foo')
for current_object in objects:
print(current_object)
# hello
# hello/
# hello/
# world/
objects = minio.list_objects('foo', prefix='hello/')
for current_object in objects:
print(current_object)
# hello/world/
objects = minio.list_objects('foo', recursive=True)
for current_object in objects:
print(current_object)
# hello/world/1
# world/world/2
# ...
objects = minio.list_objects('foo', prefix='hello/',
recursive=True)
for current_object in objects:
print(current_object)
# hello/world/1
# hello/world/2
:param bucket_name: Bucket to list objects from
:param prefix: String specifying objects returned must begin with
:param recursive: If yes, returns all objects for a specified prefix
:return: An iterator of objects in alphabetical order.
"""
is_valid_bucket_name(bucket_name)
# If someone explicitly set prefix to None convert it to empty string.
if prefix is None:
prefix = ''
method = 'GET'
# Initialize query parameters.
query = {
'max-keys': '1000',
'prefix': prefix
}
# Delimited by default.
if not recursive:
query['delimiter'] = '/'
marker = ''
is_truncated = True
while is_truncated:
if marker:
query['marker'] = marker
headers = {}
response = self._url_open(method,
bucket_name=bucket_name,
query=query,
headers=headers)
objects, is_truncated, marker = parse_list_objects(response.data,
bucket_name=bucket_name)
for obj in objects:
yield obj | 0.001297 |
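The marker/`is_truncated` loop above is a standard pattern for paginated listings; here is a minimal self-contained sketch of the same idea over an in-memory list (not the MinIO API):
def paged_listing(items, page_size):
    # Yield items page by page, using the last item of each page as the marker.
    marker = None
    is_truncated = True
    while is_truncated:
        start = 0 if marker is None else items.index(marker) + 1
        page = items[start:start + page_size]
        is_truncated = (start + page_size) < len(items)
        marker = page[-1] if page else None
        for item in page:
            yield item
print(list(paged_listing(['a', 'b', 'c', 'd', 'e'], page_size=2)))
# -> ['a', 'b', 'c', 'd', 'e']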
def _validate_datetime_from_to(cls, start, end):
"""
validate from-to
:param start: Start Day(YYYYMMDD)
:param end: End Day(YYYYMMDD)
        :return: None
        :raises MlbAmBadParameter: if start > end
"""
if not start <= end:
raise MlbAmBadParameter("not Start Day({start}) <= End Day({end})".format(start=start, end=end)) | 0.008451 |
def _send(self, ip, port, data):
"""
Send an UDP message
:param ip: Ip to send to
:type ip: str
:param port: Port to send to
:type port: int
:return: Number of bytes sent
:rtype: int
"""
return self._listen_socket.sendto(data, (ip, port)) | 0.00627 |
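For reference, the underlying call is just `socket.sendto`; a minimal standalone sketch (the address is a placeholder and nothing needs to be listening, since UDP is fire-and-forget):
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sent = sock.sendto(b"hello", ("127.0.0.1", 9999))  # returns number of bytes sent
print(sent)  # -> 5
sock.close()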
def get_relationship_lookup_session_for_family(self, family_id=None, proxy=None, *args, **kwargs):
"""Gets the ``OsidSession`` associated with the relationship lookup service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the family
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.RelationshipLookupSession) - a
``RelationshipLookupSession``
raise: NotFound - no ``Family`` found by the given ``Id``
raise: NullArgument - ``family_id`` or ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_lookup()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_relationship_lookup()``
and ``supports_visible_federation()`` are ``true``*
"""
if not family_id:
raise NullArgument
if not self.supports_relationship_lookup():
raise Unimplemented()
##
# Need to include check to see if the familyId is found otherwise raise NotFound
##
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.RelationshipLookupSession(family_id, proxy=proxy, runtime=self._runtime, **kwargs)
except AttributeError:
raise OperationFailed()
return session | 0.004461 |
def addMatch(self, callback, mtype=None, sender=None, interface=None,
member=None, path=None, path_namespace=None, destination=None,
arg=None, arg_path=None, arg0namespace=None):
"""
Creates a message matching rule, associates it with the specified
callback function, and sends the match rule to the DBus daemon.
        The arguments to this function exactly follow the DBus
        specification. Refer to the "Message Bus Message Routing" section of
        the DBus specification for details.
@rtype: C{int}
@returns: a L{Deferred} to an integer id that may be used to unregister
the match rule
"""
l = []
def add(k, v):
if v is not None:
l.append("%s='%s'" % (k, v))
add('type', mtype)
add('sender', sender)
add('interface', interface)
add('member', member)
add('path', path)
add('path_namespace', path_namespace)
add('destination', destination)
if arg:
for idx, v in arg:
add('arg%d' % (idx,), v)
if arg_path:
for idx, v in arg_path:
add('arg%dpath' % (idx,), v)
add('arg0namespace', arg0namespace)
rule = ','.join(l)
d = self.callRemote(
'/org/freedesktop/DBus',
'AddMatch',
interface='org.freedesktop.DBus',
destination='org.freedesktop.DBus',
body=[rule],
signature='s',
)
def ok(_):
rule_id = self.router.addMatch(
callback,
mtype,
sender,
interface,
member,
path,
path_namespace,
destination,
arg,
arg_path,
arg0namespace,
)
self.match_rules[rule_id] = rule
return rule_id
d.addCallbacks(ok)
return d | 0.002441 |
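The match rule sent to the daemon is just a comma-separated list of key='value' pairs; a small standalone sketch of the same assembly logic used above (not the DBus client API itself):
def build_match_rule(**fields):
    parts = []
    for key, value in fields.items():
        if value is not None:
            parts.append("%s='%s'" % (key, value))
    return ','.join(parts)
rule = build_match_rule(type='signal', interface='org.freedesktop.DBus',
                        member='NameOwnerChanged', path=None)
print(rule)
# -> type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'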
def run_apidoc(_):
"""This method is required by the setup method below."""
import os
dirname = os.path.dirname(__file__)
ignore_paths = [os.path.join(dirname, '../../aaf2/model'),]
# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/apidoc.py
argv = [
'--force',
'--no-toc',
'--separate',
'--module-first',
'--output-dir',
os.path.join(dirname, 'api'),
os.path.join(dirname, '../../aaf2'),
] + ignore_paths
from sphinx.ext import apidoc
apidoc.main(argv) | 0.003578 |
def _modify(item, func):
"""
Modifies each item.keys() string based on the func passed in.
Often used with inflection's camelize or underscore methods.
:param item: dictionary representing item to be modified
:param func: function to run on each key string
:return: dictionary where each key has been modified by func.
"""
result = dict()
for key in item:
result[func(key)] = item[key]
return result | 0.002232 |
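A quick standalone usage sketch of the key-renaming helper above, using a simplified camelCase-to-snake_case function in place of inflection's `underscore`:
import re
def underscore(name):
    # Simplified stand-in for inflection.underscore
    return re.sub(r'(?<=[a-z0-9])([A-Z])', r'_\1', name).lower()
item = {'firstName': 'Ada', 'zipCode': '12345'}
result = {underscore(key): item[key] for key in item}  # same loop as _modify
print(result)  # -> {'first_name': 'Ada', 'zip_code': '12345'}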
def guess_mime_type(content, deftype):
    """Guess the mime type of a block of text.
    :param content: content we're finding the type of
    :type content: str
    :param deftype: default mime type returned when no prefix matches
    :type deftype: str
    :rtype: str
    :return: the guessed mime type, or ``deftype`` if no known prefix matched
    """
    # Mappings recognized by cloudinit
starts_with_mappings={
'#include' : 'text/x-include-url',
'#!' : 'text/x-shellscript',
'#cloud-config' : 'text/cloud-config',
'#upstart-job' : 'text/upstart-job',
'#part-handler' : 'text/part-handler',
'#cloud-boothook' : 'text/cloud-boothook'
}
rtype = deftype
for possible_type,mimetype in starts_with_mappings.items():
if content.startswith(possible_type):
rtype = mimetype
break
return(rtype) | 0.012315 |
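Assuming the function above is in scope, a couple of quick checks of the prefix matching:
print(guess_mime_type('#!/bin/bash\necho hi', 'text/plain'))               # -> text/x-shellscript
print(guess_mime_type('#cloud-config\npackages:\n - vim', 'text/plain'))   # -> text/cloud-config
print(guess_mime_type('just some text', 'text/plain'))                     # -> text/plain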
def object_to_dict(cls, obj):
"""
This function converts Objects into Dictionary
"""
dict_obj = dict()
if obj is not None:
if type(obj) == list:
dict_list = []
for inst in obj:
dict_list.append(cls.object_to_dict(inst))
dict_obj["list"] = dict_list
elif not cls.is_primitive(obj):
for key in obj.__dict__:
# is an object
if type(obj.__dict__[key]) == list:
dict_list = []
for inst in obj.__dict__[key]:
dict_list.append(cls.object_to_dict(inst))
dict_obj[key] = dict_list
elif not cls.is_primitive(obj.__dict__[key]):
temp_dict = cls.object_to_dict(obj.__dict__[key])
dict_obj[key] = temp_dict
else:
dict_obj[key] = obj.__dict__[key]
elif cls.is_primitive(obj):
return obj
return dict_obj | 0.001771 |
def normalize(arg=None):
"""Normalizes an argument for signing purpose.
This is used for normalizing the arguments of RPC method calls.
:param arg: The argument to normalize
    :return: A string representing the normalized argument.
.. doctest::
>>> from cloud.rpc import normalize
>>> normalize(['foo', 42, 'bar'])
'foo42bar'
>>> normalize({'yellow': 1, 'red': 2, 'pink' : 3})
'pink3red2yellow1'
>>> normalize(['foo', 42, {'yellow': 1, 'red': 2, 'pink' : 3}, 'bar'])
'foo42pink3red2yellow1bar'
>>> normalize(None)
''
>>> normalize([None, 1,2])
'12'
>>> normalize({2: [None, 1,2], 3: None, 4:5})
'212345'
"""
res = ''
t_arg = type(arg)
if t_arg in (list, tuple):
for i in arg:
res += normalize(i)
elif t_arg is dict:
        keys = sorted(arg.keys())
        for key in keys:
res += '%s%s' % (normalize(key), normalize(arg[key]))
elif t_arg is unicode:
res = arg.encode('utf8')
elif t_arg is bool:
res = 'true' if arg else 'false'
elif arg != None:
res = str(arg)
return res | 0.001711 |
def cluster_path(cls, project, instance, cluster):
"""Return a fully-qualified cluster string."""
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}/clusters/{cluster}",
project=project,
instance=instance,
cluster=cluster,
) | 0.006042 |
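The expanded template is a plain resource path; the result is equivalent to a simple format call (shown here for illustration only — the real method goes through `google.api_core.path_template`):
path = "projects/{project}/instances/{instance}/clusters/{cluster}".format(
    project="my-project", instance="my-instance", cluster="my-cluster")
print(path)  # -> projects/my-project/instances/my-instance/clusters/my-cluster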
def address_line_1(self):
"""
This method returns the first line of the address.
:return:
"""
formalised_address = self.formalised_address
if formalised_address is None:
return
try:
address = formalised_address.split(',')
except Exception as e:
if self._debug:
logging.error(
"Error getting address_line_1. Error message: " + e.args[0])
return
return address[0].strip() | 0.005703 |
def link_to(self, source, transformation=None):
"""
Kervi values may be linked together.
A KerviValue is configured to be either an input or output.
        When an output value is linked to an input value, the input becomes
        an observer of the output. Every time the output value changes it notifies the input
        about the change.
        :param source:
        It is possible to make direct and indirect links.
        If the source parameter is of type KerviValue, a direct link is created;
        this is a fast link where the output notifies the input directly.
        This type of link is possible only if both output and input reside in the same process.
        If the source parameter is a string, it is expected to hold the id of another KerviValue.
        In this mode the input value listens on the kervi spine for events from the output value.
        This mode is useful if the output and input do not exist in the same process.
        This type of link must be made by the input.
        :type source: ``str`` or ``KerviValue``
        :param transformation:
        A function or lambda expression that transforms the value before the linked value is updated.
        This is useful if the input expects ranges other than what the output produces,
        or needs to change the sign of the value.
"""
if isinstance(source, KerviValue):
if source.is_input and not self.is_input:
self.add_observer(source, transformation)
elif not source.is_input and self.is_input:
source.add_observer(self, transformation)
else:
raise Exception("input/output mismatch in kervi value link:{0} - {1}".format(source.name, self.name))
elif isinstance(source, str):
if len(self._spine_observers) == 0:
self.spine.register_event_handler("valueChanged", self._link_changed_event)
self._spine_observers[source] = transformation | 0.00674 |
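The direct-link case reduces to the observer pattern; a minimal self-contained sketch (not the kervi API) of what linking an output to an input with a transformation amounts to:
class Output:
    def __init__(self):
        self._observers = []
    def add_observer(self, observer, transformation=None):
        self._observers.append((observer, transformation))
    def set(self, value):
        # Notify every linked input, applying its transformation if any.
        for observer, transform in self._observers:
            observer.update(transform(value) if transform else value)
class Input:
    def __init__(self):
        self.value = None
    def update(self, value):
        self.value = value
out, inp = Output(), Input()
out.add_observer(inp, transformation=lambda v: -v)  # e.g. change the sign
out.set(5)
print(inp.value)  # -> -5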
def is_image(file):
"""
Returns ``True`` if the file extension looks like an image file to Telegram.
"""
match = re.match(r'\.(png|jpe?g)', _get_extension(file), re.IGNORECASE)
if match:
return True
else:
return isinstance(resolve_bot_file_id(file), types.Photo) | 0.006623 |
def bootstrap_main(args):
"""
Main function explicitly called from the C++ code.
Return the main application object.
"""
version_info = sys.version_info
if version_info.major != 3 or version_info.minor < 6:
return None, "python36"
main_fn = load_module_as_package("nionui_app.nionswift")
if main_fn:
return main_fn(["nionui_app.nionswift"] + args, {"pyqt": None}), None
return None, "main" | 0.002268 |
def get_objects_without_object(self, obj_type, *child_types):
"""
:param obj_type: requested object type.
        :param child_types: unrequested child types.
:return: all children of the requested type that do not have the unrequested child types.
"""
return [o for o in self.get_objects_by_type(obj_type) if
not o.get_objects_by_type(*child_types)] | 0.007407 |
def ftr_process(url=None, content=None, config=None, base_url=None):
u""" process an URL, or some already fetched content from a given URL.
:param url: The URL of article to extract. Can be
``None``, but only if you provide both ``content`` and
``config`` parameters.
:type url: str, unicode or ``None``
:param content: the HTML content already downloaded. If given,
it will be used for extraction, and the ``url`` parameter will
be used only for site config lookup if ``config`` is not given.
        Please pass only ``unicode`` content to avoid charset errors.
:type content: unicode or ``None``
:param config: if ``None``, it will be looked up from ``url`` with as
much love and AI as possible. But don't expect too much.
:type config: a :class:`SiteConfig` instance or ``None``
:param base_url: reserved parameter, used when fetching multi-pages URLs.
It will hold the base URL (the first one fetched), and will serve as
base for fixing non-schemed URLs or query_string-only links to next
        page(s). Please do not set this parameter until you really know what
        you are doing. Default: ``None``.
:type base_url: str or unicode or None
:raises:
- :class:`RuntimeError` in all parameters-incompatible situations.
Please RFTD carefully, and report strange unicornic edge-cases.
- :class:`SiteConfigNotFound` if no five-filter site config can
be found.
- any raw ``requests.*`` exception, network related, if anything
goes wrong during url fetching.
:returns:
- either a :class:`ContentExtractor` instance with extracted
(and :attr:`.failures`) attributes set, in case a site config
could be found.
When the extractor knows how to handle multiple-pages articles,
all pages contents will be extracted and cleaned — if relevant —
and concatenated into the instance :attr:`body` attribute.
The :attr:`next_page_link` attribute will be a ``list``
containing all sub-pages links. Note: the first link is the one
you fed the extractor with ; it will not be repeated in the list.
- or ``None``, if content was not given and url fetching returned
a non-OK HTTP code, or if no site config could be found (in that
particular case, no extraction at all is performed).
"""
if url is None and content is None and config is None:
raise RuntimeError('At least one of url or the couple content/config '
'argument must be present.')
if content is not None and url is None and config is None:
raise RuntimeError('Passing content only will not give any result.')
if content is None:
if url is None:
raise RuntimeError('When content is unset, url must be set.')
try:
result = requests_get(url)
if result.status_code != requests.codes.ok:
LOGGER.error(u'Wrong status code in return while getting '
u'“%s”.', url)
return None
# Override before accessing result.text ; see `requests` doc.
result.encoding = detect_encoding_from_requests_response(result)
LOGGER.info(u'Downloaded %s bytes as %s text.',
len(result.text), result.encoding)
# result.text is always unicode
content = result.text
except:
LOGGER.error(u'Content could not be fetched from URL %s.', url)
raise
if config is None:
# This can eventually raise SiteConfigNotFound
config_string, matched_host = ftr_get_config(url)
config = SiteConfig(site_config_text=config_string, host=matched_host)
extractor = ContentExtractor(config)
if base_url is None:
base_url = url
if extractor.process(html=content):
# This is recursive. Yeah.
if extractor.next_page_link is not None:
next_page_link = sanitize_next_page_link(extractor.next_page_link,
base_url)
next_extractor = ftr_process(url=next_page_link,
base_url=base_url)
extractor.body += next_extractor.body
extractor.next_page_link = [next_page_link]
if next_extractor.next_page_link is not None:
extractor.next_page_link.extend(next_extractor.next_page_link)
return extractor
return None | 0.000434 |
def median(ls):
"""
Takes a list and returns the median.
"""
    from math import floor  # local import so this snippet runs standalone
    ls = sorted(ls)
return ls[int(floor(len(ls)/2.0))] | 0.007634 |
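A quick sanity check of the helper above (note that for even-length lists this implementation returns the upper of the two middle elements rather than their average):
print(median([3, 1, 2]))       # -> 2
print(median([4, 1, 3, 2]))    # -> 3  (upper middle element, not 2.5)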
def set_process(self, process = None):
"""
Manually set the parent process. Use with care!
@type process: L{Process}
@param process: (Optional) Process object. Use C{None} for no process.
"""
if process is None:
self.__process = None
else:
global Process # delayed import
if Process is None:
from winappdbg.process import Process
if not isinstance(process, Process):
msg = "Parent process must be a Process instance, "
msg += "got %s instead" % type(process)
raise TypeError(msg)
self.__process = process | 0.007225 |
def paschen_back_energies(fine_state, Bz):
r"""Return Paschen-Back regime energies for a given fine state and\
magnetic field.
>>> ground_state = State("Rb", 87, 5, 0, 1/Integer(2))
>>> Bz = 200.0
>>> Bz = Bz/10000
>>> for f_group in paschen_back_energies(ground_state, Bz):
... print(f_group)
[1.51284728917866e-24 3.80485568127324e-25 -7.51876152924007e-25
-1.88423787397534e-24]
[-1.51229355210131e-24 -3.80300989101543e-25 7.51691573898227e-25
1.88368413689800e-24]
"""
element = fine_state.element
isotope = fine_state.isotope
N = fine_state.n
L = fine_state.l
J = fine_state.j
II = Atom(element, isotope).nuclear_spin
MJ = [-J+i for i in range(2*J+1)]
MI = [-II+i for i in range(2*II+1)]
Ahfs = fine_state.Ahfs
Bhfs = fine_state.Bhfs
gL, gS, gI, gJ = lande_g_factors(element, isotope, L, J)
energiesPBack = []
for mj in MJ:
energiesMJ = []
unperturbed_energy = hbar*State(element, isotope, N, L, J).omega
for mi in MI:
energyMI = unperturbed_energy
energyMI += 2*pi*hbar*Ahfs*mi*mj
if J != 1/Integer(2) and II != 1/Integer(2):
num = 9*(mi*mj)**2 - 3*J*(J+1)*mi**2
num += -3*II*(II+1)*mj**2 + II*(II+1)*J*(J+1)
den = 4*J*(2*J-1)*II*(2*II-1)
energyMI += 2*pi*hbar*Bhfs*num/den
energyMI += muB*(gJ*mj+gI*mi)*Bz
energiesMJ += [energyMI]
energiesPBack += [energiesMJ]
return array(energiesPBack) | 0.000619 |
def get_img_data(image, copy=True):
"""Return the voxel matrix of the Nifti file.
    If ``copy`` is True, a copy of the image data is returned, so the input image is not modified.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
    copy: bool
        If True, return a copy of the image data so the input image is not modified.
Returns
-------
array_like
"""
try:
img = check_img(image)
if copy:
return get_data(img)
else:
return img.get_data()
except Exception as exc:
raise Exception('Error when reading file {0}.'.format(repr_imgs(image))) from exc | 0.004735 |
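A hypothetical usage sketch (requires nibabel and an existing NIfTI file; the path is a placeholder):
data = get_img_data('subject_t1.nii.gz')               # copied array, safe to modify
view = get_img_data('subject_t1.nii.gz', copy=False)   # may share memory with the image
print(data.shape)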
def sources(scheduled=False):
'''List all harvest sources'''
sources = actions.list_sources()
if scheduled:
sources = [s for s in sources if s.periodic_task]
if sources:
for source in sources:
msg = '{source.name} ({source.backend}): {cron}'
if source.periodic_task:
cron = source.periodic_task.schedule_display
else:
cron = 'not scheduled'
log.info(msg.format(source=source, cron=cron))
elif scheduled:
log.info('No sources scheduled yet')
else:
log.info('No sources defined yet') | 0.001621 |
def difference(self,other):
"""
Return a new DiscreteSet with the difference of the two sets, i.e.
all elements that are in self but not in other.
:param DiscreteSet other: Set to subtract
:rtype: DiscreteSet
:raises ValueError: if self is a set of everything
"""
if self.everything:
raise ValueError("Can not remove from everything")
elif other.everything:
return DiscreteSet([])
else:
return DiscreteSet(self.elements.difference(other.elements)) | 0.005319 |
def run_cdk(self, command='deploy'): # pylint: disable=too-many-branches
"""Run CDK."""
response = {'skipped_configs': False}
cdk_opts = [command]
if not which('npm'):
LOGGER.error('"npm" not found in path or is not executable; '
'please ensure it is installed correctly.')
sys.exit(1)
if 'DEBUG' in self.context.env_vars:
cdk_opts.append('-v') # Increase logging if requested
warn_on_boto_env_vars(self.context.env_vars)
if cdk_module_matches_env(self.context.env_name,
self.options.get('environments', {}),
self.context.env_vars):
if os.path.isfile(os.path.join(self.path, 'package.json')):
with change_dir(self.path):
run_npm_install(self.path, self.options, self.context)
if self.options.get('options', {}).get('build_steps',
[]):
LOGGER.info("Running build steps for %s...",
os.path.basename(self.path))
run_commands(
commands=self.options.get('options',
{}).get('build_steps',
[]),
directory=self.path,
env=self.context.env_vars
)
cdk_context_opts = []
if isinstance(self.options.get('environments',
{}).get(self.context.env_name), # noqa
dict):
for (key, val) in self.options['environments'][self.context.env_name].items(): # noqa pylint: disable=line-too-long
cdk_context_opts.extend(['-c', "%s=%s" % (key, val)])
cdk_opts.extend(cdk_context_opts)
if command == 'diff':
LOGGER.info("Running cdk %s on each stack in %s",
command,
os.path.basename(self.path))
for i in get_cdk_stacks(self.path,
self.context.env_vars,
cdk_context_opts):
subprocess.call(
generate_node_command(
'cdk',
cdk_opts + [i], # 'diff <stack>'
self.path
),
env=self.context.env_vars
)
else:
if command == 'deploy':
if 'CI' in self.context.env_vars:
cdk_opts.append('--ci')
cdk_opts.append('--require-approval=never')
bootstrap_command = generate_node_command(
'cdk',
['bootstrap'] + cdk_context_opts,
self.path
)
LOGGER.info('Running cdk bootstrap...')
run_module_command(cmd_list=bootstrap_command,
env_vars=self.context.env_vars)
elif command == 'destroy' and 'CI' in self.context.env_vars: # noqa
cdk_opts.append('-f') # Don't prompt
cdk_command = generate_node_command(
'cdk',
cdk_opts,
self.path
)
LOGGER.info("Running cdk %s on %s (\"%s\")",
command,
os.path.basename(self.path),
format_npm_command_for_logging(cdk_command)) # noqa
run_module_command(cmd_list=cdk_command,
env_vars=self.context.env_vars)
else:
LOGGER.info(
"Skipping cdk %s of %s; no \"package.json\" "
"file was found (need a package file specifying "
"aws-cdk in devDependencies)",
command,
os.path.basename(self.path))
else:
LOGGER.info(
"Skipping cdk %s of %s; no config for "
"this environment found or current account/region does not "
"match configured environment",
command,
os.path.basename(self.path))
response['skipped_configs'] = True
return response | 0.000592 |
def convert_differend_width(src_reg, dst_reg):
"""
e.g.:
8bit $cd TFR into 16bit, results in: $ffcd
16bit $1234 TFR into 8bit, results in: $34
>>> reg8 = ValueStorage8Bit(name="bar", initial_value=0xcd)
>>> reg16 = ValueStorage16Bit(name="foo", initial_value=0x0000)
>>> hex(convert_differend_width(src_reg=reg8, dst_reg=reg16))
'0xffcd'
>>> reg16 = ValueStorage16Bit(name="foo", initial_value=0x1234)
>>> reg8 = ValueStorage8Bit(name="bar", initial_value=0xcd)
>>> hex(convert_differend_width(src_reg=reg16, dst_reg=reg8))
'0x34'
TODO: verify this behaviour on real hardware
see: http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4886
"""
src_value = src_reg.value
if src_reg.WIDTH == 8 and dst_reg.WIDTH == 16:
# e.g.: $cd -> $ffcd
src_value += 0xff00
elif src_reg.WIDTH == 16 and dst_reg.WIDTH == 8:
        # This is not really needed, because all 8-bit registers will
        # limit the value, too.
# e.g.: $1234 -> $34
src_value = src_value & 0xff
return src_value | 0.000912 |
def parseArgs(args):
"""Parse Arguments
Used to parse the arguments passed to the script
Args:
args (list): A list of strings representing arguments to a script
Returns:
dict: Returns a dictionary with args as keys and the values sent with
them or True for valueless arguments
Raises:
ValueError: If args is not a list or tuple
"""
# If args is not a list
if not isinstance(args, (list,tuple)):
raise ValueError('args is not a list or tuple')
# Init the return value
dRet = {}
# Go through each argument
for s in args:
# Check the string matches the format
oRes = re.match(u'^--([^=]+)(?:=(.+))?$', s)
# If we have a match
if oRes:
# Store it by name and value
mGroup2 = oRes.group(2)
dRet[oRes.group(1)] = (not mGroup2 and True or mGroup2)
# Else add it to the unknowns
else:
try: dRet['?'].append(s)
except KeyError: dRet['?'] = [s]
# Return the dict
return dRet | 0.040816 |
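Assuming `re` is imported alongside the function above, a quick usage sketch:
args = ['--verbose', '--output=report.txt', 'extra.txt']
print(parseArgs(args))
# -> {'verbose': True, 'output': 'report.txt', '?': ['extra.txt']}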
def rotationType(self, value):
"""gets/sets the rotationType"""
if self._rotationType.lower() in self._rotationTypes and \
self._rotationType != value:
self._rotationType = value | 0.009217 |
def render(self, writer_options=None):
"""Renders the barcode using `self.writer`.
:parameters:
writer_options : Dict
Options for `self.writer`, see writer docs for details.
:returns: Output of the writers render method.
"""
options = Barcode.default_writer_options.copy()
options.update(writer_options or {})
if options['write_text']:
options['text'] = self.get_fullcode()
self.writer.set_options(options)
code = self.build()
raw = Barcode.raw = self.writer.render(code)
return raw | 0.003268 |
def get_exporter(obj, name):
"""
    Get an exporter for the given object.
:param obj: object to export
:type obj: :class:`Component <cqparts.Component>`
:param name: registered name of exporter
:type name: :class:`str`
:return: an exporter instance of the given type
:rtype: :class:`Exporter`
:raises TypeError: if exporter cannot be found
"""
if name not in exporter_index:
raise TypeError(
("exporter type '%s' is not registered: " % name) +
("registered types: %r" % sorted(exporter_index.keys()))
)
for base_class in exporter_index[name]:
if isinstance(obj, base_class):
return exporter_index[name][base_class](obj)
raise TypeError("exporter type '%s' for a %r is not registered" % (
name, type(obj)
)) | 0.001225 |
def call(self, inputs):
"""Runs the model to generate an intermediate representation of x_t.
Args:
inputs: A batch of image sequences `x_{1:T}` of shape
`[sample_shape, batch_size, timesteps, height, width,
channels]`.
Returns:
A batch of intermediate representations of shape [sample_shape,
batch_size, timesteps, hidden_size].
"""
image_shape = tf.shape(input=inputs)[-3:]
collapsed_shape = tf.concat(([-1], image_shape), axis=0)
out = tf.reshape(inputs, collapsed_shape) # (sample*batch*T, h, w, c)
out = self.conv1(out)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
expanded_shape = tf.concat((tf.shape(input=inputs)[:-3], [-1]), axis=0)
return tf.reshape(out, expanded_shape) | 0.001267 |
def list_rules(self):
"""Print a list of all rules"""
for rule in sorted(self.all_rules, key=lambda rule: rule.name):
print(rule)
if self.args.verbose:
for line in rule.doc.split("\n"):
print(" ", line) | 0.007117 |
def template_list(call=None):
'''
Return available Xen template information.
    This returns the details of each template, such as the number of
    cores, memory size, etc.
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates | 0.001927 |
def save_dtrajs(self, prefix='', output_dir='.',
output_format='ascii', extension='.dtraj'):
r"""Saves calculated discrete trajectories. Filenames are taken from
    the given reader. If the data comes from memory, dtrajs are written to a
    default filename.
Parameters
----------
prefix : str
prepend prefix to filenames.
output_dir : str (optional)
save files to this directory. Defaults to current working directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj')
"""
clustering = self._chain[-1]
reader = self._chain[0]
from pyemma.coordinates.clustering.interface import AbstractClustering
assert isinstance(clustering, AbstractClustering)
trajfiles = None
if isinstance(reader, FeatureReader):
trajfiles = reader.filenames
clustering.save_dtrajs(
trajfiles, prefix, output_dir, output_format, extension) | 0.003393 |