text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def OnChar(self, event):
""" on Character event"""
key = event.GetKeyCode()
entry = wx.TextCtrl.GetValue(self).strip()
pos = wx.TextCtrl.GetSelection(self)
# really, the order here is important:
# 1. return sends to ValidateEntry
if key == wx.WXK_RETURN:
if not self.is_valid:
wx.TextCtrl.SetValue(self, self.format % set_float(self.__bound_val))
else:
self.SetValue(entry)
return
# 2. other non-text characters are passed without change
if (key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255):
event.Skip()
return
# 3. check for multiple '.' and out of place '-' signs and ignore these
# note that chr(key) will now work due to return at #2
has_minus = '-' in entry
ckey = chr(key)
if ((ckey == '.' and (self.__prec == 0 or '.' in entry) ) or
(ckey == '-' and (has_minus or pos[0] != 0)) or
(ckey != '-' and has_minus and pos[0] == 0)):
return
# 4. allow digits, but not other characters
if chr(key) in self.__digits:
event.Skip() | 0.007401 |
def _merge_default_values(self):
"""Merge default values with resource data."""
values = self._get_default_values()
for key, value in values.items():
if not self.data.get(key):
self.data[key] = value | 0.007968 |
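# A minimal standalone sketch of the merge behaviour above, assuming
# self.data and the result of _get_default_values() are plain dicts
# (the names and values here are made up for illustration):
data = {"name": "web-1", "region": ""}
defaults = {"region": "us-east-1", "flavor": "small"}
# Only keys that are missing or falsy in `data` are filled in.
for key, value in defaults.items():
    if not data.get(key):
        data[key] = value
print(data)  # {'name': 'web-1', 'region': 'us-east-1', 'flavor': 'small'}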
def startCc(CallControlCapabilities_presence=0):
"""START CC Section 9.3.23a"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x9) # 00001001
packet = a / b
if CallControlCapabilities_presence == 1:
c = CallControlCapabilitiesHdr(ieiCCC=0x15, eightBitCCC=0x0)
packet = packet / c
return packet | 0.00304 |
def get_summary_stats(items, attr):
"""
Returns a dictionary of aggregated statistics for 'items' filtered by
"attr'. For example, it will aggregate statistics for a host across all
the playbook runs it has been a member of, with the following structure:
data[host.id] = {
'ok': 4
'changed': 4
...
}
"""
data = {}
for item in items:
stats = models.Stats.query.filter_by(**{attr: item.id})
data[item.id] = {
'ok': sum([int(stat.ok) for stat in stats]),
'changed': sum([int(stat.changed) for stat in stats]),
'failed': sum([int(stat.failed) for stat in stats]),
'skipped': sum([int(stat.skipped) for stat in stats]),
'unreachable': sum([int(stat.unreachable) for stat in stats])
}
# If we're aggregating stats for a playbook, also infer status
if attr is "playbook_id":
data[item.id]['status'] = _infer_status(item, data[item.id])
return data | 0.000962 |
def _get_output_path(self):
"""Checks if a base file label / path is set. Returns absolute path."""
if self.Parameters['-o'].isOn():
output_path = self._absolute(str(self.Parameters['-o'].Value))
else:
raise ValueError("No output path specified.")
return output_path | 0.006211 |
def _clear_context(context):
'''
Clear variables stored in __context__. Run this function when a new version
of chocolatey is installed.
'''
for var in [x for x in __context__ if x.startswith('chocolatey.')]:
context.pop(var) | 0.003953 |
def add_hyperedges(self, hyperedges, attr_dict=None, **attr):
"""Adds multiple hyperedges to the graph, along with any related
attributes of the hyperedges.
If any node in the tail or head of any hyperedge has not
previously been added to the hypergraph, it will automatically
be added here. Hyperedges without a "weight" attribute specified
will be assigned the default value of 1.
:param hyperedges: iterable container of either tuples of
(tail reference, head reference) OR tuples of
(tail reference, head reference, attribute dictionary);
if an attribute dictionary is provided in the tuple,
its values will override both attr_dict's and attr's
values.
:param attr_dict: dictionary of attributes shared by all
the hyperedges.
:param attr: keyword arguments of attributes of the hyperedges;
attr's values will override attr_dict's values
if both are provided.
:returns: list -- the IDs of the hyperedges added in the order
specified by the hyperedges container's iterator.
See also:
add_hyperedge
Examples:
::
>>> H = DirectedHypergraph()
>>> hyperedge_list = ((["A", "B"], ["C", "D"]),
(("A", "C"), ("B",), {'weight': 2}),
(set(["D"]), set(["A", "C"])))
>>> H.add_hyperedges(hyperedge_list)
"""
attr_dict = self._combine_attribute_arguments(attr_dict, attr)
hyperedge_ids = []
for hyperedge in hyperedges:
if len(hyperedge) == 3:
# See ("A", "C"), ("B"), {weight: 2}) in the
# documentation example
tail, head, hyperedge_attr_dict = hyperedge
# Create a new dictionary and load it with attr_dict and
# hyperedge_attr_dict, with the latter taking precedence
new_dict = attr_dict.copy()
new_dict.update(hyperedge_attr_dict)
hyperedge_id = self.add_hyperedge(tail, head, new_dict)
else:
# See (["A", "B"], ["C", "D"]) in the documentation example
tail, head = hyperedge
hyperedge_id = \
self.add_hyperedge(tail, head, attr_dict.copy())
hyperedge_ids.append(hyperedge_id)
return hyperedge_ids | 0.000764 |
def call_api_fetch(self, params, get_latest_only=True):
"""
GET https://myserver/piwebapi/assetdatabases/D0NxzXSxtlKkGzAhZfHOB-KAQLhZ5wrU-UyRDQnzB_zGVAUEhMQUZTMDRcTlVHUkVFTg HTTP/1.1
Host: myserver
Accept: application/json"""
output_format = 'application/json'
url_string = self.request_info.url_string()
# passing the username and required output format
headers_list = {"Accept": output_format, "Host": self.request_info.host}
try:
hub_result = requests.get(url_string, headers=headers_list, timeout=10.000, verify=False)
if not hub_result.ok:
raise ConnectionRefusedError("Connection to Triangulum hub refused: " + hub_result.reason)
except requests.exceptions.RequestException:
raise ConnectionError("Error connecting to Triangulum hub - check internet connection.")
result = {}
result_content_json = hub_result.json()
result['ok'] = hub_result.ok
result['content'] = json.dumps(result_content_json)
if "Items" in result_content_json:
available_matches = len(result_content_json['Items'])
else:
available_matches = 1
# No Date params allowed in call to hub, so apply get latest only to hub results here...
if (get_latest_only and self.request_info.last_fetch_time is not None):
try:
# Filter python objects with list comprehensions
new_content = [x for x in result_content_json['Items']
if self.get_date_time(x['Timestamp']) > self.request_info.last_fetch_time]
result_content_json['Items'] = new_content
result['content'] = json.dumps(result_content_json)
result['ok'] = True
except ValueError as e:
result['ok'] = False
result['reason'] = str(e)
except Exception as e:
result['ok'] = False
result['reason'] = 'Problem sorting results by date to get latest only. ' + str(e)
result['available_matches'] = available_matches
if 'Items' in result_content_json:
result['returned_matches'] = len(result_content_json['Items'])
else:
result['returned_matches'] = 1
# Set last_fetch_time for next call
if (get_latest_only):
if (len(result_content_json.get('Items', [])) > 0):
try:
newlist = sorted(result_content_json['Items'],
key=lambda k: self.get_date_time(k["Timestamp"]),
reverse=True)
most_recent = newlist[0]["Timestamp"]
self.request_info.last_fetch_time = self.get_date_time(most_recent)
except ValueError as e:
result['ok'] = False
result['reason'] = str(e)
except Exception as e:
result['ok'] = False
result['reason'] = 'Problem sorting results by date to get latest only. ' + str(e)
return result | 0.006011 |
def get_sample_window(self, type_tag, size=10):
"""Get a window of samples not to exceed size (in MB).
Args:
type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...).
size: Size of samples in MBs.
Returns:
a list of md5s.
"""
# Convert size from MB to bytes
size = size * 1024 * 1024
# Grab all the samples of type=type_tag, sort by import_time (newest to oldest)
cursor = self.database[self.sample_collection].find({'type_tag': type_tag},
{'md5': 1,'length': 1}).sort('import_time',pymongo.DESCENDING)
total_size = 0
md5_list = []
for item in cursor:
if total_size > size:
return md5_list
md5_list.append(item['md5'])
total_size += item['length']
# If you get this far you don't have 'size' amount of data
# so just return what you've got
return md5_list | 0.007209 |
def dumps(self, fd, **kwargs):
"""
Returns the concrete content for a file descriptor.
BACKWARD COMPATIBILITY: if you ask for file descriptors 0 1 or 2, it will return the data from stdin, stdout,
or stderr as a flat string.
:param fd: A file descriptor.
:return: The concrete content.
:rtype: str
"""
if 0 <= fd <= 2:
data = [self.stdin, self.stdout, self.stderr][fd].concretize(**kwargs)
if type(data) is list:
data = b''.join(data)
return data
return self.get_fd(fd).concretize(**kwargs) | 0.006329 |
def request_object(self):
"""Grab an object from the pool. If the pool is empty, a new object will be generated and returned."""
obj_to_return = None
if self.queue.count > 0:
obj_to_return = self.__dequeue()
else:
#The queue is empty, generate a new item.
self.__init_object()
obj_to_return = self.__dequeue()
self.active_objects += 1
return obj_to_return | 0.008811 |
def to_json(self):
"""Convert the Humidity Condition to a dictionary."""
return {
'hum_type': self.hum_type,
'hum_value': self.hum_value,
'barometric_pressure': self.barometric_pressure,
'schedule': self.schedule,
'wet_bulb_range': self.wet_bulb_range,
} | 0.005917 |
def run_project(
project_directory: str,
output_directory: str = None,
logging_path: str = None,
reader_path: str = None,
reload_project_libraries: bool = False,
**kwargs
) -> ExecutionResult:
"""
Runs a project as a single command directly within the current Python
interpreter.
:param project_directory:
The fully-qualified path to the directory where the Cauldron project is
located
:param output_directory:
The fully-qualified path to the directory where the results will be
written. All of the results files will be written within this
directory. If the directory does not exist, it will be created.
:param logging_path:
The fully-qualified path to a file that will be used for logging. If a
directory is specified instead of a file, a file will be created using
the default filename of cauldron_run.log. If a file already exists at
that location it will be removed and a new file created in its place.
:param reader_path:
Specifies a path where a reader file will be saved after the project
has finished running. If no path is specified, no reader file will be
saved. If the path is a directory, a reader file will be saved in that
directory with the project name as the file name.
:param reload_project_libraries:
Whether or not to reload all project libraries prior to execution of
the project. By default this is False, but can be enabled in cases
where refreshing the project libraries before execution is needed.
:param kwargs:
Any variables to be available in the cauldron.shared object during
execution of the project can be specified here as keyword arguments.
:return:
A response object that contains information about the run process
and the shared data from the final state of the project.
"""
from cauldron.cli import batcher
return batcher.run_project(
project_directory=project_directory,
output_directory=output_directory,
log_path=logging_path,
reader_path=reader_path,
reload_project_libraries=reload_project_libraries,
shared_data=kwargs
) | 0.000439 |
def _validate_frow(self, frow):
"""Validate frow argument."""
is_int = isinstance(frow, int) and (not isinstance(frow, bool))
pexdoc.exh.addai("frow", not (is_int and (frow >= 0)))
return frow | 0.008929 |
def unwrap(klass, value):
"""Unpack a Value into an augmented python type (selected from the 'value' field)
"""
assert isinstance(value, Value), value
V = value.value
try:
T = klass.typeMap[type(V)]
except KeyError:
raise ValueError("Can't unwrap value of type %s" % type(V))
try:
return T(value.value)._store(value)
except Exception as e:
raise ValueError("Can't construct %s around %s (%s): %s" % (T, value, type(value), e)) | 0.007435 |
def get_files(self):
"""stub"""
files_map = {}
try:
files_map['choices'] = self.get_choices_file_urls_map()
try:
files_map.update(self.get_file_urls_map())
except IllegalState:
pass
except Exception:
files_map['choices'] = self.get_choices_files_map()
try:
files_map.update(self.get_files_map())
except IllegalState:
pass
return files_map | 0.003891 |
def do_serial(self, p):
"""Set the serial port, e.g.: /dev/tty.usbserial-A4001ib8"""
try:
self.serial.port = p
self.serial.open()
print('Opening serial port: %s' % p)
except Exception as e:
print('Unable to open serial port: %s' % p)
def parse(url_str):
"""
Extract all parts from a URL string and return them as a dictionary
"""
url_str = to_unicode(url_str)
result = urlparse(url_str)
netloc_parts = result.netloc.rsplit('@', 1)
if len(netloc_parts) == 1:
username = password = None
host = netloc_parts[0]
else:
user_and_pass = netloc_parts[0].split(':')
if len(user_and_pass) == 2:
username, password = user_and_pass
elif len(user_and_pass) == 1:
username = user_and_pass[0]
password = None
host = netloc_parts[1]
if host and ':' in host:
host = host.split(':')[0]
return {'host': host,
'username': username,
'password': password,
'scheme': result.scheme,
'port': result.port,
'path': result.path,
'query': result.query,
'fragment': result.fragment} | 0.001068 |
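# A quick usage sketch of parse(), assuming to_unicode and urlparse
# (urllib.parse) are available as in the function above; expected
# values are shown as comments:
parts = parse("https://alice:s3cret@example.com:8080/index?q=1#top")
print(parts["scheme"])    # 'https'
print(parts["username"])  # 'alice'
print(parts["password"])  # 's3cret'
print(parts["host"])      # 'example.com' (port stripped from the netloc)
print(parts["port"])      # 8080
print(parts["path"])      # '/index'
print(parts["query"])     # 'q=1'
print(parts["fragment"])  # 'top'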
def gen_method_keys(self, *args, **kwargs):
'''Given a node, return the string to use in computing the
matching visitor methodname. Can also be a generator of strings.
'''
token = args[0]
for mro_type in type(token).__mro__[:-1]:
name = mro_type.__name__
yield name | 0.006079 |
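# To make the MRO walk concrete: for a bool token, type(token).__mro__
# is (bool, int, object), and dropping the last entry leaves the method
# key candidates in resolution order (illustration only):
print([t.__name__ for t in type(True).__mro__[:-1]])  # ['bool', 'int']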
def download_layers(self, repo_name, digest=None, destination=None):
''' download layers is a wrapper to do the following for a client loaded
with a manifest for an image:
1. use the manifests to retrieve list of digests (get_digests)
2. atomically download the list to destination (get_layers)
This function uses the MultiProcess client to download layers
at the same time.
'''
from sregistry.main.workers import Workers
from sregistry.main.workers.aws import download_task
# Obtain list of digests, and destination for download
self._get_manifest(repo_name, digest)
digests = self._get_digests(repo_name, digest)
destination = self._get_download_cache(destination)
# Create multiprocess download client
workers = Workers()
# Download each layer atomically
tasks = []
layers = []
# Start with a fresh token
self._update_token()
for digest in digests:
targz = "%s/%s.tar.gz" % (destination, digest['digest'])
url = '%s/%s/blobs/%s' % (self.base, repo_name, digest['digest'])
# Only download if not in cache already
if not os.path.exists(targz):
tasks.append((url, self.headers, targz))
layers.append(targz)
# Download layers with multiprocess workers
if len(tasks) > 0:
download_layers = workers.run(func=download_task,
tasks=tasks)
return layers, url | 0.002012 |
def _check_chained_comparison(self, node):
"""Check if there is any chained comparison in the expression.
Add a refactoring message if a boolOp contains comparison like a < b and b < c,
which can be chained as a < b < c.
Care is taken to avoid simplifying a < b < c and b < d.
"""
if node.op != "and" or len(node.values) < 2:
return
def _find_lower_upper_bounds(comparison_node, uses):
left_operand = comparison_node.left
for operator, right_operand in comparison_node.ops:
for operand in (left_operand, right_operand):
value = None
if isinstance(operand, astroid.Name):
value = operand.name
elif isinstance(operand, astroid.Const):
value = operand.value
if value is None:
continue
if operator in ("<", "<="):
if operand is left_operand:
uses[value]["lower_bound"].add(comparison_node)
elif operand is right_operand:
uses[value]["upper_bound"].add(comparison_node)
elif operator in (">", ">="):
if operand is left_operand:
uses[value]["upper_bound"].add(comparison_node)
elif operand is right_operand:
uses[value]["lower_bound"].add(comparison_node)
left_operand = right_operand
uses = collections.defaultdict(
lambda: {"lower_bound": set(), "upper_bound": set()}
)
for comparison_node in node.values:
if isinstance(comparison_node, astroid.Compare):
_find_lower_upper_bounds(comparison_node, uses)
for _, bounds in uses.items():
num_shared = len(bounds["lower_bound"].intersection(bounds["upper_bound"]))
num_lower_bounds = len(bounds["lower_bound"])
num_upper_bounds = len(bounds["upper_bound"])
if num_shared < num_lower_bounds and num_shared < num_upper_bounds:
self.add_message("chained-comparison", node=node)
break | 0.001736 |
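# For reference, a minimal illustration (not part of the checker) of
# the refactoring this message suggests:
a, b, c = 1, 2, 3
if a < b and b < c:   # flagged: b is an upper bound and a lower bound
    pass              # inside the same boolean "and"
if a < b < c:         # equivalent chained form the checker recommends
    pass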
def create_data_disk(vm_=None, linode_id=None, data_size=None):
r'''
Create a data disk for the linode (type is hardcoded to ext4 at the moment)
.. versionadded:: 2016.3.0
vm\_
The VM profile to create the data disk for.
linode_id
The ID of the Linode to create the data disk for.
data_size
The size of the disk, in MB.
'''
kwargs = {}
kwargs.update({'LinodeID': linode_id,
'Label': vm_['name']+"_data",
'Type': 'ext4',
'Size': data_size
})
result = _query('linode', 'disk.create', args=kwargs)
return _clean_data(result) | 0.002981 |
def site_models(self, app_label=None):
"""Returns a dictionary of registered models.
"""
site_models = {}
app_configs = (
django_apps.get_app_configs()
if app_label is None
else [django_apps.get_app_config(app_label)]
)
for app_config in app_configs:
model_list = [
model
for model in app_config.get_models()
if model._meta.label_lower in self.registry
]
if model_list:
model_list.sort(key=lambda m: m._meta.verbose_name)
site_models.update({app_config.name: model_list})
return site_models | 0.002865 |
def update(self):
"""Update the device values."""
node = self._fritz.get_device_element(self.ain)
self._update_from_node(node) | 0.013333 |
def save(self, fn:PathOrStr):
"Save the image to `fn`."
x = image2np(self.data*255).astype(np.uint8)
PIL.Image.fromarray(x).save(fn) | 0.019231 |
def logging_raslog_module_modId_modId(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
raslog = ET.SubElement(logging, "raslog")
module = ET.SubElement(raslog, "module")
modId = ET.SubElement(module, "modId")
modId = ET.SubElement(modId, "modId")
modId.text = kwargs.pop('modId')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.005435 |
def p_sens_all(self, p):
'senslist : AT TIMES'
p[0] = SensList(
(Sens(None, 'all', lineno=p.lineno(1)),), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | 0.010526 |
def _text_to_string(self, text):
""" Provides for escape characters and converting to
pdf text object (pdf strings are in parentheses).
Mainly for use in the information block here, this
functionality is also present in the text object.
"""
if text:
for i,j in [("\\","\\\\"),(")","\\)"),("(", "\\(")]:
text = text.replace(i, j)
text = "(%s)" % text
else:
text = 'None'
return text | 0.013384 |
def get_default_credentials(scopes):
"""Gets the Application Default Credentials."""
credentials, _ = google.auth.default(scopes=scopes)
return credentials | 0.005988 |
def snapshot(self,
label,
snapshot_type='statevector',
qubits=None,
params=None):
"""Take a statevector snapshot of the internal simulator representation.
Works on all qubits, and prevents reordering (like barrier).
For other types of snapshots use the Snapshot extension directly.
Args:
label (str): a snapshot label to report the result
snapshot_type (str): the type of the snapshot.
qubits (list or None): the qubits to apply snapshot to [Default: None].
params (list or None): the parameters for snapshot_type [Default: None].
Returns:
QuantumCircuit: with attached command
Raises:
ExtensionError: malformed command
"""
# Convert label to string for backwards compatibility
if not isinstance(label, str):
warnings.warn(
"Snapshot label should be a string, "
"implicit conversion is depreciated.", DeprecationWarning)
label = str(label)
# If no qubits are specified we add all qubits so it acts as a barrier
# This is needed for full register snapshots like statevector
if isinstance(qubits, QuantumRegister):
qubits = qubits[:]
if not qubits:
tuples = []
if isinstance(self, QuantumCircuit):
for register in self.qregs:
tuples.append(register)
if not tuples:
raise ExtensionError('no qubits for snapshot')
qubits = []
for tuple_element in tuples:
if isinstance(tuple_element, QuantumRegister):
for j in range(tuple_element.size):
qubits.append((tuple_element, j))
else:
qubits.append(tuple_element)
return self.append(
Snapshot(
label,
snapshot_type=snapshot_type,
num_qubits=len(qubits),
params=params), qubits) | 0.001033 |
def __split_file(self):
'''
Splits combined SAR output file (in ASCII format) in order to
extract info we need for it, in the format we want.
:return: ``List``-style of SAR file sections separated by
the type of info they contain (SAR file sections) without
parsing what is exactly what at this point
'''
# Filename passed checks through __init__
if (self.__filename and os.access(self.__filename, os.R_OK)):
fhandle = None
try:
fhandle = os.open(self.__filename, os.O_RDONLY)
except OSError:
print(("Couldn't open file %s" % (self.__filename)))
fhandle = None
if (fhandle):
try:
sarmap = mmap.mmap(fhandle, length=0, prot=mmap.PROT_READ)
except (TypeError, IndexError):
os.close(fhandle)
traceback.print_exc()
#sys.exit(-1)
return False
sfpos = sarmap.find(PATTERN_MULTISPLIT, 0)
while (sfpos > -1):
'''Split by day found'''
self.__splitpointers.append(sfpos)
# Iterate for new position
try:
sfpos = sarmap.find(PATTERN_MULTISPLIT, (sfpos + 1))
except ValueError:
print("ValueError on mmap.find()")
return True
if (self.__splitpointers):
# Not sure if this will work - if empty set
# goes back as True here
return True
return False | 0.001771 |
def _CopyFromDateTimeString(self, time_string):
"""Copies a POSIX timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC.
"""
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minutes = date_time_values.get('minutes', 0)
seconds = date_time_values.get('seconds', 0)
microseconds = date_time_values.get('microseconds', None)
timestamp = self._GetNumberOfSecondsFromElements(
year, month, day_of_month, hours, minutes, seconds)
timestamp *= definitions.NANOSECONDS_PER_SECOND
if microseconds:
nanoseconds = microseconds * definitions.MILLISECONDS_PER_SECOND
timestamp += nanoseconds
self._normalized_timestamp = None
self._timestamp = timestamp | 0.002394 |
async def jsk_debug(self, ctx: commands.Context, *, command_string: str):
"""
Run a command timing execution and catching exceptions.
"""
alt_ctx = await copy_context_with(ctx, content=ctx.prefix + command_string)
if alt_ctx.command is None:
return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')
start = time.perf_counter()
async with ReplResponseReactor(ctx.message):
with self.submit(ctx):
await alt_ctx.command.invoke(alt_ctx)
end = time.perf_counter()
return await ctx.send(f"Command `{alt_ctx.command.qualified_name}` finished in {end - start:.3f}s.") | 0.007236 |
def get_mimetype(self):
"""
Mimetype is calculated based on the file's content. If ``_mimetype``
attribute is available, it will be returned (backends which store
mimetypes or can easily recognize them, should set this private
attribute to indicate that type should *NOT* be calculated).
"""
if hasattr(self, '_mimetype'):
if (isinstance(self._mimetype, (tuple, list,)) and
len(self._mimetype) == 2):
return self._mimetype
else:
raise NodeError('given _mimetype attribute must be a 2-element '
'list or tuple')
mtype, encoding = mimetypes.guess_type(self.name)
if mtype is None:
if self.is_binary:
mtype = 'application/octet-stream'
encoding = None
else:
mtype = 'text/plain'
encoding = None
return mtype, encoding | 0.004049 |
def connect_options_node_proxy(self, name, **kwargs): # noqa: E501
"""connect_options_node_proxy # noqa: E501
connect OPTIONS requests to proxy of Node # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_options_node_proxy(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the NodeProxyOptions (required)
:param str path: Path is the URL path to use for the current proxy request to node.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_options_node_proxy_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.connect_options_node_proxy_with_http_info(name, **kwargs) # noqa: E501
return data | 0.001871 |
def stop(ctx, yes):
"""Stop job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon job stop
```
\b
```bash
$ polyaxon job -xp 2 stop
```
"""
user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job'))
if not yes and not click.confirm("Are sure you want to stop "
"job `{}`".format(_job)):
click.echo('Existing without stopping job.')
sys.exit(0)
try:
PolyaxonClient().job.stop(user, project_name, _job)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not stop job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Job is being stopped.") | 0.003421 |
def ggplot_color_wheel(n, start = 15, saturation_adjustment = None, saturation = 0.65, lightness = 1.0, prefix = ''):
'''Returns a list of colors with the same distributed spread as used in ggplot2.
A saturation of 0.5 will leave the input color at the usual saturation e.g. if start is 240 (240/360 = 0.66 = blue) and saturation is 0.5 then #0000ff will be returned.
'''
hues = range(start, start + 360, 360 // n)
rgbcolors = ['%.2x%.2x%.2x' % (255 * hlscol[0], 255 * hlscol[1], 255 * hlscol[2]) for hlscol in [colorsys.hls_to_rgb(float(h % 360) / 360.0, saturation, lightness) for h in hues]]
if saturation_adjustment:
return [saturate_hex_color(prefix + rgbcol, saturation_adjustment) for rgbcol in rgbcolors]
else:
return rgbcolors | 0.020539 |
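# A hypothetical usage sketch, assuming colorsys is imported and
# saturate_hex_color exists when saturation_adjustment is passed:
colors = ggplot_color_wheel(3)  # hues at 15, 135 and 255 degrees
print(colors)  # three 6-character hex strings, one per plot series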
def update_dataset(dataset_id, name, data_type, val, unit_id, metadata={}, flush=True, **kwargs):
"""
Update an existing dataset
"""
if dataset_id is None:
raise HydraError("Dataset must have an ID to be updated.")
user_id = kwargs.get('user_id')
dataset = db.DBSession.query(Dataset).filter(Dataset.id==dataset_id).one()
#This dataset been seen before, so it may be attached
#to other scenarios, which may be locked. If they are locked, we must
#not change their data, so new data must be created for the unlocked scenarios
locked_scenarios = []
unlocked_scenarios = []
for dataset_rs in dataset.resourcescenarios:
if dataset_rs.scenario.locked == 'Y':
locked_scenarios.append(dataset_rs)
else:
unlocked_scenarios.append(dataset_rs)
#Are any of these scenarios locked?
if len(locked_scenarios) > 0:
#If so, create a new dataset and assign to all unlocked datasets.
dataset = add_dataset(data_type,
val,
unit_id,
metadata=metadata,
name=name,
user_id=kwargs['user_id'])
for unlocked_rs in unlocked_scenarios:
unlocked_rs.dataset = dataset
else:
dataset.type = data_type
dataset.value = val
dataset.set_metadata(metadata)
dataset.unit_id = unit_id
dataset.name = name
dataset.created_by = kwargs['user_id']
dataset.hash = dataset.set_hash()
#Is there a dataset in the DB already which is identical to the updated dataset?
existing_dataset = db.DBSession.query(Dataset).filter(Dataset.hash==dataset.hash, Dataset.id != dataset.id).first()
if existing_dataset is not None and existing_dataset.check_user(user_id):
log.warning("An identical dataset %s has been found to dataset %s."
" Deleting dataset and returning dataset %s",
existing_dataset.id, dataset.id, existing_dataset.id)
db.DBSession.delete(dataset)
dataset = existing_dataset
if flush:
db.DBSession.flush()
return dataset | 0.011404 |
async def find(
self,
*,
types=None,
data=None,
countries=None,
post=False,
strict=False,
dnsbl=None,
limit=0,
**kwargs
):
"""Gather and check proxies from providers or from a passed data.
:ref:`Example of usage <proxybroker-examples-find>`.
:param list types:
Types (protocols) that need to be check on support by proxy.
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25
And levels of anonymity (HTTP only): Transparent, Anonymous, High
:param data:
(optional) String or list with proxies. Also can be a file-like
object supports `read()` method. Used instead of providers
:param list countries:
(optional) List of ISO country codes where should be located
proxies
:param bool post:
(optional) Flag indicating use POST instead of GET for requests
when checking proxies
:param bool strict:
(optional) Flag indicating that anonymity levels of types
(protocols) supported by a proxy must be equal to the requested
types and levels of anonymity. By default, strict mode is off and
for a successful check is enough to satisfy any one of the
requested types
:param list dnsbl:
(optional) Spam databases for proxy checking.
`Wiki <https://en.wikipedia.org/wiki/DNSBL>`_
:param int limit: (optional) The maximum number of proxies
:raises ValueError:
If :attr:`types` not given.
.. versionchanged:: 0.2.0
Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`.
Changed: :attr:`types` is required.
"""
ip = await self._resolver.get_real_ext_ip()
types = _update_types(types)
if not types:
raise ValueError('`types` is required')
self._checker = Checker(
judges=self._judges,
timeout=self._timeout,
verify_ssl=self._verify_ssl,
max_tries=self._max_tries,
real_ext_ip=ip,
types=types,
post=post,
strict=strict,
dnsbl=dnsbl,
loop=self._loop,
)
self._countries = countries
self._limit = limit
tasks = [asyncio.ensure_future(self._checker.check_judges())]
if data:
task = asyncio.ensure_future(self._load(data, check=True))
else:
task = asyncio.ensure_future(self._grab(types, check=True))
tasks.append(task)
self._all_tasks.extend(tasks) | 0.001111 |
def _get_existing_logical_drives(raid_adapter):
"""Collect existing logical drives on the server.
:param raid_adapter: raid adapter info
:returns: existing_logical_drives: get logical drive on server
"""
existing_logical_drives = []
logical_drives = raid_adapter['Server']['HWConfigurationIrmc'][
'Adapters']['RAIDAdapter'][0].get('LogicalDrives')
if logical_drives is not None:
for drive in logical_drives['LogicalDrive']:
existing_logical_drives.append(drive['@Number'])
return existing_logical_drives | 0.00177 |
def _set_show_mpls_lsp_name_debug(self, v, load=False):
"""
Setter method for show_mpls_lsp_name_debug, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_debug (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_lsp_name_debug is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_lsp_name_debug() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_mpls_lsp_name_debug.show_mpls_lsp_name_debug, is_leaf=True, yang_name="show-mpls-lsp-name-debug", rest_name="show-mpls-lsp-name-debug", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsLspCmdPoint'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_mpls_lsp_name_debug must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_mpls_lsp_name_debug.show_mpls_lsp_name_debug, is_leaf=True, yang_name="show-mpls-lsp-name-debug", rest_name="show-mpls-lsp-name-debug", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsLspCmdPoint'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__show_mpls_lsp_name_debug = t
if hasattr(self, '_set'):
self._set() | 0.006159 |
def correct_inverted_amphibrachs(self, scansion: str) -> str:
"""
The 'inverted amphibrach': stressed_unstressed_stressed syllable pattern is invalid
in hexameters, so here we coerce it to stressed: - U - -> - - -
:param scansion: the scansion stress pattern
:return: a string with the corrected scansion pattern
>>> print(HexameterScanner().correct_inverted_amphibrachs(
... " - U - - U - U U U U - U - x")) # doctest: +NORMALIZE_WHITESPACE
- - - - - - U U U U - - - x
>>> print(HexameterScanner().correct_inverted_amphibrachs(
... " - - - U - - U U U U U- - U - x")) # doctest: +NORMALIZE_WHITESPACE
- - - - - - U U U U U- - - - x
>>> print(HexameterScanner().correct_inverted_amphibrachs(
... "- - - - - U - U U - U U - -")) # doctest: +NORMALIZE_WHITESPACE
- - - - - - - U U - U U - -
>>> print(HexameterScanner().correct_inverted_amphibrachs(
... "- UU- U - U - - U U U U- U")) # doctest: +NORMALIZE_WHITESPACE
- UU- - - - - - U U U U- U
"""
new_line = scansion
while list(self.inverted_amphibrach_re.finditer(new_line)):
matches = list(self.inverted_amphibrach_re.finditer(new_line))
for match in matches:
(start, end) = match.span() # pylint: disable=unused-variable
unstressed_idx = new_line.index(self.constants.UNSTRESSED, start)
new_line = new_line[:unstressed_idx] + \
self.constants.STRESSED + new_line[unstressed_idx + 1:]
return new_line | 0.005879 |
def _relation(self, id, join_on, join_to, level=None, featuretype=None,
order_by=None, reverse=False, completely_within=False,
limit=None):
# The following docstring will be included in the parents() and
# children() docstrings to maintain consistency, since they both
# delegate to this method.
"""
Parameters
----------
id : string or a Feature object
level : None or int
If `level=None` (default), then return all children regardless
of level. If `level` is an integer, then constrain to just that
level.
{_method_doc}
Returns
-------
A generator object that yields :class:`Feature` objects.
"""
if isinstance(id, Feature):
id = id.id
other = '''
JOIN relations
ON relations.{join_on} = features.id
WHERE relations.{join_to} = ?
'''.format(**locals())
args = [id]
level_clause = ''
if level is not None:
level_clause = 'relations.level = ?'
args.append(level)
query, args = helpers.make_query(
args=args,
other=other,
extra=level_clause,
featuretype=featuretype,
order_by=order_by,
reverse=reverse,
limit=limit,
completely_within=completely_within,
)
# modify _SELECT so that only unique results are returned
query = query.replace("SELECT", "SELECT DISTINCT")
for i in self._execute(query, args):
yield self._feature_returner(**i) | 0.004177 |
def _goto_playing_station(self, changing_playlist=False):
""" make sure playing station is visible """
if (self.player.isPlaying() or self.operation_mode == PLAYLIST_MODE) and \
(self.selection != self.playing or changing_playlist):
if changing_playlist:
self.startPos = 0
max_lines = self.bodyMaxY - 2
if logger.isEnabledFor(logging.INFO):
logger.info('max_lines = {0}, self.playing = {1}'.format(max_lines, self.playing))
if self.number_of_items < max_lines:
self.startPos = 0
elif self.playing < self.startPos or \
self.playing >= self.startPos + max_lines:
if logger.isEnabledFor(logging.INFO):
logger.info('=== _goto:adjusting startPos')
if self.playing < max_lines:
self.startPos = 0
if self.playing - int(max_lines/2) > 0:
self.startPos = self.playing - int(max_lines/2)
elif self.playing > self.number_of_items - max_lines:
self.startPos = self.number_of_items - max_lines
else:
self.startPos = int(self.playing+1/max_lines) - int(max_lines/2)
if logger.isEnabledFor(logging.INFO):
logger.info('===== _goto:startPos = {0}, changing_playlist = {1}'.format(self.startPos, changing_playlist))
self.selection = self.playing
self.refreshBody() | 0.00454 |
def execute_job(self, obj):
"""
Execute the BMDS model and parse outputs if successful.
"""
# get executable path
exe = session.BMDS.get_model(obj["bmds_version"], obj["model_name"]).get_exe_path()
# write dfile
dfile = self.tempfiles.get_tempfile(prefix="bmds-dfile-", suffix=".(d)")
with open(dfile, "w") as f:
f.write(obj["dfile"])
outfile = self.get_outfile(dfile, obj["model_name"])
oo2 = outfile.replace(".out", ".002")
proc = subprocess.Popen([exe, dfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = None
stdout = ""
stderr = ""
try:
stdout, stderr = proc.communicate(timeout=settings.BMDS_MODEL_TIMEOUT_SECONDS)
if os.path.exists(outfile):
with open(outfile, "r") as f:
output = f.read()
status = RunStatus.SUCCESS.value
stdout = stdout.decode().strip()
stderr = stderr.decode().strip()
except subprocess.TimeoutExpired:
proc.kill()
status = RunStatus.FAILURE.value
stdout, stderr = proc.communicate()
finally:
if os.path.exists(outfile):
self.tempfiles.append(outfile)
if os.path.exists(oo2):
self.tempfiles.append(oo2)
self.tempfiles.cleanup()
return dict(status=status, output=output, stdout=stdout, stderr=stderr) | 0.003992 |
async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse HTML and get results
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode("latin-1"), parser)
for rank, result in enumerate(__class__.RESULTS_SELECTOR(html), 1):
# extract url
metadata_div = result.find("div")
metadata_json = lxml.etree.tostring(metadata_div, encoding="unicode", method="text")
metadata_json = json.loads(metadata_json)
google_url = result.find("a").get("href")
if google_url is not None:
query = urllib.parse.urlsplit(google_url).query
else:
query = None
if not query:
img_url = metadata_json["ou"]
else:
query = urllib.parse.parse_qs(query)
img_url = query["imgurl"][0]
# extract format
check_metadata = CoverImageMetadata.NONE
format = metadata_json["ity"].lower()
try:
format = SUPPORTED_IMG_FORMATS[format]
except KeyError:
# format could not be identified or is unknown
format = None
check_metadata = CoverImageMetadata.FORMAT
# extract size
if not query:
size = metadata_json["ow"], metadata_json["oh"]
else:
size = tuple(map(int, (query["w"][0], query["h"][0])))
# extract thumbnail url
thumbnail_url = metadata_json["tu"]
# result
results.append(GoogleImagesCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
rank=rank,
check_metadata=check_metadata))
return results | 0.012685 |
def lstm_state_tuples(num_nodes, name):
"""Convenience so that the names of the vars are defined in the same file."""
if not isinstance(num_nodes, tf.compat.integral_types):
raise ValueError('num_nodes must be an integer: %s' % num_nodes)
return [(STATE_NAME % name + '_0', tf.float32, num_nodes),
(STATE_NAME % name + '_1', tf.float32, num_nodes)] | 0.01087 |
def parse_tagLength_dist(self):
"""parses and plots tag length distribution files"""
# Find and parse homer tag length distribution reports
for f in self.find_log_files('homer/LengthDistribution', filehandles=True):
s_name = os.path.basename(f['root'])
s_name = self.clean_s_name(s_name, f['root'])
parsed_data = self.parse_length_dist(f)
if parsed_data is not None:
if s_name in self.tagdir_data['length']:
log.debug("Duplicate Length Distribution sample log found! Overwriting: {}".format(s_name))
self.add_data_source(f, s_name, section='length')
self.tagdir_data['length'][s_name] = parsed_data
self.tagdir_data['length'] = self.ignore_samples(self.tagdir_data['length'])
if len(self.tagdir_data['length']) > 0:
self.add_section (
name = 'Tag Length Distribution',
anchor = 'homer-tagLength',
description = 'This plot shows the distribution of tag length.',
helptext = 'This is a good quality control for the tag lengths input into Homer.',
plot = self.length_dist_chart()
) | 0.014563 |
def initUI(self):
#self.setMinimumSize(WIDTH,HEIGTH)
#self.setMaximumSize(WIDTH,HEIGTH)
'''Radio buttons for Original/RGB/HSV/YUV images'''
self.origButton = QRadioButton("Original")
self.rgbButton = QRadioButton("RGB")
self.hsvButton = QRadioButton("HSV")
self.yuvButton = QRadioButton("YUV")
'''HSV status scheck'''
self.hsvCheck = QCheckBox('HSV Color Disc')
self.hsvCheck.setObjectName("hsvCheck")
self.hsvCheck.stateChanged.connect(self.showHSVWidget)
'''Signals for toggled radio buttons'''
self.origButton.toggled.connect(lambda:self.origButtonState())
self.rgbButton.toggled.connect(lambda:self.rgbButtonState())
self.hsvButton.toggled.connect(lambda:self.hsvButtonState())
self.yuvButton.toggled.connect(lambda:self.yuvButtonState())
self.origButton.setChecked(True)
'''Main layout of the widget will contain several vertical layouts'''
self.gLayout = QGridLayout(self)
self.gLayout.setObjectName("gLayout")
''' Vertical Layout for radio buttons '''
self.radio1Layout = QHBoxLayout()
self.radio2Layout = QVBoxLayout()
self.radio3Layout = QVBoxLayout()
self.radioLayout = QVBoxLayout()
self.radioLayout.setObjectName("radioLayout")
self.radio1Layout.addWidget(self.origButton)
self.radio1Layout.addWidget(self.rgbButton)
self.radio1Layout.addWidget(self.hsvButton)
self.radio1Layout.addWidget(self.yuvButton)
self.radio1Layout.addWidget(self.hsvCheck)
#self.radio1Layout.addLayout(self.radio2Layout)
#self.radio1Layout.addLayout(self.radio3Layout)
self.radioLayout.addLayout(self.radio1Layout)
self.vSpacer = QSpacerItem(10, 10, QSizePolicy.Ignored, QSizePolicy.Ignored);
self.radioLayout.addItem(self.vSpacer)
hmin,smin,vmin = HSVMIN
hmax,smax,vmax = HSVMAX
''' Vertical Layout for HMIN Slider'''
self.hminLayout = QVBoxLayout()
self.hminLayout.setObjectName("hminLayout")
self.hminLabel = QLabel("HMin")
self.hminValue = QLineEdit(str(hmin),self)
self.hminValue.setValidator(QIntValidator(hmin, hmax, self));
self.hminValue.setFixedWidth(40)
self.hminValue.setFixedHeight(27)
self.hminValue.setAlignment(Qt.AlignCenter);
self.hminSlider = QSlider(Qt.Vertical)
self.hminSlider.setMinimum(hmin)
self.hminSlider.setMaximum(hmax)
self.hminSlider.setValue(hmin)
self.hminLayout.addWidget(self.hminLabel, Qt.AlignCenter)
self.hminLayout.addWidget(self.hminValue,Qt.AlignCenter)
self.hminLayout.addWidget(self.hminSlider)
''' Vertical Layout for HMAX Slider'''
self.hmaxLayout = QVBoxLayout()
self.hmaxLayout.setObjectName("hmaxLayout")
self.hmaxLabel = QLabel("HMax")
self.hmaxValue = QLineEdit(str(hmax),self)
self.hmaxValue.setValidator(QIntValidator(hmin, hmax, self));
self.hmaxValue.setFixedWidth(40)
self.hmaxValue.setFixedHeight(27)
self.hmaxValue.setAlignment(Qt.AlignCenter);
self.hmaxSlider = QSlider(Qt.Vertical)
self.hmaxSlider.setMinimum(hmin)
self.hmaxSlider.setMaximum(hmax)
self.hmaxSlider.setValue(hmax)
self.hmaxLayout.addWidget(self.hmaxLabel)
self.hmaxLayout.addWidget(self.hmaxValue)
self.hmaxLayout.addWidget(self.hmaxSlider)
''' Vertical Layout for SMIN Slider'''
self.sminLayout = QVBoxLayout()
self.sminLayout.setObjectName("sminLayout")
self.sminLabel = QLabel("SMin")
self.sminValue = QLineEdit(str(smin),self)
self.sminValue.setValidator(QIntValidator(smin, smax, self));
self.sminValue.setFixedWidth(40)
self.sminValue.setFixedHeight(27)
self.sminValue.setAlignment(Qt.AlignCenter);
self.sminSlider = QSlider(Qt.Vertical)
self.sminSlider.setMinimum(smin)
self.sminSlider.setMaximum(smax)
self.sminSlider.setValue(smin)
self.sminLayout.addWidget(self.sminLabel)
self.sminLayout.addWidget(self.sminValue)
self.sminLayout.addWidget(self.sminSlider)
''' Vertical Layout for SMAX Slider'''
self.smaxLayout = QVBoxLayout()
self.smaxLayout.setObjectName("smaxLayout")
self.smaxLabel = QLabel("SMax")
self.smaxValue = QLineEdit(str(smax),self)
self.smaxValue.setValidator(QIntValidator(smin, smax, self));
self.smaxValue.setFixedWidth(40)
self.smaxValue.setFixedHeight(27)
self.smaxValue.setAlignment(Qt.AlignCenter);
self.smaxSlider = QSlider(Qt.Vertical)
self.smaxSlider.setMinimum(smin)
self.smaxSlider.setMaximum(smax)
self.smaxSlider.setValue(smax)
self.smaxLayout.addWidget(self.smaxLabel)
self.smaxLayout.addWidget(self.smaxValue)
self.smaxLayout.addWidget(self.smaxSlider)
''' Vertical Layout for VMIN Slider'''
self.vminLayout = QVBoxLayout()
self.vminLayout.setObjectName("vminLayout")
self.vminLabel = QLabel("VMin")
self.vminValue = QLineEdit(str(vmin),self)
self.vminValue.setValidator(QIntValidator(vmin, vmax, self));
self.vminValue.setFixedWidth(40)
self.vminValue.setFixedHeight(27)
self.vminValue.setAlignment(Qt.AlignCenter);
self.vminSlider = QSlider(Qt.Vertical)
self.vminSlider.setMinimum(vmin)
self.vminSlider.setMaximum(vmax)
self.vminSlider.setValue(vmin)
self.vminLayout.addWidget(self.vminLabel)
self.vminLayout.addWidget(self.vminValue)
self.vminLayout.addWidget(self.vminSlider)
''' Vertical Layout for VMAX Slider'''
self.vmaxLayout = QVBoxLayout()
self.vmaxLayout.setObjectName("vmaxLayout")
self.vmaxLabel = QLabel("VMax")
self.vmaxValue = QLineEdit(str(vmax),self)
self.vmaxValue.setValidator(QIntValidator(vmin, vmax, self));
self.vmaxValue.setFixedWidth(40)
self.vmaxValue.setFixedHeight(27)
self.vmaxValue.setAlignment(Qt.AlignCenter);
self.vmaxSlider = QSlider(Qt.Vertical)
self.vmaxSlider.setMinimum(vmin)
self.vmaxSlider.setMaximum(vmax)
self.vmaxSlider.setValue(vmax)
self.vmaxLayout.addWidget(self.vmaxLabel)
self.vmaxLayout.addWidget(self.vmaxValue)
self.vmaxLayout.addWidget(self.vmaxSlider)
'''Adding all the vertical layouts to the main horizontal layout'''
self.gLayout.addLayout(self.radioLayout,1,0,1,6,Qt.AlignCenter)
self.gLayout.addLayout(self.hminLayout,2,0,Qt.AlignCenter)
self.gLayout.addLayout(self.hmaxLayout,2,1,Qt.AlignCenter)
self.gLayout.addLayout(self.sminLayout,2,2,Qt.AlignCenter)
self.gLayout.addLayout(self.smaxLayout,2,3,Qt.AlignCenter)
self.gLayout.addLayout(self.vminLayout,2,4,Qt.AlignCenter)
self.gLayout.addLayout(self.vmaxLayout,2,5,Qt.AlignCenter)
self.setLayout(self.gLayout)
'''Signals for sliders value changes'''
self.hminSlider.valueChanged.connect(self.changeHmin)
self.hmaxSlider.valueChanged.connect(self.changeHmax)
self.sminSlider.valueChanged.connect(self.changeSmin)
self.smaxSlider.valueChanged.connect(self.changeSmax)
self.vminSlider.valueChanged.connect(self.changeVmin)
self.vmaxSlider.valueChanged.connect(self.changeVmax)
self.hminValue.textChanged.connect(self.changeHmin2)
self.hmaxValue.textChanged.connect(self.changeHmax2)
self.sminValue.textChanged.connect(self.changeSmin2)
self.smaxValue.textChanged.connect(self.changeSmax2)
self.vminValue.textChanged.connect(self.changeVmin2)
self.vmaxValue.textChanged.connect(self.changeVmax2) | 0.008914 |
def qn_df(df, axis='row', keep_orig=False):
'''
do quantile normalization of a dataframe dictionary, does not write to net
'''
df_qn = {}
for mat_type in df:
inst_df = df[mat_type]
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
missing_values = inst_df.isnull().values.any()
# make mask of missing values
if missing_values:
# get nan mask
missing_mask = pd.isnull(inst_df)
# tmp fill in na with zero, will not affect qn
inst_df = inst_df.fillna(value=0)
# calc common distribution
common_dist = calc_common_dist(inst_df)
# swap in common distribution
inst_df = swap_in_common_dist(inst_df, common_dist)
# swap back in missing values
if missing_values:
inst_df = inst_df.mask(missing_mask, other=np.nan)
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
df_qn[mat_type] = inst_df
return df_qn | 0.01227 |
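# For context, a self-contained sketch of plain column-wise quantile
# normalization on a single DataFrame, independent of the
# calc_common_dist / swap_in_common_dist helpers assumed above:
import numpy as np
import pandas as pd

def quantile_normalize(df):
    """Column-wise quantile normalization of a numeric DataFrame."""
    # Sort every column, then average across columns at each rank to
    # obtain the common (reference) distribution.
    common_dist = np.sort(df.values, axis=0).mean(axis=1)
    # Replace each value with the common value at its within-column rank.
    out = df.copy()
    for col in df.columns:
        ranks = df[col].rank(method="first").astype(int) - 1
        out[col] = common_dist[ranks.values]
    return out

example = pd.DataFrame({"a": [5.0, 2.0, 3.0], "b": [4.0, 1.0, 6.0]})
print(quantile_normalize(example))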
def set_inlets(self, pores=[], overwrite=False):
r"""
Set the locations from which the invader enters the network
Parameters
----------
pores : array_like
Locations that are initially filled with invader, from which
clusters grow and invade into the network
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False`` (default),
then supplied locations are added to any already existing inlet
locations.
"""
Ps = self._parse_indices(pores)
if np.sum(self['pore.outlets'][Ps]) > 0:
raise Exception('Some inlets are already defined as outlets')
if overwrite:
self['pore.inlets'] = False
self['pore.inlets'][Ps] = True
self['pore.invasion_pressure'][Ps] = 0
self['pore.invasion_sequence'][Ps] = 0 | 0.002041 |
def from_file(self, filename):
"""Read configuration from a .rc file.
`filename` is a file name to read.
"""
self.attempted_config_files.append(filename)
cp = HandyConfigParser()
files_read = cp.read(filename)
if files_read is not None: # return value changed in 2.4
self.config_files.extend(files_read)
for option_spec in self.CONFIG_FILE_OPTIONS:
self.set_attr_from_config_option(cp, *option_spec)
# [paths] is special
if cp.has_section('paths'):
for option in cp.options('paths'):
self.paths[option] = cp.getlist('paths', option) | 0.002985 |
def pushbullet(body, apikey, device, title="JCVI: Job Monitor", type="note"):
"""
pushbullet.com API
<https://www.pushbullet.com/api>
"""
import base64
headers = {}
auth = base64.encodestring("{0}:".format(apikey)).strip()
headers['Authorization'] = "Basic {0}".format(auth)
headers['Content-type'] = "application/x-www-form-urlencoded"
conn = HTTPSConnection("api.pushbullet.com".format(apikey))
conn.request("POST", "/api/pushes",
urlencode({
"iden": device,
"type": "note",
"title": title,
"body": body,
}), headers)
conn.getresponse() | 0.003044 |
def display_db_info(self):
"""Displays some basic info about the GnuCash book"""
with self.open_book() as book:
default_currency = book.default_currency
print("Default currency is ", default_currency.mnemonic) | 0.008032 |
def merge_validator_config(configs):
"""
Given a list of ValidatorConfig objects, merges them into a single
ValidatorConfig, giving priority in the order of the configs
(first has highest priority).
"""
bind_network = None
bind_component = None
bind_consensus = None
endpoint = None
peering = None
seeds = None
peers = None
network_public_key = None
network_private_key = None
scheduler = None
permissions = None
roles = None
opentsdb_url = None
opentsdb_db = None
opentsdb_username = None
opentsdb_password = None
minimum_peer_connectivity = None
maximum_peer_connectivity = None
state_pruning_block_depth = None
fork_cache_keep_time = None
component_thread_pool_workers = None
network_thread_pool_workers = None
signature_thread_pool_workers = None
for config in reversed(configs):
if config.bind_network is not None:
bind_network = config.bind_network
if config.bind_component is not None:
bind_component = config.bind_component
if config.bind_consensus is not None:
bind_consensus = config.bind_consensus
if config.endpoint is not None:
endpoint = config.endpoint
if config.peering is not None:
peering = config.peering
if config.seeds is not None:
seeds = config.seeds
if config.peers is not None:
peers = config.peers
if config.network_public_key is not None:
network_public_key = config.network_public_key
if config.network_private_key is not None:
network_private_key = config.network_private_key
if config.scheduler is not None:
scheduler = config.scheduler
if config.permissions is not None or config.permissions == {}:
permissions = config.permissions
if config.roles is not None:
roles = config.roles
if config.opentsdb_url is not None:
opentsdb_url = config.opentsdb_url
if config.opentsdb_db is not None:
opentsdb_db = config.opentsdb_db
if config.opentsdb_username is not None:
opentsdb_username = config.opentsdb_username
if config.opentsdb_password is not None:
opentsdb_password = config.opentsdb_password
if config.minimum_peer_connectivity is not None:
minimum_peer_connectivity = config.minimum_peer_connectivity
if config.maximum_peer_connectivity is not None:
maximum_peer_connectivity = config.maximum_peer_connectivity
if config.state_pruning_block_depth is not None:
state_pruning_block_depth = config.state_pruning_block_depth
if config.fork_cache_keep_time is not None:
fork_cache_keep_time = config.fork_cache_keep_time
if config.component_thread_pool_workers is not None:
component_thread_pool_workers = \
config.component_thread_pool_workers
if config.network_thread_pool_workers is not None:
network_thread_pool_workers = \
config.network_thread_pool_workers
if config.signature_thread_pool_workers is not None:
signature_thread_pool_workers = \
config.signature_thread_pool_workers
return ValidatorConfig(
bind_network=bind_network,
bind_component=bind_component,
bind_consensus=bind_consensus,
endpoint=endpoint,
peering=peering,
seeds=seeds,
peers=peers,
network_public_key=network_public_key,
network_private_key=network_private_key,
scheduler=scheduler,
permissions=permissions,
roles=roles,
opentsdb_url=opentsdb_url,
opentsdb_db=opentsdb_db,
opentsdb_username=opentsdb_username,
opentsdb_password=opentsdb_password,
minimum_peer_connectivity=minimum_peer_connectivity,
maximum_peer_connectivity=maximum_peer_connectivity,
state_pruning_block_depth=state_pruning_block_depth,
fork_cache_keep_time=fork_cache_keep_time,
component_thread_pool_workers=component_thread_pool_workers,
network_thread_pool_workers=network_thread_pool_workers,
signature_thread_pool_workers=signature_thread_pool_workers
) | 0.00023 |
def load(self, ioi, ac_ignore_missing=False, **options):
"""
Load config from a file path or a file / file-like object that 'ioi'
refers to, after some checks.
:param ioi:
'anyconfig.globals.IOInfo' namedtuple object provides various info
of input object to load data from
:param ac_ignore_missing:
Ignore and just return an empty result if the given `ioi` object does
not actually exist.
:param options:
options will be passed to backend specific loading functions.
please note that options have to be sanitized w/
:func:`anyconfig.utils.filter_options` later to filter out options
not in _load_opts.
:return: dict or dict-like object holding configurations
"""
container = self._container_factory(**options)
options = self._load_options(container, **options)
if not ioi:
return container()
if anyconfig.utils.is_stream_ioinfo(ioi):
cnf = self.load_from_stream(ioi.src, container, **options)
else:
if ac_ignore_missing and not os.path.exists(ioi.path):
return container()
cnf = self.load_from_path(ioi.path, container, **options)
return cnf | 0.001523 |
def trace_memory_clean_caches(self):
""" Avoid polluting results with some builtin python caches """
urllib.parse.clear_cache()
re.purge()
linecache.clearcache()
copyreg.clear_extension_cache()
if hasattr(fnmatch, "purge"):
fnmatch.purge() # pylint: disable=no-member
elif hasattr(fnmatch, "_purge"):
fnmatch._purge() # pylint: disable=no-member
if hasattr(encodings, "_cache") and len(encodings._cache) > 0:
encodings._cache = {}
for handler in context.log.handlers:
handler.flush() | 0.003284 |
def create_app():
""" Flask application factory """
# Setup Flask app and app.config
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Initialize Flask extensions
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
# Define the User data-model. Make sure to add flask_user UserMixin !!!
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
# User authentication information
username = db.Column(db.String(50, collation='NOCASE'), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='')
last_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='')
# Relationship
user_emails = db.relationship('UserEmail')
# Define UserEmail DataModel.
class UserEmail(db.Model):
__tablename__ = 'user_emails'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
# User email information
email = db.Column(db.String(255, collation='NOCASE'), nullable=False, unique=True)
email_confirmed_at = db.Column(db.DateTime())
is_primary = db.Column(db.Boolean(), nullable=False, default=False)
# Relationship
user = db.relationship('User', uselist=False)
# Create all database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserEmailClass=UserEmail) # Register the User data-model
user_manager = UserManager(db_adapter, app) # Initialize Flask-User
# The Home page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "flask_user_layout.html" %}
{% block content %}
<h2>Home page</h2>
<p>This page can be accessed by anyone.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('member_page') }}>Members page</a> (login required)</p>
{% endblock %}
""")
# The Members page is only accessible to authenticated users
@app.route('/members')
@login_required # Use of @login_required decorator
def member_page():
return render_template_string("""
{% extends "flask_user_layout.html" %}
{% block content %}
<h2>Members page</h2>
<p>This page can only be accessed by authenticated users.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('member_page') }}>Members page</a> (login required)</p>
{% endblock %}
""")
return app | 0.005734 |
def take_home_pay(gross_pay, employer_match, taxes_and_fees, numtype='float'):
"""
Calculate net take-home pay including employer retirement savings match
using the formula laid out by Mr. Money Mustache:
http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/
Args:
gross_pay: float or int, gross monthly pay.
employer_match: float or int, the 401(k) match from your employer.
taxes_and_fees: list, taxes and fees that are deducted from your paycheck.
numtype: string, 'decimal' or 'float'; the type of number to return.
Returns:
your monthly take-home pay.
"""
if numtype == 'decimal':
return (Decimal(gross_pay) + Decimal(employer_match)) - Decimal(
sum(taxes_and_fees)
)
else:
return (float(gross_pay) + float(employer_match)) - sum(taxes_and_fees) | 0.002278 |
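As a quick, hypothetical illustration of the formula above (the numbers are made up, not from the source):
# Gross pay of 5000 plus a 250 employer match, minus 1250.50 in taxes and fees:
take_home_pay(5000, 250, [1000, 250.50])             # -> 3999.5 (float)
take_home_pay(5000, 250, [1000, 250.50], 'decimal')  # -> Decimal('3999.5')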
def concurrent_exec(func, param_list):
"""Executes a function with different parameters pseudo-concurrently.
This is basically a map function. Each element (should be an iterable) in
the param_list is unpacked and passed into the function. Due to Python's
GIL, there's no true concurrency. This is suited for IO-bound tasks.
Args:
        func: The function that performs a task.
param_list: A list of iterables, each being a set of params to be
passed into the function.
Returns:
A list of return values from each function execution. If an execution
caused an exception, the exception object will be the corresponding
result.
"""
with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
# Start the load operations and mark each future with its params
future_to_params = {executor.submit(func, *p): p for p in param_list}
return_vals = []
for future in concurrent.futures.as_completed(future_to_params):
params = future_to_params[future]
try:
return_vals.append(future.result())
except Exception as exc:
logging.exception("{} generated an exception: {}".format(
params, traceback.format_exc()))
return_vals.append(exc)
return return_vals | 0.000727 |
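A minimal usage sketch, assuming an IO-bound task such as a network call (the fetch helper below is a placeholder, not part of the source):
def fetch(host, port):
    # Stand-in for an IO-bound operation, e.g. an HTTP request.
    return "%s:%d" % (host, port)
results = concurrent_exec(fetch, [("a.example", 80), ("b.example", 443)])
# Each tuple is unpacked into fetch; results holds return values (or exception
# objects) in completion order, not necessarily in input order.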
def runGetBiosample(self, id_):
"""
Runs a getBiosample request for the specified ID.
"""
compoundId = datamodel.BiosampleCompoundId.parse(id_)
dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
biosample = dataset.getBiosample(id_)
return self.runGetRequest(biosample) | 0.005831 |
def ensure_hist_size(self):
"""
Shrink the history of updates for
a `index/doc_type` combination down to `self.marker_index_hist_size`.
"""
if self.marker_index_hist_size == 0:
return
result = self.es.search(index=self.marker_index,
doc_type=self.marker_doc_type,
body={'query': {
'term': {'target_index': self.index}}},
sort=('date:desc',))
for i, hit in enumerate(result.get('hits').get('hits'), start=1):
if i > self.marker_index_hist_size:
marker_document_id = hit.get('_id')
self.es.delete(id=marker_document_id, index=self.marker_index,
doc_type=self.marker_doc_type)
self.es.indices.flush(index=self.marker_index) | 0.002212 |
def askopenfilename(**kwargs):
"""Return file name(s) from Tkinter's file open dialog."""
try:
from Tkinter import Tk
import tkFileDialog as filedialog
except ImportError:
from tkinter import Tk, filedialog
root = Tk()
root.withdraw()
root.update()
filenames = filedialog.askopenfilename(**kwargs)
root.destroy()
return filenames | 0.002571 |
def autodiscover_modules(packages, related_name_re='.+',
ignore_exceptions=False):
"""Autodiscover function follows the pattern used by Celery.
:param packages: List of package names to auto discover modules in.
:type packages: list of str
:param related_name_re: Regular expression used to match modules names.
:type related_name_re: str
:param ignore_exceptions: Ignore exception when importing modules.
:type ignore_exceptions: bool
"""
warnings.warn('autodiscover_modules has been deprecated. '
'Use Flask-Registry instead.', DeprecationWarning)
global _RACE_PROTECTION
if _RACE_PROTECTION:
return []
_RACE_PROTECTION = True
modules = []
try:
tmp = [find_related_modules(pkg, related_name_re, ignore_exceptions)
for pkg in packages]
for l in tmp:
for m in l:
if m is not None:
modules.append(m)
# Workaround for finally-statement
except:
_RACE_PROTECTION = False
raise
_RACE_PROTECTION = False
return modules | 0.002648 |
def find_library_full_path(name):
"""
Similar to `from ctypes.util import find_library`, but try
to return full path if possible.
"""
from ctypes.util import find_library
if os.name == "posix" and sys.platform == "darwin":
# on Mac, ctypes already returns full path
return find_library(name)
def _use_proc_maps(name):
"""
Find so from /proc/pid/maps
Only works with libraries that has already been loaded.
But this is the most accurate method -- it finds the exact library that's being used.
"""
procmap = os.path.join('/proc', str(os.getpid()), 'maps')
if not os.path.isfile(procmap):
return None
with open(procmap, 'r') as f:
for line in f:
line = line.strip().split(' ')
sofile = line[-1]
basename = os.path.basename(sofile)
if 'lib' + name + '.so' in basename:
if os.path.isfile(sofile):
return os.path.realpath(sofile)
# The following two methods come from https://github.com/python/cpython/blob/master/Lib/ctypes/util.py
def _use_ld(name):
"""
Find so with `ld -lname -Lpath`.
It will search for files in LD_LIBRARY_PATH, but not in ldconfig.
"""
cmd = "ld -t -l{} -o {}".format(name, os.devnull)
ld_lib_path = os.environ.get('LD_LIBRARY_PATH', '')
for d in ld_lib_path.split(':'):
cmd = cmd + " -L " + d
result, ret = subproc_call(cmd + '|| true')
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
res = re.search(expr, result.decode('utf-8'))
if res:
res = res.group(0)
if not os.path.isfile(res):
return None
return os.path.realpath(res)
def _use_ldconfig(name):
"""
Find so in `ldconfig -p`.
It does not handle LD_LIBRARY_PATH.
"""
with change_env('LC_ALL', 'C'), change_env('LANG', 'C'):
ldconfig, ret = subproc_call("ldconfig -p")
ldconfig = ldconfig.decode('utf-8')
if ret != 0:
return None
expr = r'\s+(lib%s\.[^\s]+)\s+\(.*=>\s+(.*)' % (re.escape(name))
res = re.search(expr, ldconfig)
if not res:
return None
else:
ret = res.group(2)
return os.path.realpath(ret)
if sys.platform.startswith('linux'):
return _use_proc_maps(name) or _use_ld(name) or _use_ldconfig(name) or find_library(name)
return find_library(name) | 0.001524 |
def get_message_id(self):
"""Method to get messageId of group created."""
message_id = self.json_response.get("messageId", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return message_id | 0.007968 |
def check(self, _):
"""Check this configure."""
try:
import yaml
except:
return True
try:
yaml.safe_load(self.recipes)
except Exception as e:
raise RADLParseException("Invalid YAML code: %s." % e, line=self.line)
return True | 0.012461 |
def conditions_met(self, instance, state):
"""
Check if all conditions have been met
"""
transition = self.get_transition(state)
if transition is None:
return False
elif transition.conditions is None:
return True
else:
return all(map(lambda condition: condition(instance), transition.conditions)) | 0.007712 |
def convert_inputs_to_sparse_if_necessary(lhs, rhs):
'''
    This function checks whether a sparse output is desirable given the inputs and, if so, casts the inputs to sparse in order to make it so.
'''
if not sp.issparse(lhs) or not sp.issparse(rhs):
if sparse_is_desireable(lhs, rhs):
if not sp.issparse(lhs):
lhs = sp.csc_matrix(lhs)
#print "converting lhs into sparse matrix"
if not sp.issparse(rhs):
rhs = sp.csc_matrix(rhs)
#print "converting rhs into sparse matrix"
return lhs, rhs | 0.006645 |
def _negf(ins):
''' Changes sign of top of the stack (48 bits)
'''
output = _float_oper(ins.quad[2])
output.append('call __NEGF')
output.extend(_fpush())
REQUIRES.add('negf.asm')
return output | 0.004545 |
def get_form_kwargs(self):
"""
Inject the request user into the kwargs passed to the form
"""
kwargs = super(AddUpdateMixin, self).get_form_kwargs()
kwargs.update({'user': self.request.user})
return kwargs | 0.007905 |
def pre_ref_resolution_callback(self, other_model):
"""
(internal: used to store a model after parsing into the repository)
Args:
other_model: the parsed model
Returns:
nothing
"""
# print("PRE-CALLBACK{}".format(filename))
filename = other_model._tx_filename
assert (filename)
other_model._tx_model_repository = \
GlobalModelRepository(self.all_models)
self.all_models.filename_to_model[filename] = other_model | 0.003781 |
def _generate_rsa_key(key_length):
"""Generate a new RSA private key.
:param int key_length: Required key length in bits
:returns: DER-encoded private key, private key identifier, and DER encoding identifier
:rtype: tuple(bytes, :class:`EncryptionKeyType`, :class:`KeyEncodingType`)
"""
private_key = rsa.generate_private_key(public_exponent=65537, key_size=key_length, backend=default_backend())
key_bytes = private_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
return key_bytes, EncryptionKeyType.PRIVATE, KeyEncodingType.DER | 0.004329 |
def copy_config_input_source_config_source_candidate_candidate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
copy_config = ET.Element("copy_config")
config = copy_config
input = ET.SubElement(copy_config, "input")
source = ET.SubElement(input, "source")
config_source = ET.SubElement(source, "config-source")
candidate = ET.SubElement(config_source, "candidate")
candidate = ET.SubElement(candidate, "candidate")
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003273 |
def _predict_tree(model, X, joint_contribution=False):
"""
For a given DecisionTreeRegressor, DecisionTreeClassifier,
ExtraTreeRegressor, or ExtraTreeClassifier,
returns a triple of [prediction, bias and feature_contributions], such
that prediction ≈ bias + feature_contributions.
"""
leaves = model.apply(X)
paths = _get_tree_paths(model.tree_, 0)
for path in paths:
path.reverse()
leaf_to_path = {}
#map leaves to paths
for path in paths:
leaf_to_path[path[-1]] = path
# remove the single-dimensional inner arrays
values = model.tree_.value.squeeze(axis=1)
# reshape if squeezed into a single float
if len(values.shape) == 0:
values = np.array([values])
if isinstance(model, DecisionTreeRegressor):
biases = np.full(X.shape[0], values[paths[0][0]])
line_shape = X.shape[1]
elif isinstance(model, DecisionTreeClassifier):
# scikit stores category counts, we turn them into probabilities
normalizer = values.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
values /= normalizer
biases = np.tile(values[paths[0][0]], (X.shape[0], 1))
line_shape = (X.shape[1], model.n_classes_)
direct_prediction = values[leaves]
#make into python list, accessing values will be faster
values_list = list(values)
feature_index = list(model.tree_.feature)
contributions = []
if joint_contribution:
for row, leaf in enumerate(leaves):
path = leaf_to_path[leaf]
path_features = set()
contributions.append({})
for i in range(len(path) - 1):
path_features.add(feature_index[path[i]])
contrib = values_list[path[i+1]] - \
values_list[path[i]]
#path_features.sort()
contributions[row][tuple(sorted(path_features))] = \
contributions[row].get(tuple(sorted(path_features)), 0) + contrib
return direct_prediction, biases, contributions
else:
unique_leaves = np.unique(leaves)
unique_contributions = {}
for row, leaf in enumerate(unique_leaves):
for path in paths:
if leaf == path[-1]:
break
contribs = np.zeros(line_shape)
for i in range(len(path) - 1):
contrib = values_list[path[i+1]] - \
values_list[path[i]]
contribs[feature_index[path[i]]] += contrib
unique_contributions[leaf] = contribs
for row, leaf in enumerate(leaves):
contributions.append(unique_contributions[leaf])
return direct_prediction, biases, np.array(contributions) | 0.006906 |
def alignment_to_contacts(
sam_merged,
assembly,
output_dir,
output_file_network=DEFAULT_NETWORK_FILE_NAME,
output_file_chunk_data=DEFAULT_CHUNK_DATA_FILE_NAME,
parameters=DEFAULT_PARAMETERS,
):
"""Generates a network file (in edgelist form) from an
alignment in sam or bam format. Contigs are virtually split into
'chunks' of nearly fixed size (by default between 500 and 1000 bp)
to reduce size bias. The chunks are the network nodes and the edges
are the contact counts.
The network is in a strict barebone form so that it can be reused and
imported quickly into other applications etc. Verbose information about
    every single node in the network is written to a 'chunk data' file,
by default called 'idx_contig_hit_size_cov.txt'
Parameters
----------
sam_merged : file, str or pathlib.Path
The alignment file in SAM/BAM format to be processed.
assembly : file, str or pathlib.Path
The initial assembly acting as the alignment file's reference genome.
output_dir : str or pathlib.Path
The output directory to write the network and chunk data into.
    output_file_network : str or pathlib.Path, optional
The specific file name for the output network file. Default is
network.txt
output_file_chunk_data : str or pathlib.Path, optional
The specific file name for the output chunk data file. Default is
idx_contig_hit_size_cov.txt
parameters : dict, optional
A dictionary of parameters for converting the alignment file into a
network. These are:
-size_chunk_threshold: the size (in bp) under which chunks are
discarded. Default is 500.
-mapq_threshold: the mapping quality under which alignments are
discarded. Default is 10.
-chunk_size: the default chunk size (in bp) when applicable, save
smaller contigs or tail-ends. Default is 1000.
-read_size: the size of reads used for mapping. Default is 65.
-self_contacts: whether to count alignments between a chunk and
itself. Default is False.
-normalized: whether to normalize contacts by their coverage.
Default is False.
Returns
-------
chunk_complete_data : dict
A dictionary where the keys are chunks in (contig, position) form and
the values are their id, name, total contact count, size and coverage.
all_contacts : dict
A counter dictionary where the keys are chunk pairs and the values are
their contact count.
"""
all_contacts = collections.Counter()
all_chunks = collections.Counter()
# Initialize parameters
chunk_size = int(parameters["chunk_size"])
mapq_threshold = int(parameters["mapq_threshold"])
size_chunk_threshold = int(parameters["size_chunk_threshold"])
read_size = int(parameters["read_size"])
self_contacts = parameters["self_contacts"]
normalized = parameters["normalized"]
logger.info("Establishing chunk list...")
chunk_complete_data = dict()
# Get all information about all chunks from all contigs
# (this gets updated at the end)
global_id = 1
for record in SeqIO.parse(assembly, "fasta"):
length = len(record.seq)
n_chunks = length // chunk_size
n_chunks += (length % chunk_size) >= size_chunk_threshold
for i in range(n_chunks):
if (i + 1) * chunk_size <= length:
size = chunk_size
else:
size = length % chunk_size
chunk_name = "{}_{}".format(record.id, i)
chunk_complete_data[chunk_name] = {
"id": global_id,
"hit": 0,
"size": size,
"coverage": 0,
}
global_id += 1
logger.info("Opening alignment files...")
current_read = None
# Read the BAM file to detect contacts.
with pysam.AlignmentFile(sam_merged, "rb") as alignment_merged_handle:
names = alignment_merged_handle.references
lengths = alignment_merged_handle.lengths
names_and_lengths = {
name: length for name, length in zip(names, lengths)
}
logger.info("Reading contacts...")
# Since the BAM file is supposed to be sorted and interleaved,
# pairs should be always grouped with one below the other (the exact
# order doesn't matter since the network is symmetric, so we simply
# treat the first one as 'forward' and the second one as 'reverse')
# We keep iterating until two consecutive reads have the same name,
# discarding ones that don't.
while "Reading forward and reverse alignments alternatively":
try:
my_read = next(alignment_merged_handle)
if current_read is None:
# First read
current_read = my_read
continue
elif current_read.query_name != my_read.query_name:
# print("{}_{}".format(current_read, my_read))
current_read = my_read
continue
read_forward, read_reverse = current_read, my_read
except StopIteration:
break
# Get a bunch of info about the alignments to pass the tests below
read_name_forward = read_forward.query_name
read_name_reverse = read_reverse.query_name
flag_forward, flag_reverse = read_forward.flag, read_reverse.flag
try:
assert read_name_forward == read_name_reverse
except AssertionError:
logger.error(
"Reads don't have the same name: " "%s and %s",
read_name_forward,
read_name_reverse,
)
raise
# To check if a flag contains 4
# (digit on the third position from the right in base 2),
# 4 = unmapped in SAM spec
def is_unmapped(flag):
return np.base_repr(flag, padding=3)[-3] == "1"
if is_unmapped(flag_forward) or is_unmapped(flag_reverse):
# print("Detected unmapped read on one end, skipping")
continue
contig_name_forward = read_forward.reference_name
contig_name_reverse = read_reverse.reference_name
len_contig_for = names_and_lengths[contig_name_forward]
len_contig_rev = names_and_lengths[contig_name_reverse]
position_forward = read_forward.reference_start
position_reverse = read_reverse.reference_start
mapq_forward = read_forward.mapping_quality
mapq_reverse = read_reverse.mapping_quality
# Some more tests: checking for size, map quality, map status etc.
mapq_test = min(mapq_forward, mapq_reverse) > mapq_threshold
min_length = min(len_contig_for, len_contig_rev)
length_test = min_length > size_chunk_threshold
# Trickest test:
#
#
# contig
# pos1 pos2
# ^ ^
# |-------|-------|-------|-------|---|
# <-------><------><------><------><--> <->
# chunk chunk tail size_chunk_threshold
#
# Test is passed if tail >= size_chunk_threshold (pos2)
# or if the position is a non-tail chunk (pos1)
if position_forward < chunk_size * (len_contig_for // chunk_size):
current_chunk_forward_size = chunk_size
else:
current_chunk_forward_size = len_contig_for % chunk_size
if position_reverse < chunk_size * (len_contig_rev // chunk_size):
current_chunk_reverse_size = chunk_size
else:
current_chunk_reverse_size = len_contig_rev % chunk_size
min_chunk_size = min(
current_chunk_forward_size, current_chunk_reverse_size
)
chunk_test = min_chunk_size >= size_chunk_threshold
if mapq_test and length_test and chunk_test:
chunk_forward = position_forward // chunk_size
chunk_reverse = position_reverse // chunk_size
chunk_name_forward = "{}_{}".format(
contig_name_forward, chunk_forward
)
chunk_name_reverse = "{}_{}".format(
contig_name_reverse, chunk_reverse
)
if self_contacts or chunk_name_forward != chunk_name_reverse:
contact = tuple(
sorted((chunk_name_forward, chunk_name_reverse))
)
all_contacts[contact] += 1
chunk_key_forward = (
chunk_name_forward,
current_chunk_forward_size,
)
all_chunks[chunk_key_forward] += 1
chunk_key_reverse = (
chunk_name_reverse,
current_chunk_reverse_size,
)
all_chunks[chunk_key_reverse] += 1
logger.info("Writing chunk data...")
# Now we can update the chunk dictionary
# with the info we gathered from the BAM file
output_chunk_data_path = os.path.join(output_dir, output_file_chunk_data)
with open(output_chunk_data_path, "w") as chunk_data_file_handle:
for name in sorted(chunk_complete_data.keys()):
chunk_data = chunk_complete_data[name]
size = chunk_data["size"]
chunk = (name, chunk_data["size"])
hit = all_chunks[chunk]
coverage = hit * read_size * 1.0 / size
try:
chunk_complete_data[name]["hit"] = hit
chunk_complete_data[name]["coverage"] = coverage
except KeyError:
logger.error(
"A mismatch was detected between the reference "
"genome and the genome used for the alignment "
"file, some sequence names were not found"
)
raise
idx = chunk_complete_data[name]["id"]
line = "{}\t{}\t{}\t{}\t{}\n".format(
idx, name, hit, size, coverage
)
chunk_data_file_handle.write(line)
# Lastly, generate the network proper
logger.info("Writing network...")
output_network_path = os.path.join(output_dir, output_file_network)
with open(output_network_path, "w") as network_file_handle:
for chunks in sorted(all_contacts.keys()):
chunk_name1, chunk_name2 = chunks
contact_count = all_contacts[chunks]
if normalized:
coverage1 = chunk_complete_data[chunk_name1]["coverage"]
coverage2 = chunk_complete_data[chunk_name2]["coverage"]
mean_coverage = np.sqrt(coverage1 * coverage2)
effective_count = contact_count * 1.0 / mean_coverage
else:
effective_count = contact_count
try:
idx1 = chunk_complete_data[chunk_name1]["id"]
idx2 = chunk_complete_data[chunk_name2]["id"]
line = "{}\t{}\t{}\n".format(idx1, idx2, effective_count)
network_file_handle.write(line)
except KeyError as e:
logger.warning("Mismatch detected: %s", e)
return chunk_complete_data, all_contacts | 0.000085 |
def set_fill_color(self,r,g=-1,b=-1):
"Set color for all filling operations"
if((r==0 and g==0 and b==0) or g==-1):
self.fill_color=sprintf('%.3f g',r/255.0)
else:
self.fill_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0)
self.color_flag=(self.fill_color!=self.text_color)
if(self.page>0):
self._out(self.fill_color) | 0.047146 |
def express_route_links(self):
"""Instance depends on the API version:
* 2018-08-01: :class:`ExpressRouteLinksOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteLinksOperations>`
"""
api_version = self._get_api_version('express_route_links')
if api_version == '2018-08-01':
from .v2018_08_01.operations import ExpressRouteLinksOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | 0.00885 |
def include(self, node):
"""Include the defined yaml file."""
result = None
if isinstance(node, ScalarNode):
result = Loader.include_file(self.construct_scalar(node))
else:
raise RuntimeError("Not supported !include on type %s" % type(node))
return result | 0.009404 |
def fprob(dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
    Usage: fprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5 * dfden, 0.5 * dfnum, dfden / float(dfden + dfnum * F))
return p | 0.002632 |
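Assuming SciPy is available, the same right-tail p-value can be cross-checked against the survival function of the F distribution (a verification sketch, not part of the original module):
from scipy import stats
dfnum, dfden, F = 4, 30, 3.2
# fprob(dfnum, dfden, F) should agree with the F-distribution survival function:
p_scipy = stats.f.sf(F, dfnum, dfden)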
def _handle_http_errors(response):
"""
Check for HTTP errors and raise
OSError if relevant.
Args:
response (requests.Response):
Returns:
requests.Response: response
"""
code = response.status_code
if 200 <= code < 400:
return response
elif code in (403, 404):
raise {403: _ObjectPermissionError,
404: _ObjectNotFoundError}[code](response.reason)
response.raise_for_status() | 0.002165 |
def is_result_edition_allowed(self, analysis_brain):
"""Checks if the edition of the result field is allowed
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False
"""
# Always check general edition first
if not self.is_analysis_edition_allowed(analysis_brain):
return False
        # Get the analysis object
obj = api.get_object(analysis_brain)
if not obj.getDetectionLimitOperand():
# This is a regular result (not a detection limit)
return True
# Detection limit selector is enabled in the Analysis Service
if obj.getDetectionLimitSelector():
# Manual detection limit entry is *not* allowed
if not obj.getAllowManualDetectionLimit():
return False
return True | 0.002215 |
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_INDEX_PARAM: self._start_index,
self.END_INDEX_PARAM: self._end_index} | 0.003175 |
def read(self, filename):
"""
Read template from tar format with metadata.
:type filename: str
:param filename: Filename to read template from.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_a.write(
... 'test_template_read') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s
>>> template_b = Template().read('test_template_read.tgz')
>>> template_a == template_b
True
"""
tribe = Tribe()
tribe.read(filename=filename)
if len(tribe) > 1:
raise IOError('Multiple templates in file')
for key in self.__dict__.keys():
self.__dict__[key] = tribe[0].__dict__[key]
return self | 0.001896 |
def decode(self):
"Decode self.buffer, populating instance variables and return self."
buflen = len(self.buffer)
tftpassert(buflen >= 4, "malformed ERR packet, too short")
log.debug("Decoding ERR packet, length %s bytes", buflen)
if buflen == 4:
log.debug("Allowing this affront to the RFC of a 4-byte packet")
fmt = b"!HH"
log.debug("Decoding ERR packet with fmt: %s", fmt)
self.opcode, self.errorcode = struct.unpack(fmt,
self.buffer)
else:
log.debug("Good ERR packet > 4 bytes")
fmt = b"!HH%dsx" % (len(self.buffer) - 5)
log.debug("Decoding ERR packet with fmt: %s", fmt)
self.opcode, self.errorcode, self.errmsg = struct.unpack(fmt,
self.buffer)
log.error("ERR packet - errorcode: %d, message: %s"
% (self.errorcode, self.errmsg))
return self | 0.003802 |
def to_entity(entity_type, value, fields):
"""
Internal API: Returns an instance of an entity of type entity_type with the specified value and fields (stored in
dict). This is only used by the local transform runner as a helper function.
"""
e = entity_type(value)
for k, v in fields.items():
e.fields[k] = Field(k, v)
return e | 0.008264 |
def GetSchema(component):
"""convience function for finding the parent XMLSchema instance.
"""
parent = component
while not isinstance(parent, XMLSchema):
parent = parent._parent()
return parent | 0.004505 |
def popd():
"""Go back to where you once were.
:return: saved directory stack
"""
try:
directory = _saved_paths.pop(0)
except IndexError:
return [os.getcwd()]
os.chdir(directory)
return [directory] + _saved_paths | 0.003891 |
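popd pops from a module-level _saved_paths stack that a matching pushd is assumed to maintain; a minimal sketch of such a counterpart (hypothetical, not shown in the source) could look like:
import os
_saved_paths = []
def pushd(directory):
    # Hypothetical counterpart to popd: remember the current working directory
    # on the stack, then change into the requested one.
    _saved_paths.insert(0, os.getcwd())
    os.chdir(directory)
    return [directory] + _saved_paths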
def to_funset(self):
"""
Converts the experimental setup to a set of `gringo.Fun`_ object instances
Returns
-------
set
The set of `gringo.Fun`_ object instances
.. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
"""
fs = set((gringo.Fun('stimulus', [str(var)]) for var in self.stimuli))
fs = fs.union((gringo.Fun('inhibitor', [str(var)]) for var in self.inhibitors))
fs = fs.union((gringo.Fun('readout', [str(var)]) for var in self.readouts))
return fs | 0.008772 |
def amust(self, args, argv):
'''
Requires the User to provide a certain parameter
for the method to function properly.
Else, an Exception is raised.
args - (tuple) arguments you are looking for.
argv - (dict) arguments you have received and want to inspect.
'''
for arg in args:
if str(arg) not in argv:
raise KeyError("ArgMissing: " + str(arg) + " not passed") | 0.004415 |
def num_tagitems(self, tag):
""" Return the total number of items for the specified tag
"""
query = "/{t}/{u}/tags/{ta}/items".format(
u=self.library_id, t=self.library_type, ta=tag
)
return self._totals(query) | 0.007634 |
def do_POST(self): # pylint: disable=g-bad-name
"""Process encrypted message bundles."""
self._IncrementActiveCount()
try:
if self.path.startswith("/upload"):
stats_collector_instance.Get().IncrementCounter(
"frontend_http_requests", fields=["upload", "http"])
logging.error("Requested no longer supported file upload through HTTP.")
self.Send("File upload though HTTP is no longer supported", status=404)
else:
stats_collector_instance.Get().IncrementCounter(
"frontend_http_requests", fields=["control", "http"])
self.Control()
except Exception as e: # pylint: disable=broad-except
if flags.FLAGS.pdb_post_mortem:
pdb.post_mortem()
logging.exception("Had to respond with status 500.")
self.Send("Error: %s" % e, status=500)
finally:
self._DecrementActiveCount() | 0.008929 |
def status(config):
"""time series lastest record time by account."""
with open(config) as fh:
config = yaml.safe_load(fh.read())
jsonschema.validate(config, CONFIG_SCHEMA)
last_index = get_incremental_starts(config, None)
accounts = {}
for (a, region), last in last_index.items():
accounts.setdefault(a, {})[region] = last
print(yaml.safe_dump(accounts, default_flow_style=False)) | 0.002353 |
def draw(self, surface):
""" Draw all sprites and map onto the surface
:param surface: pygame surface to draw to
:type surface: pygame.surface.Surface
"""
ox, oy = self._map_layer.get_center_offset()
new_surfaces = list()
spritedict = self.spritedict
gl = self.get_layer_of_sprite
new_surfaces_append = new_surfaces.append
for spr in self.sprites():
new_rect = spr.rect.move(ox, oy)
try:
new_surfaces_append((spr.image, new_rect, gl(spr), spr.blendmode))
except AttributeError: # generally should only fail when no blendmode available
new_surfaces_append((spr.image, new_rect, gl(spr)))
spritedict[spr] = new_rect
self.lostsprites = []
return self._map_layer.draw(surface, surface.get_rect(), new_surfaces) | 0.00451 |
def fit(self, X, y):
"""Fit
Args:
X (np.array): Array of hyperparameter values with shape (n_samples, len(tunables))
y (np.array): Array of scores with shape (n_samples, )
"""
self.X = X
self.y = y | 0.01145 |
def load_config(cls, configfile="logging.yaml"):
"""
:raises: ValueError
"""
configfile = getenv(cls.CONFIGFILE_ENV_KEY, configfile)
if isfile(configfile):
with open(configfile, "r") as cf:
# noinspection PyBroadException
try:
dictConfig(load(cf))
except ValueError:
debug("Learn to config foooo! Improper config at %s", configfile)
except Exception:
exception("Something went wrong while reading %s.", configfile)
else:
raise ValueError("Invalid configfile specified: {}".format(configfile)) | 0.007267 |
def match_option_with_value(arguments, option, value):
"""
Check if a list of command line options contains an option with a value.
:param arguments: The command line arguments (a list of strings).
:param option: The long option (a string).
:param value: The expected value (a string).
:returns: :data:`True` if the command line contains the option/value pair,
:data:`False` otherwise.
"""
return ('%s=%s' % (option, value) in arguments or
contains_sublist(arguments, [option, value])) | 0.001838 |
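Both spellings of an option/value pair are accepted; for example (assuming contains_sublist checks that the two tokens appear consecutively):
match_option_with_value(['--output=json', '-v'], '--output', 'json')     # True, '--output=json' form
match_option_with_value(['--output', 'json', '-v'], '--output', 'json')  # True, separate-token form
match_option_with_value(['--output', 'xml'], '--output', 'json')         # False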
def set_schedule_enabled(self, state):
"""
:param state: a boolean True (on) or False (off)
:return: nothing
"""
desired_state = {"schedule_enabled": state}
response = self.api_interface.set_device_state(self, {
"desired_state": desired_state
})
self._update_state_from_response(response) | 0.005464 |
def pack_int(v):
""" Returns <v> as packed string. """
if v == 0:
return "\0"
ret = ''
while v > 0:
c = v & 127
v >>= 7
if v != 0:
c = c | 128
ret += chr(c)
return ret | 0.004184 |
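pack_int emits the value as little-endian 7-bit groups with a continuation bit, i.e. a LEB128-style varint; a matching decoder could look like this sketch (an assumption, not shown in the source):
def unpack_int(s):
    # Reverse of pack_int: reassemble 7-bit groups, least significant first,
    # stopping at the first character without the continuation bit (0x80).
    v, shift = 0, 0
    for ch in s:
        c = ord(ch)
        v |= (c & 127) << shift
        if not (c & 128):
            break
        shift += 7
    return v
# pack_int(300) == chr(172) + chr(2), and unpack_int(chr(172) + chr(2)) == 300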
def createEditor(self, parent, option, index):
"""Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
"""
editor = QtGui.QLineEdit(parent)
return editor | 0.006237 |