def _remove_esc_chars(self, raw_message):
"""
Removes any escape characters from the message
:param raw_message: a list of bytes containing the un-processed data
:return: a message that has the escaped characters appropriately un-escaped
"""
message = []
escape_next = False
for c in raw_message:
if escape_next:
message.append(c ^ self._ESC_XOR)
escape_next = False
else:
if c == self._ESC:
escape_next = True
else:
message.append(c)
    return message

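# Minimal usage sketch for the un-escaping helper above.  The _ESC and
# _ESC_XOR constants are assumptions (0x7D / 0x20 are common choices in
# byte-stuffed serial framings), not values taken from the original class.
class _FramedProtocol:
    _ESC = 0x7D
    _ESC_XOR = 0x20
    _remove_esc_chars = _remove_esc_chars

# [0x01, 0x7D, 0x5E, 0x02] un-escapes to [0x01, 0x7E, 0x02]
print(_FramedProtocol()._remove_esc_chars([0x01, 0x7D, 0x5E, 0x02]))
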
def consult_error_hook(self, item_session: ItemSession, error: BaseException):
    '''Return scripting action when an error occurred.'''
try:
return self.hook_dispatcher.call(
PluginFunctions.handle_error, item_session, error)
except HookDisconnected:
        return Actions.NORMAL

def _parse_kraken_output(out_dir, db, data):
"""Parse kraken stat info comming from stderr,
generating report with kraken-report
"""
in_file = os.path.join(out_dir, "kraken_out")
stat_file = os.path.join(out_dir, "kraken_stats")
out_file = os.path.join(out_dir, "kraken_summary")
kraken_cmd = config_utils.get_program("kraken-report", data["config"])
classify = unclassify = None
with open(stat_file, 'r') as handle:
for line in handle:
if line.find(" classified") > -1:
classify = line[line.find("(") + 1:line.find(")")]
if line.find(" unclassified") > -1:
unclassify = line[line.find("(") + 1:line.find(")")]
if os.path.getsize(in_file) > 0 and not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals())
do.run(cl, "kraken report: %s" % dd.get_sample_name(data))
kraken = {"kraken_clas": classify, "kraken_unclas": unclassify}
kraken_sum = _summarize_kraken(out_file)
kraken.update(kraken_sum)
    return kraken

def ensure_resource_data(self, update_data=False):
"""Retrieves data from OneView and updates resource object.
Args:
update_data: Flag to update resource data when it is required.
"""
# Check for unique identifier in the resource data
if not any(key in self.data for key in self.UNIQUE_IDENTIFIERS):
raise exceptions.HPOneViewMissingUniqueIdentifiers(MISSING_UNIQUE_IDENTIFIERS)
# Returns if data update is not required
if not update_data:
return
resource_data = None
if 'uri' in self.UNIQUE_IDENTIFIERS and self.data.get('uri'):
resource_data = self._helper.do_get(self.data['uri'])
else:
for identifier in self.UNIQUE_IDENTIFIERS:
identifier_value = self.data.get(identifier)
if identifier_value:
result = self.get_by(identifier, identifier_value)
if result and isinstance(result, list):
resource_data = result[0]
break
if resource_data:
self.data.update(resource_data)
else:
        raise exceptions.HPOneViewResourceNotFound(RESOURCE_DOES_NOT_EXIST)

def fetch_url(src, dst):
"""
Fetch file from URL src and save it to dst.
"""
# we do not use the nicer sys.version_info.major
# for compatibility with Python < 2.7
if sys.version_info[0] > 2:
import urllib.request
class URLopener(urllib.request.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
sys.stderr.write("ERROR: could not fetch {0}\n".format(url))
sys.exit(-1)
else:
import urllib
class URLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
sys.stderr.write("ERROR: could not fetch {0}\n".format(url))
sys.exit(-1)
dirname = os.path.dirname(dst)
if dirname != '':
if not os.path.isdir(dirname):
os.makedirs(dirname)
opener = URLopener()
    opener.retrieve(src, dst)

def MakeDestinationKey(directory, filename):
"""Creates a name that identifies a database file."""
    return utils.SmartStr(utils.JoinPath(directory, filename)).lstrip("/")

def decode_html_entities(s):
"""
Replaces html entities with the character they represent.
    >>> print(decode_html_entities("&lt;3 &amp;"))
<3 &
"""
parser = HTMLParser.HTMLParser()
def unesc(m):
return parser.unescape(m.group())
    return re.sub(r'(&[^;]+;)', unesc, ensure_unicode(s))

def unixtime(cdf_time, to_np=False): # @NoSelf
"""
Encodes the epoch(s) into seconds after 1970-01-01. Precision is only
kept to the nearest microsecond.
If to_np is True, then the values will be returned in a numpy array.
"""
import datetime
time_list = CDFepoch.breakdown(cdf_time, to_np=False)
#Check if only one time was input into unixtime.
#If so, turn the output of breakdown into a list for this function to work
if hasattr(cdf_time, '__len__'):
if len(cdf_time) == 1:
time_list = [time_list]
else:
time_list = [time_list]
unixtime = []
for t in time_list:
date = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
for i in range(0, len(t)):
if i > 7:
continue
elif i == 6:
date[i] = 1000*t[i]
elif i == 7:
date[i-1] += t[i]
else:
date[i] = t[i]
unixtime.append(datetime.datetime(*date).replace(tzinfo=datetime.timezone.utc).timestamp())
    return np.array(unixtime) if to_np else unixtime

def _resolve_to_field_class(self, names, scope):
"""Resolve the names to a class in fields.py, resolving past
typedefs, etc
:names: TODO
:scope: TODO
:ctxt: TODO
:returns: TODO
"""
switch = {
"char" : "Char",
"int" : "Int",
"long" : "Int",
"int64" : "Int64",
"uint64" : "UInt64",
"short" : "Short",
"double" : "Double",
"float" : "Float",
"void" : "Void",
"string" : "String",
"wstring" : "WString"
}
core = names[-1]
if core not in switch:
# will return a list of resolved names
type_info = scope.get_type(core)
if type(type_info) is type and issubclass(type_info, fields.Field):
return type_info
resolved_names = type_info
if resolved_names is None:
raise errors.UnresolvedType(self._coord, " ".join(names), " ")
if resolved_names[-1] not in switch:
raise errors.UnresolvedType(self._coord, " ".join(names), " ".join(resolved_names))
names = copy.copy(names)
names.pop()
names += resolved_names
if len(names) >= 2 and names[-1] == names[-2] and names[-1] == "long":
res = "Int64"
else:
res = switch[names[-1]]
if names[-1] in ["char", "short", "int", "long"] and "unsigned" in names[:-1]:
res = "U" + res
cls = getattr(fields, res)
    return cls

def submit(self, info, *flags):
"""Finish recording and upload or save the report.
This closes the `Stats` object, no further methods should be called.
The report is either saved, uploaded or discarded, depending on
configuration. If uploading is enabled, previous reports might be
uploaded too. If uploading is not explicitly enabled or disabled, the
prompt will be shown, to ask the user to enable or disable it.
"""
if not self.recording:
return
env_val = os.environ.get(self.env_var, '').lower()
if env_val not in (None, '', '1', 'on', 'enabled', 'yes', 'true'):
self.status = Stats.DISABLED_ENV
self.notes = None
return
if self.notes is None:
raise ValueError("This report has already been submitted")
all_info, self.notes = self.notes, None
all_info.extend(self._to_notes(info))
for flag in flags:
flag(self, all_info)
now = time.time()
secs = int(now)
msecs = int((now - secs) * 1000)
all_info.insert(0, ('date', '%d.%d' % (secs, msecs)))
if self.user_id:
all_info.insert(1, ('user', self.user_id))
logger.debug("Generated report:\n%r", (all_info,))
# Current report
def generator():
for key, value in all_info:
yield _encode(key) + b':' + _encode(value) + b'\n'
filename = 'report_%d_%d.txt' % (secs, msecs)
# Save current report and exit, unless user has opted in
if not self.sending:
fullname = os.path.join(self.location, filename)
with open(fullname, 'wb') as fp:
for l in generator():
fp.write(l)
# Show prompt
sys.stderr.write(self.prompt.prompt)
return
# Post previous reports
old_reports = [f for f in os.listdir(self.location)
if f.startswith('report_')]
old_reports.sort()
old_reports = old_reports[:4] # Only upload 5 at a time
for old_filename in old_reports:
fullname = os.path.join(self.location, old_filename)
try:
with open(fullname, 'rb') as fp:
# `data=fp` would make requests stream, which is currently
# not a good idea (WSGI chokes on it)
r = requests.post(self.drop_point, data=fp.read(),
timeout=1, verify=self.ssl_verify)
r.raise_for_status()
except Exception as e:
logger.warning("Couldn't upload %s: %s", old_filename, str(e))
break
else:
logger.info("Submitted report %s", old_filename)
os.remove(fullname)
# Post current report
try:
# `data=generator()` would make requests stream, which is currently
# not a good idea (WSGI chokes on it)
r = requests.post(self.drop_point, data=b''.join(generator()),
timeout=1, verify=self.ssl_verify)
except requests.RequestException as e:
logger.warning("Couldn't upload report: %s", str(e))
fullname = os.path.join(self.location, filename)
with open(fullname, 'wb') as fp:
for l in generator():
fp.write(l)
else:
try:
r.raise_for_status()
logger.info("Submitted report")
except requests.RequestException as e:
logger.warning("Server rejected report: %s", str(e)) | 0.001079 |
def get_column(self, field, components=None, computed_type='for_observations'):
"""
TODO: add documentation
return a dictionary for a single column, with component as keys and the
column array as values
    :parameter str field: name of the mesh column
:parameter components:
"""
def get_field(c, field, computed_type):
if c not in self._dict.keys() and self._parent_envelope_of[c] in self._dict.keys():
mesh = self._dict[self._parent_envelope_of[c]]
return mesh.get_column_flat(field, components, computed_type)
mesh = self._dict[c]
if isinstance(mesh, Meshes):
# then do this recursively for all components in the Meshes object
# but don't allow nesting in the dictionary, instead combine
# all subcomponents into one entry with the current component
return mesh.get_column_flat(field, mesh._components, computed_type)
f = mesh[field]
if isinstance(f, ComputedColumn):
col = getattr(f, computed_type)
else:
col = f
return col
if components:
if isinstance(components, str):
components = [components]
else:
components = self.keys()
    return {c: get_field(c, field, computed_type) for c in components}

def last_month():
""" Return start and end date of this month. """
since = TODAY + delta(day=1, months=-1)
until = since + delta(months=1)
    return Date(since), Date(until)

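# Hedged sketch of the module-level names last_month() depends on; TODAY,
# delta and Date are not defined in this snippet, so datetime and dateutil's
# relativedelta are stood in here purely for illustration (Date is the
# project's own wrapper and is left out).
import datetime
from dateutil.relativedelta import relativedelta as delta

TODAY = datetime.date.today()
since = TODAY + delta(day=1, months=-1)   # first day of the previous month
until = since + delta(months=1)           # first day of the current month
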
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
"""
Kind of like urlparse.parse_qs, except returns an ordered dict.
Also avoids replicating that function's bad habit of overriding the
built-in 'dict' type.
Taken from below with modification:
<https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
"""
od = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
od[name].append(value)
    return od

def view(cloudpath, hostname="localhost", port=DEFAULT_PORT):
"""Start a local web app on the given port that lets you explore this cutout."""
def handler(*args):
return ViewerServerHandler(cloudpath, *args)
myServer = HTTPServer((hostname, port), handler)
print("Neuroglancer server listening to http://{}:{}".format(hostname, port))
try:
myServer.serve_forever()
except KeyboardInterrupt:
# extra \n to prevent display of "^CContinuing"
print("\nContinuing program execution...")
finally:
        myServer.server_close()

def is_address_guard(self, address):
"""
Determines if an address belongs to a guard page.
@note: Returns always C{False} for kernel mode addresses.
@type address: int
@param address: Memory address to query.
@rtype: bool
@return: C{True} if the address belongs to a guard page.
@raise WindowsError: An exception is raised on error.
"""
try:
mbi = self.mquery(address)
except WindowsError:
e = sys.exc_info()[1]
if e.winerror == win32.ERROR_INVALID_PARAMETER:
return False
raise
    return mbi.is_guard()

def CreateClientPool(n):
"""Create n clients to run in a pool."""
clients = []
# Load previously stored clients.
try:
certificates = []
with open(flags.FLAGS.cert_file, "rb") as fd:
# Certificates are base64-encoded, so that we can use new-lines as
# separators.
for l in fd:
cert = rdf_crypto.RSAPrivateKey(initializer=base64.b64decode(l))
certificates.append(cert)
for certificate in certificates[:n]:
clients.append(
PoolGRRClient(
private_key=certificate,
ca_cert=config.CONFIG["CA.certificate"],
fast_poll=flags.FLAGS.fast_poll,
send_foreman_request=flags.FLAGS.send_foreman_request,
))
clients_loaded = True
except (IOError, EOFError):
clients_loaded = False
if clients_loaded and len(clients) < n:
raise RuntimeError(
"Loaded %d clients, but expected %d." % (len(clients), n))
while len(clients) < n:
# Generate a new RSA key pair for each client.
bits = config.CONFIG["Client.rsa_key_length"]
key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=bits)
clients.append(
PoolGRRClient(private_key=key, ca_cert=config.CONFIG["CA.certificate"]))
# Start all the clients now.
for c in clients:
c.start()
start_time = time.time()
try:
if flags.FLAGS.enroll_only:
while True:
time.sleep(1)
enrolled = len([x for x in clients if x.enrolled])
if enrolled == n:
logging.info("All clients enrolled, exiting.")
break
else:
logging.info("%s: Enrolled %d/%d clients.", int(time.time()),
enrolled, n)
else:
try:
while True:
time.sleep(100)
except KeyboardInterrupt:
pass
finally:
# Stop all pool clients.
for cl in clients:
cl.Stop()
# Note: code below is going to be executed after SIGTERM is sent to this
# process.
logging.info("Pool done in %s seconds.", time.time() - start_time)
# The way benchmarking is supposed to work is that we execute poolclient with
# --enroll_only flag, it dumps the certificates to the flags.FLAGS.cert_file.
# Then, all further poolclient invocations just read private keys back
# from that file. Therefore if private keys were loaded from
# flags.FLAGS.cert_file, then there's no need to rewrite it again with the
# same data.
if not clients_loaded:
logging.info("Saving certificates.")
with open(flags.FLAGS.cert_file, "wb") as fd:
            # We're base64-encoding certificates so that we can use new-lines
# as separators.
b64_certs = [
base64.b64encode(x.private_key.SerializeToString()) for x in clients
]
            fd.write("\n".join(b64_certs))

def _gen_labels_columns(self, list_columns):
"""
Auto generates pretty label_columns from list of columns
"""
for col in list_columns:
if not self.label_columns.get(col):
            self.label_columns[col] = self._prettify_column(col)

def default_logging(grab_log=None, # '/tmp/grab.log',
network_log=None, # '/tmp/grab.network.log',
level=logging.DEBUG, mode='a',
propagate_network_logger=False):
"""
Customize logging output to display all log messages
except grab network logs.
Redirect grab network logs into file.
"""
logging.basicConfig(level=level)
network_logger = logging.getLogger('grab.network')
network_logger.propagate = propagate_network_logger
if network_log:
hdl = logging.FileHandler(network_log, mode)
network_logger.addHandler(hdl)
network_logger.setLevel(level)
grab_logger = logging.getLogger('grab')
if grab_log:
hdl = logging.FileHandler(grab_log, mode)
grab_logger.addHandler(hdl)
        grab_logger.setLevel(level)

def read_identifiables(self, cls, sdmxobj, offset=None):
'''
If sdmxobj inherits from dict: update it with modelized elements.
These must be instances of model.IdentifiableArtefact,
i.e. have an 'id' attribute. This will be used as dict keys.
If sdmxobj does not inherit from dict: return a new DictLike.
'''
path = self._paths[cls]
if offset:
try:
base = self._paths[offset](sdmxobj._elem)[0]
except IndexError:
return None
else:
base = sdmxobj._elem
result = {e.get('id'): cls(self, e) for e in path(base)}
if isinstance(sdmxobj, dict):
sdmxobj.update(result)
else:
        return DictLike(result)

def get_tunnel_statistics_output_tunnel_stat_rx_bytes(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_tunnel_statistics = ET.Element("get_tunnel_statistics")
config = get_tunnel_statistics
output = ET.SubElement(get_tunnel_statistics, "output")
tunnel_stat = ET.SubElement(output, "tunnel-stat")
rx_bytes = ET.SubElement(tunnel_stat, "rx-bytes")
rx_bytes.text = kwargs.pop('rx_bytes')
callback = kwargs.pop('callback', self._callback)
    return callback(config)

def get_arg_name(self, param):
""" gets the argument name used in the command table for a parameter """
if self.current_command in self.cmdtab:
for arg in self.cmdtab[self.current_command].arguments:
for name in self.cmdtab[self.current_command].arguments[arg].options_list:
if name == param:
return arg
    return None

def read_csv_as_integer(csv_name, integer_columns, usecols=None):
"""Returns a DataFrame from a .csv file stored in /data/raw/.
Converts columns specified by 'integer_columns' to integer.
"""
csv_path = os.path.join(DATA_FOLDER, csv_name)
csv = pd.read_csv(csv_path, low_memory=False, usecols=usecols)
for column in integer_columns:
csv = csv[pd.to_numeric(csv[column], errors="coerce").notnull()]
csv[integer_columns] = csv[integer_columns].apply(pd.to_numeric)
    return csv

def get(vals, key, default_val=None):
"""
Returns a dictionary value
"""
val = vals
for part in key.split('.'):
if isinstance(val, dict):
val = val.get(part, None)
if val is None:
return default_val
else:
return default_val
    return val

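# Usage sketch for the dotted-key lookup above (no external assumptions).
config = {"db": {"host": "localhost", "port": 5432}}
assert get(config, "db.host") == "localhost"
assert get(config, "db.missing", default_val="n/a") == "n/a"
assert get(config, "db.host.extra") is None  # walking past a non-dict falls back to the default
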
def deletegroup(self, group_id):
"""
        Deletes a group by ID
:param group_id: id of the group to delete
:return: True if it deleted, False if it couldn't. False could happen for several reasons, but there isn't a good way of differentiating them
"""
request = requests.delete(
'{0}/{1}'.format(self.groups_url, group_id),
headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
    return request.status_code == 200

def getDwordAtOffset(self, offset):
"""
Returns a C{DWORD} from a given offset.
@type offset: int
@param offset: The offset to get the C{DWORD} from.
@rtype: L{DWORD}
@return: The L{DWORD} obtained at the given offset.
"""
    return datatypes.DWORD.parse(utils.ReadData(self.getDataAtOffset(offset, 4)))

def paint(self, painter, option, index):
"""Paint text"""
icon_rect = QtCore.QRectF(option.rect).adjusted(3, 3, -3, -3)
icon_rect.setWidth(14)
icon_rect.setHeight(14)
icon_color = colors["idle"]
icon = icons[index.data(model.Type)]
if index.data(model.Type) == "record":
icon_color = record_colors[index.data(model.LogLevel)]
elif index.data(model.Type) == "error":
icon_color = colors["warning"]
metrics = painter.fontMetrics()
label_rect = QtCore.QRectF(option.rect.adjusted(
icon_rect.width() + 12, 2, 0, -2))
assert label_rect.width() > 0
label = index.data(model.Label)
label = metrics.elidedText(label,
QtCore.Qt.ElideRight,
label_rect.width() - 20)
font_color = colors["idle"]
hover = QtGui.QPainterPath()
hover.addRect(QtCore.QRectF(option.rect).adjusted(0, 0, -1, -1))
# Maintain reference to state, so we can restore it once we're done
painter.save()
# Draw label
painter.setFont(fonts["h4"])
painter.setPen(QtGui.QPen(font_color))
painter.drawText(label_rect, label)
# Draw icon
painter.setFont(fonts["smallAwesome"])
painter.setPen(QtGui.QPen(icon_color))
painter.drawText(icon_rect, QtCore.Qt.AlignCenter, icon)
if option.state & QtWidgets.QStyle.State_MouseOver:
painter.fillPath(hover, colors["hover"])
if option.state & QtWidgets.QStyle.State_Selected:
painter.fillPath(hover, colors["selected"])
# Ok, we're done, tidy up.
    painter.restore()

def add_layer_from_env(self):
"""This function creates a new layer, gets a list of all the
current attributes, and attempts to find matching environment variables
    with the prefix of FJS_. If matches are found it sets those attributes
in the new layer.
"""
self.add_layer()
for attribute in self.get_attributes():
env_attribute = os.environ.get('FJS_{}'.format(attribute))
if env_attribute:
            setattr(self, attribute, env_attribute)

def rename_script(rename=None): # noqa: E501
"""Rename a script
Rename a script # noqa: E501
:param rename: The data needed to save this script
:type rename: dict | bytes
:rtype: Response
"""
if connexion.request.is_json:
rename = Rename.from_dict(connexion.request.get_json()) # noqa: E501
    return 'do some magic!'

def num_taps(sample_rate, transitionwidth, gpass, gstop):
"""Returns the number of taps for an FIR filter with the given shape
Parameters
----------
sample_rate : `float`
sampling rate of target data
transitionwidth : `float`
        the width (in the same units as `sample_rate`) of the transition
from stop-band to pass-band
gpass : `float`
the maximum loss in the passband (dB)
gstop : `float`
the minimum attenuation in the stopband (dB)
Returns
-------
numtaps : `int`
the number of taps for an FIR filter
Notes
-----
Credit: http://dsp.stackexchange.com/a/31077/8223
"""
gpass = 10 ** (-gpass / 10.)
gstop = 10 ** (-gstop / 10.)
return int(2/3. * log10(1 / (10 * gpass * gstop)) *
               sample_rate / transitionwidth)

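# Worked example for the tap-count estimate above; the numbers are
# illustrative only, and log10 is assumed to come from the math module
# (imported here in case it is not already at module level).
from math import log10

# 4096 Hz data, 100 Hz transition band, 2 dB passband ripple, 30 dB stopband
print(num_taps(4096, 100, 2, 30))  # ~60 taps
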
def isVisible(self):
"""
Returns whether or not this connection is visible. If either node it is
connected to is hidden, then it should be as well.
:return <bool>
"""
in_node = self.inputNode()
out_node = self.outputNode()
if in_node and not in_node.isVisible():
return False
if out_node and not out_node.isVisible():
return False
    return self._visible

def deprecated(message): # pragma: no cover
"""
    Emit a `DeprecationWarning` when the wrapped function/method is called.
Borrowed from https://stackoverflow.com/a/48632082/866026
"""
def deprecated_decorator(func):
"""Deprecation decorator."""
@wraps(func)
def deprecated_func(*args, **kwargs):
"""Display deprecation warning."""
warnings.warn(
"'{}' is deprecated. {}".format(func.__name__, message),
category=DeprecationWarning,
stacklevel=2
)
return func(*args, **kwargs)
return deprecated_func
    return deprecated_decorator

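# Usage sketch for the decorator factory above; old_api/new_api are made-up
# names, and the warnings/functools imports the decorator relies on are
# added here in case they are not already at module level.
import warnings
from functools import wraps

@deprecated("Use new_api() instead.")
def old_api():
    return 42

old_api()  # warns: 'old_api' is deprecated. Use new_api() instead.
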
def before_render(self):
"""Before template render hook
"""
super(BatchFolderContentsView, self).before_render()
if self.context.portal_type == "BatchFolder":
        self.request.set("disable_border", 1)

def get_specification_info(self, obj):
"""Returns the info for a Specification
"""
info = self.get_base_info(obj)
results_range = obj.getResultsRange()
info.update({
"results_range": results_range,
"sample_type_uid": obj.getSampleTypeUID(),
"sample_type_title": obj.getSampleTypeTitle(),
"client_uid": obj.getClientUID(),
})
bsc = api.get_tool("bika_setup_catalog")
def get_service_by_keyword(keyword):
if keyword is None:
return []
return map(api.get_object, bsc({
"portal_type": "AnalysisService",
"getKeyword": keyword
}))
# append a mapping of service_uid -> specification
specifications = {}
for spec in results_range:
service_uid = spec.get("uid")
if service_uid is None:
# service spec is not attached to a specific service, but to a
# keyword
for service in get_service_by_keyword(spec.get("keyword")):
service_uid = api.get_uid(service)
specifications[service_uid] = spec
continue
specifications[service_uid] = spec
info["specifications"] = specifications
# spec'd service UIDs
info["service_uids"] = specifications.keys()
    return info

def sub_path(self):
"""The path of the partition source, excluding the bundle path parts.
Includes the revision.
"""
try:
return os.path.join(*(self._local_parts()))
except TypeError as e:
raise TypeError(
"Path failed for partition {} : {}".format(
self.name,
                e.message))

def __new_argv(self, *new_pargs, **new_kargs):
"""Calculate new argv and extra_argv values resulting from adding
the specified positional and keyword arguments."""
new_argv = self.argv.copy()
new_extra_argv = list(self.extra_argv)
for v in new_pargs:
arg_name = None
for name in self.pargl:
if not name in new_argv:
arg_name = name
break
if arg_name:
new_argv[arg_name] = v
elif self.var_pargs:
new_extra_argv.append(v)
else:
num_prev_pargs = len([name for name in self.pargl if name in self.argv])
raise TypeError("%s() takes exactly %d positional arguments (%d given)" \
% (self.__name__,
len(self.pargl),
num_prev_pargs + len(new_pargs)))
for k,v in new_kargs.items():
if not (self.var_kargs or (k in self.pargl) or (k in self.kargl)):
raise TypeError("%s() got an unexpected keyword argument '%s'" \
% (self.__name__, k))
new_argv[k] = v
    return (new_argv, new_extra_argv)

def _expand_alternates(self, phonetic):
"""Expand phonetic alternates separated by |s.
Parameters
----------
phonetic : str
A Beider-Morse phonetic encoding
Returns
-------
str
A Beider-Morse phonetic code
"""
alt_start = phonetic.find('(')
if alt_start == -1:
return self._normalize_lang_attrs(phonetic, False)
prefix = phonetic[:alt_start]
alt_start += 1 # get past the (
alt_end = phonetic.find(')', alt_start)
alt_string = phonetic[alt_start:alt_end]
alt_end += 1 # get past the )
suffix = phonetic[alt_end:]
alt_array = alt_string.split('|')
result = ''
for i in range(len(alt_array)):
alt = alt_array[i]
alternate = self._expand_alternates(prefix + alt + suffix)
if alternate != '' and alternate != '[0]':
if result != '':
result += '|'
result += alternate
    return result

def fibonacci_approx(n):
r"""
approximate value (due to numerical errors) of fib(n) using closed form
expression
Args:
n (int):
Returns:
int: the n-th fib number
CommandLine:
python -m utool.util_alg fibonacci_approx
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> series = [int(fibonacci_approx(n)) for n in range(20)]
>>> result = ('series = %s' % (str(series[0:10]),))
>>> print(result)
"""
sqrt_5 = math.sqrt(5)
phi = (1 + sqrt_5) / 2
    return ((phi ** n) - (-phi) ** (-n)) / sqrt_5

def threat(self, name, **kwargs):
"""Add Threat data to Batch object
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Threat.
"""
group_obj = Threat(name, **kwargs)
    return self._group(group_obj)

def get_all_snapshots(self):
"""
This method returns a list of all Snapshots.
"""
data = self.get_data("snapshots/")
return [
Snapshot(token=self.token, **snapshot)
for snapshot in data['snapshots']
    ]

def save(self, *args, **kwargs):
"""
    This save method protects against two processes concurrently modifying
the same object. Normally the second save would silently overwrite the
changes from the first. Instead we raise a ConcurrentModificationError.
"""
cls = self.__class__
if self.pk:
rows = cls.objects.filter(
pk=self.pk, _change=self._change).update(
_change=self._change + 1)
if not rows:
raise ConcurrentModificationError(cls.__name__, self.pk)
self._change += 1
count = 0
max_retries=3
while True:
try:
return super(BaseModel, self).save(*args, **kwargs)
except django.db.utils.OperationalError:
if count >= max_retries:
raise
            count += 1

def playSound(folder, name=""):
""" as easy as that """
try:
if not name:
onlyfiles = [
f for f in os.listdir(folder)
if os.path.isfile(os.path.join(folder, f))
]
name = random.choice(onlyfiles)
subprocess.call(["afplay", folder + name])
# subprocess.call(["say", "%d started, batch %d" % (adate, batch)])
except:
        pass

def predict_interval(self, X, percentile, nsamples=200, likelihood_args=(),
multiproc=True):
"""
Predictive percentile interval (upper and lower quantiles).
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, D dimensions).
percentile : float
The percentile confidence interval (e.g. 95%) to return.
nsamples : int, optional
Number of samples for sampling the predictive percentiles.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N*.
multiproc : bool, optional
        Use multiprocessing to parallelise this prediction computation.
Returns
-------
ql : ndarray
The lower end point of the interval with shape (N*,)
qu : ndarray
The upper end point of the interval with shape (N*,)
"""
N = X.shape[0]
# Generate latent function samples per observation (n in N)
fsamples = self._sample_func(X, nsamples, genaxis=0)
# Make sure likelihood_args is consistent with work
if len(likelihood_args) > 0:
likelihood_args = _reshape_likelihood_args(likelihood_args, N)
    # Now create work for distributed workers
like_hypers = atleast_list(self.like_hypers_)
work = ((f[0], self.likelihood, like_hypers, f[1:], percentile)
for f in zip(fsamples, *likelihood_args))
# Distribute sampling and rootfinding
if multiproc:
pool = Pool()
res = pool.map(_star_rootfinding, work)
pool.close()
pool.join()
else:
res = [_rootfinding(*w) for w in work]
# Get results of work
ql, qu = zip(*res)
    return np.array(ql), np.array(qu)

def linkify(buildroot, s, memoized_urls):
"""Augment text by heuristically finding URL and file references and turning them into links.
:param string buildroot: The base directory of the project.
:param string s: The text to insert links into.
:param dict memoized_urls: A cache of text to links so repeated substitutions don't require
additional file stats calls.
"""
def memoized_to_url(m):
# to_url uses None to signal not to replace the text,
# so we use a different sentinel value.
value = memoized_urls.get(m.group(0), _NO_URL)
if value is _NO_URL:
value = to_url(m)
memoized_urls[m.group(0)] = value
return value
def to_url(m):
if m.group(1):
return m.group(0) # It's an http(s) url.
path = m.group(0)
if path.startswith('/'):
path = os.path.relpath(path, buildroot)
elif path.startswith('..'):
# The path is not located inside the buildroot, so it's definitely not a BUILD file.
return None
else:
# The path is located in the buildroot: see if it's a reference to a target in a BUILD file.
parts = path.split(':')
if len(parts) == 2:
putative_dir = parts[0]
else:
putative_dir = path
if os.path.isdir(os.path.join(buildroot, putative_dir)):
build_files = list(BuildFile.get_build_files_family(
FileSystemProjectTree(buildroot),
putative_dir))
if build_files:
path = build_files[0].relpath
else:
return None
if os.path.exists(os.path.join(buildroot, path)):
# The reporting server serves file content at /browse/<path_from_buildroot>.
return '/browse/{}'.format(path)
else:
return None
def maybe_add_link(url, text):
return '<a target="_blank" href="{}">{}</a>'.format(url, text) if url else text
    return _PATH_RE.sub(lambda m: maybe_add_link(memoized_to_url(m), m.group(0)), s)

def add_distinguished_name(list_name, item_name):
'''
Adds a distinguished name to a distinguished name list.
list_name(str): The name of the specific policy distinguished name list to append to.
item_name(str): The distinguished name to append.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name MyDistinguishedList cn=foo.bar.com
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "add_policy_distinguished_names",
"params": [list_name, {"item_name": item_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
    return _validate_change_result(response)

def cleanup(self):
"""DataFlowKernel cleanup.
This involves killing resources explicitly and sending die messages to IPP workers.
If the executors are managed (created by the DFK), then we call scale_in on each of
the executors and call executor.shutdown. Otherwise, we do nothing, and executor
cleanup is left to the user.
"""
logger.info("DFK cleanup initiated")
# this check won't detect two DFK cleanups happening from
# different threads extremely close in time because of
# non-atomic read/modify of self.cleanup_called
if self.cleanup_called:
raise Exception("attempt to clean up DFK when it has already been cleaned-up")
self.cleanup_called = True
self.log_task_states()
# Checkpointing takes priority over the rest of the tasks
# checkpoint if any valid checkpoint method is specified
if self.checkpoint_mode is not None:
self.checkpoint()
if self._checkpoint_timer:
logger.info("Stopping checkpoint timer")
self._checkpoint_timer.close()
# Send final stats
self.usage_tracker.send_message()
self.usage_tracker.close()
logger.info("Terminating flow_control and strategy threads")
self.flowcontrol.close()
for executor in self.executors.values():
if executor.managed:
if executor.scaling_enabled:
job_ids = executor.provider.resources.keys()
executor.scale_in(len(job_ids))
executor.shutdown()
self.time_completed = datetime.datetime.now()
if self.monitoring:
self.monitoring.send(MessageType.WORKFLOW_INFO,
{'tasks_failed_count': self.tasks_failed_count,
'tasks_completed_count': self.tasks_completed_count,
"time_began": self.time_began,
'time_completed': self.time_completed,
'workflow_duration': (self.time_completed - self.time_began).total_seconds(),
'run_id': self.run_id, 'rundir': self.run_dir})
self.monitoring.close()
"""
if self.logging_server is not None:
self.logging_server.terminate()
self.logging_server.join()
if self.web_app is not None:
self.web_app.terminate()
self.web_app.join()
"""
logger.info("DFK cleanup complete") | 0.003814 |
def derive_logger(self, logger):
"""
Return a child of `logger` specific for this instance. This is called
after :attr:`client` has been set, from the constructor.
The child name is calculated by the default implementation in a way
specific for aioxmpp services; it is not meant to be used by
non-:mod:`aioxmpp` classes; do not rely on the way how the child name
is calculated.
"""
parts = type(self).__module__.split(".")[1:]
if parts[-1] == "service" and len(parts) > 1:
del parts[-1]
return logger.getChild(".".join(
parts+[type(self).__qualname__]
        ))

def vm_reconfig_task(vm, device_change):
"""
    Create a Task for VM re-configuration
    :param vm: <vim.vm obj> VM which will be re-configured
:param device_change:
:return: Task
"""
config_spec = vim.vm.ConfigSpec(deviceChange=device_change)
task = vm.ReconfigVM_Task(config_spec)
    return task

def console_from_file(filename: str) -> tcod.console.Console:
"""Return a new console object from a filename.
    The file format is automatically determined. This can load REXPaint `.xp`,
ASCII Paint `.apf`, or Non-delimited ASCII `.asc` files.
Args:
filename (Text): The path to the file, as a string.
Returns: A new :any`Console` instance.
"""
return tcod.console.Console._from_cdata(
lib.TCOD_console_from_file(filename.encode("utf-8"))
    )

def unit_tangent(self, T):
"""returns the unit tangent vector of the Path at T (centered at the
origin and expressed as a complex number). If the tangent vector's
magnitude is zero, this method will find the limit of
self.derivative(tau)/abs(self.derivative(tau)) as tau approaches T."""
seg_idx, t = self.T2t(T)
    return self._segments[seg_idx].unit_tangent(t)

def fail(self, key, **kwargs):
"""
A helper method that simply raises a validation error.
"""
try:
msg = self.error_messages[key]
except KeyError:
class_name = self.__class__.__name__
msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)
raise AssertionError(msg)
message_string = msg.format(**kwargs)
    raise ValidationError(message_string)

def get_classes(self):
"""
Returns all Java Classes from the DEX objects as an array of DEX files.
"""
for idx, digest in enumerate(self.analyzed_vms):
dx = self.analyzed_vms[digest]
for vm in dx.vms:
filename = self.analyzed_digest[digest]
            yield idx, filename, digest, vm.get_classes()

def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
# calculate which level are within data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
choice = np.nonzero((self.levels > self._data.min()) &
(self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
    self._verts = np.vstack(verts)

def QA_fetch_stock_day(code, start, end, format='numpy', frequence='day', collections=DATABASE.stock_day):
"""'获取股票日线'
Returns:
[type] -- [description]
    Thanks to @几何大佬 for the tip:
https://docs.mongodb.com/manual/tutorial/project-fields-from-query-results/#return-the-specified-fields-and-the-id-field-only
"""
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
# code checking
code = QA_util_code_tolist(code)
if QA_util_date_valid(end):
cursor = collections.find({
'code': {'$in': code}, "date_stamp": {
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)}}, {"_id": 0}, batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
try:
res = res.assign(volume=res.vol, date=pd.to_datetime(
res.date)).drop_duplicates((['date', 'code'])).query('volume>1').set_index('date', drop=False)
res = res.ix[:, ['code', 'open', 'high', 'low',
'close', 'volume', 'amount', 'date']]
except:
res = None
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
    # multiple output data formats
elif format in ['n', 'N', 'numpy']:
return numpy.asarray(res)
elif format in ['list', 'l', 'L']:
return numpy.asarray(res).tolist()
else:
print("QA Error QA_fetch_stock_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
else:
QA_util_log_info(
            'QA Error QA_fetch_stock_day data parameter start=%s end=%s is not right' % (start, end))

def load_commands(cli, manage_dict):
"""Loads the commands defined in manage file"""
namespaced = manage_dict.get('namespaced')
# get click commands
commands = manage_dict.get('click_commands', [])
for command_dict in commands:
root_module = import_string(command_dict['module'])
group = cli.manage_groups.get(command_dict.get('group'), cli)
if getattr(root_module, '__path__', None):
# This is a package
iter_modules = pkgutil.iter_modules(
root_module.__path__, prefix=root_module.__name__ + '.'
)
submodules_names = [item[1] for item in iter_modules]
submodules = [import_string(name) for name in submodules_names]
for module in submodules:
add_click_commands(module, group, command_dict, namespaced)
else:
# a single file module
add_click_commands(root_module, group, command_dict, namespaced)
# get inline commands
commands = manage_dict.get('inline_commands', [])
for command_dict in commands:
name = command_dict['name']
help_text = command_dict.get('help_text')
options = command_dict.get('options', {})
arguments = command_dict.get('arguments', {})
context = command_dict.get('context', [])
code = command_dict['code']
group = cli.manage_groups.get(command_dict.get('group'), cli)
group.add_command(
make_command_from_string(
code=code,
cmd_context=get_context(context),
options=options,
arguments=arguments,
help_text=help_text
),
name=name
)
# get function commands
commands = manage_dict.get('function_commands', [])
for command_dict in commands:
name = command_dict['name']
help_text = command_dict.get('help_text')
options = command_dict.get('options', {})
arguments = command_dict.get('arguments', {})
function = import_string(command_dict['function'])
group = cli.manage_groups.get(command_dict.get('group'), cli)
group.add_command(
make_command_from_function(
function=function,
options=options,
arguments=arguments,
help_text=help_text
),
name=name
        )

async def reseed_apply(self) -> DIDInfo:
"""
Replace verification key with new verification key from reseed operation.
Raise WalletState if wallet is closed.
:return: DIDInfo with new verification key and metadata for DID
"""
LOGGER.debug('Wallet.reseed_apply >>>')
if not self.handle:
LOGGER.debug('Wallet.reseed_init <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
await did.replace_keys_apply(self.handle, self.did)
self.verkey = await did.key_for_local_did(self.handle, self.did)
now = int(time())
rv = DIDInfo(self.did, self.verkey, {'anchor': True, 'since': now, 'modified': now})
await did.set_did_metadata(self.handle, self.did, json.dumps(rv.metadata))
LOGGER.info('Wallet %s set seed hash metadata for DID %s', self.name, self.did)
LOGGER.debug('Wallet.reseed_apply <<< %s', rv)
    return rv

def create(cls, cards, custom_headers=None):
"""
:type user_id: int
:param cards: The cards that need to be updated.
:type cards: list[object_.CardBatchEntry]
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseCardBatch
"""
if custom_headers is None:
custom_headers = {}
request_map = {
cls.FIELD_CARDS: cards
}
request_map_string = converter.class_to_json(request_map)
request_map_string = cls._remove_field_for_request(request_map_string)
api_client = client.ApiClient(cls._get_api_context())
request_bytes = request_map_string.encode()
endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id())
response_raw = api_client.post(endpoint_url, request_bytes,
custom_headers)
return BunqResponseCardBatch.cast_from_bunq_response(
cls._from_json(response_raw, cls._OBJECT_TYPE_POST)
        )

def set_extractor_processor_inputs(self, extractor_processors,
sub_output=None):
"""Instead of specifying fields in the source document to rename
for the extractor, allows the user to specify ExtractorProcessors that
are executed earlier in the chain and generate json paths from
their output fields"""
if not (isinstance(extractor_processors, ExtractorProcessor) or
isinstance(extractor_processors, types.ListType)):
raise ValueError(
"extractor_processors must be an ExtractorProcessor or a list")
if isinstance(extractor_processors, ExtractorProcessor):
extractor_processor = extractor_processors
self.input_fields = self.__get_jp(extractor_processor, sub_output)
elif isinstance(extractor_processors, types.ListType):
self.input_fields = list()
for extractor_processor in extractor_processors:
if isinstance(extractor_processor, ExtractorProcessor):
self.input_fields.append(
self.__get_jp(extractor_processor, sub_output))
elif isinstance(extractor_processor, list):
self.input_fields.append(
reduce(lambda a, b: "{}|{}".format(a, b),
["({})".format(self.__get_jp(x, sub_output))
for x in extractor_processor]))
self.generate_json_paths()
    return self

def iiif_image_handler(prefix=None, identifier=None,
path=None, config=None, klass=None, auth=None, **args):
"""Handler for IIIF Image Requests.
    Behaviour for the non-authn or non-authz case is to
return 403.
"""
if (not auth or degraded_request(identifier) or auth.image_authz()):
# serve image
if (auth):
logging.debug("Authorized for image %s" % identifier)
i = IIIFHandler(prefix, identifier, config, klass, auth)
try:
return i.image_request_response(path)
except IIIFError as e:
return i.error_response(e)
else:
# redirect to degraded (for not authz and for authn but not authz too)
degraded_uri = host_port_prefix(
config.host, config.port, prefix) + '/' + identifier + '-deg/' + path
logging.info("Redirection to degraded: %s" % degraded_uri)
response = redirect(degraded_uri)
response.headers['Access-control-allow-origin'] = '*'
        return response

def resolve_network_cidr(ip_address):
'''
Resolves the full address cidr of an ip_address based on
configured network interfaces
'''
netmask = get_netmask_for_address(ip_address)
    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)

def normalize_query(query):
"""Normalize query: sort params by name, remove params without value.
>>> normalize_query('z=3&y=&x=1')
'x=1&z=3'
"""
if query == '' or len(query) <= 2:
return ''
nquery = unquote(query, exceptions=QUOTE_EXCEPTIONS['query'])
params = nquery.split('&')
nparams = []
for param in params:
if '=' in param:
k, v = param.split('=', 1)
if k and v:
nparams.append("%s=%s" % (k, v))
nparams.sort()
    return '&'.join(nparams)

def DrainTaskSchedulerQueueForClient(self, client, max_count=None):
"""Drains the client's Task Scheduler queue.
1) Get all messages in the client queue.
2) Sort these into a set of session_ids.
3) Use data_store.DB.ResolvePrefix() to query all requests.
4) Delete all responses for retransmitted messages (if needed).
Args:
client: The ClientURN object specifying this client.
max_count: The maximum number of messages we will issue for the client.
If not given, uses self.max_queue_size .
Returns:
      The tasks representing the messages returned. If we cannot send them,
we can reschedule them for later.
"""
if max_count is None:
max_count = self.max_queue_size
if max_count <= 0:
return []
client = rdf_client.ClientURN(client)
start_time = time.time()
# Drain the queue for this client
if data_store.RelationalDBEnabled():
action_requests = data_store.REL_DB.LeaseClientActionRequests(
client.Basename(),
lease_time=rdfvalue.Duration.FromSeconds(self.message_expiry_time),
limit=max_count)
result = [
rdf_flow_objects.GRRMessageFromClientActionRequest(r)
for r in action_requests
]
else:
new_tasks = queue_manager.QueueManager(token=self.token).QueryAndOwn(
queue=client.Queue(),
limit=max_count,
lease_seconds=self.message_expiry_time)
initial_ttl = rdf_flows.GrrMessage().task_ttl
check_before_sending = []
result = []
for task in new_tasks:
if task.task_ttl < initial_ttl - 1:
# This message has been leased before.
check_before_sending.append(task)
else:
result.append(task)
if check_before_sending:
with queue_manager.QueueManager(token=self.token) as manager:
status_found = manager.MultiCheckStatus(check_before_sending)
# All messages that don't have a status yet should be sent again.
for task in check_before_sending:
if task not in status_found:
result.append(task)
else:
manager.DeQueueClientRequest(task)
stats_collector_instance.Get().IncrementCounter("grr_messages_sent",
len(result))
if result:
logging.debug("Drained %d messages for %s in %s seconds.", len(result),
client,
time.time() - start_time)
    return result

def _handle_exists(self, node, scope, ctxt, stream):
"""Handle the exists unary operator
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
res = fields.Int()
try:
self._handle_node(node.expr, scope, ctxt, stream)
res._pfp__set_value(1)
except AttributeError:
res._pfp__set_value(0)
    return res

def _format_axes(axes, shape):
"""
Format target axes given an array shape
"""
if isinstance(axes, int):
axes = (axes,)
elif isinstance(axes, list) or hasattr(axes, '__iter__'):
axes = tuple(axes)
if not isinstance(axes, tuple):
raise ValueError("axes argument %s in the constructor not specified correctly" % str(axes))
if min(axes) < 0 or max(axes) > len(shape) - 1:
raise ValueError("invalid key axes %s given shape %s" % (str(axes), str(shape)))
    return axes

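# Quick check of the axis normalisation above; scalars and lists both come
# back as validated tuples.
assert _format_axes(1, (5, 3, 2)) == (1,)
assert _format_axes([0, 2], (5, 3, 2)) == (0, 2)
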
def count(self):
""" count: get number of nodes in tree
Args: None
Returns: int
"""
total = len(self.children)
for child in self.children:
total += child.count()
    return total

def overlap(r1: 'Rectangle', r2: 'Rectangle'):
"""
Overlapping rectangles overlap both horizontally & vertically
"""
h_overlaps = (r1.left <= r2.right) and (r1.right >= r2.left)
v_overlaps = (r1.bottom >= r2.top) and (r1.top <= r2.bottom)
    return h_overlaps and v_overlaps

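# Usage sketch for the overlap test above; Rectangle is not defined in this
# snippet, so a namedtuple with the same field names is assumed here (with
# top <= bottom, i.e. screen-style coordinates).
from collections import namedtuple

Rectangle = namedtuple("Rectangle", "left top right bottom")
a = Rectangle(left=0, top=0, right=10, bottom=10)
b = Rectangle(left=5, top=5, right=15, bottom=15)
c = Rectangle(left=20, top=20, right=30, bottom=30)
assert overlap(a, b)       # share the 5..10 square
assert not overlap(a, c)   # disjoint
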
def escape_grouped_arguments(s):
"""Prepares a string for the shell (on Windows too!)
Only for use on grouped arguments (passed as a string to Popen)
"""
if s is None:
return None
# Additional escaping for windows paths
if os.name == "nt":
s = "{}".format(s.replace("\\", "\\\\"))
return '"' + s.replace("'", "'\\''") + '"' | 0.00271 |
def ProjectHomeRelative(self):
"""
Returns the :attr:`ProjectHome` relative to :attr:`FileName` directory.
"""
    return os.path.relpath(self.ProjectHome, os.path.dirname(self.FileName))

def make_qq_plot(kev, obs, mdl, unit, key_text):
"""Make a quantile-quantile plot comparing events and a model.
*kev*
A 1D, sorted array of event energy bins measured in keV.
*obs*
A 1D array giving the number or rate of events in each bin.
*mdl*
A 1D array giving the modeled number or rate of events in each bin.
*unit*
Text describing the unit in which *obs* and *mdl* are measured; will
be shown on the plot axes.
*key_text*
Text describing the quantile-quantile comparison quantity; will be
shown on the plot legend.
Returns:
An :class:`omega.RectPlot` instance.
*TODO*: nothing about this is Sherpa-specific. Same goes for some of the
plotting routines in :mod:`pkwit.environments.casa.data`; might be
reasonable to add a submodule for generic X-ray-y plotting routines.
"""
import omega as om
kev = np.asarray(kev)
obs = np.asarray(obs)
mdl = np.asarray(mdl)
c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = max(c_obs[-1], c_mdl[-1])
p = om.RectPlot()
p.addXY([0, mx], [0, mx], '1:1')
p.addXY(c_mdl, c_obs, key_text)
# HACK: this range of numbers is chosen to give reasonable sampling for my
# sources, which are typically quite soft.
locs = np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2)
c0 = mx * 1.05
c1 = mx * 1.1
for loc in locs:
i0 = int(np.floor(loc))
frac = loc - i0
kevval = (1 - frac) * kev[i0] + frac * kev[i0+1]
mdlval = (1 - frac) * c_mdl[i0] + frac * c_mdl[i0+1]
obsval = (1 - frac) * c_obs[i0] + frac * c_obs[i0+1]
p.addXY([mdlval, mdlval], [c0, c1], '%.2f keV' % kevval, dsn=2)
p.addXY([c0, c1], [obsval, obsval], None, dsn=2)
p.setLabels('Cumulative model ' + unit, 'Cumulative data ' + unit)
p.defaultKeyOverlay.vAlign = 0.3
    return p

def get_credentials(self):
"""Get read-only credentials.
Returns:
class: Read-only credentials.
"""
return ReadOnlyCredentials(
self.access_token, self.client_id, self.client_secret,
self.refresh_token
    )

def _avg(value1, value2, weight):
"""Returns the weighted average of two values and handles the case where
one value is None. If both values are None, None is returned.
"""
if value1 is None:
return value2
if value2 is None:
return value1
    return value2 * weight + value1 * (1 - weight)

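# Quick illustration of the None-handling weighted average above.
assert _avg(None, 4.0, 0.3) == 4.0
assert _avg(2.0, None, 0.3) == 2.0
assert _avg(2.0, 4.0, 0.25) == 2.5  # 4.0 * 0.25 + 2.0 * 0.75
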
def remove_plugin(self, f):
"""Remvoing a deleted plugin.
Args:
f: the filepath for the plugin.
"""
if f.endswith('.py'):
plugin_name = os.path.splitext(os.path.basename(f))[0]
print '- %s %sREMOVED' % (plugin_name, color.Red)
print '\t%sNote: still in memory, restart Workbench to remove...%s' % \
            (color.Yellow, color.Normal)

def scale(arr, mn=0, mx=1):
"""
Apply min-max scaling (normalize)
then scale to (mn,mx)
"""
amn = arr.min()
amx = arr.max()
# normalize:
arr = (arr - amn) / (amx - amn)
# scale:
if amn != mn or amx != mx:
arr *= mx - mn
arr += mn
    return arr

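# Usage sketch for the min-max scaler above; it assumes a NumPy array
# (the normalisation and in-place *=/+= are array operations), so the
# import is added here.
import numpy as np

arr = np.array([2.0, 4.0, 6.0])
print(scale(arr))               # [0.  0.5 1. ]
print(scale(arr, mn=-1, mx=1))  # [-1.  0.  1.]
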
def get(self, channel):
"""Read single ADC Channel"""
checked_channel = self._check_channel_no(channel)
self.i2c.write_raw8(checked_channel | self._dac_enabled)
reading = self.i2c.read_raw8()
reading = self.i2c.read_raw8()
    return reading / 255.0

def _new_from_xml(cls, xmlnode):
"""Create a new `Option` object from an XML element.
:Parameters:
- `xmlnode`: the XML element.
:Types:
- `xmlnode`: `libxml2.xmlNode`
:return: the object created.
:returntype: `Option`
"""
label = from_utf8(xmlnode.prop("label"))
child = xmlnode.children
value = None
for child in xml_element_ns_iter(xmlnode.children, DATAFORM_NS):
if child.name == "value":
value = from_utf8(child.getContent())
break
if value is None:
raise BadRequestProtocolError("No value in <option/> element")
    return cls(value, label)

def draw_polygon_with_info(ax, polygon, off_x=0, off_y=0):
"""Draw one of the natural neighbor polygons with some information."""
pts = np.array(polygon)[ConvexHull(polygon).vertices]
for i, pt in enumerate(pts):
ax.plot([pt[0], pts[(i + 1) % len(pts)][0]],
[pt[1], pts[(i + 1) % len(pts)][1]], 'k-')
avex, avey = np.mean(pts, axis=0)
ax.annotate('area: {:.3f}'.format(geometry.area(pts)), xy=(avex + off_x, avey + off_y),
                fontsize=12)

def _update_limits_from_api(self):
"""
Query Lambda's DescribeLimits API action, and update limits
with the quotas returned. Updates ``self.limits``.
"""
logger.debug("Updating limits for Lambda from the AWS API")
if len(self.limits) == 2:
return
self.connect()
lims = self.conn.get_account_settings()['AccountLimit']
self.limits['Total Code Size (MiB)']._set_api_limit(
(lims['TotalCodeSize']/1048576))
self.limits['Code Size Unzipped (MiB) per Function']._set_api_limit(
(lims['CodeSizeUnzipped']/1048576))
self.limits['Unreserved Concurrent Executions']._set_api_limit(
lims['UnreservedConcurrentExecutions'])
self.limits['Concurrent Executions']._set_api_limit(
lims['ConcurrentExecutions'])
self.limits['Code Size Zipped (MiB) per Function']._set_api_limit(
        (lims['CodeSizeZipped']/1048576))

def get_sorted_proposal_list(self):
"""Return a list of `CodeAssistProposal`"""
proposals = {}
for proposal in self.proposals:
proposals.setdefault(proposal.scope, []).append(proposal)
result = []
for scope in self.scopepref:
scope_proposals = proposals.get(scope, [])
scope_proposals = [proposal for proposal in scope_proposals
if proposal.type in self.typerank]
scope_proposals.sort(key=self._proposal_key)
result.extend(scope_proposals)
    return result

def parse_error(self, tup_tree):
"""
Parse the tuple for an ERROR element:
::
<!ELEMENT ERROR (INSTANCE*)>
<!ATTLIST ERROR
CODE CDATA #REQUIRED
DESCRIPTION CDATA #IMPLIED>
"""
self.check_node(tup_tree, 'ERROR', ('CODE',), ('DESCRIPTION',),
('INSTANCE',))
# self.list_of_various() has the same effect as self.list_of_same()
# when used with a single allowed child element, but is a little
# faster.
instance_list = self.list_of_various(tup_tree, ('INSTANCE',))
    return (name(tup_tree), attrs(tup_tree), instance_list)

def _cost_func(x, kernel_options, tuning_options, runner, results, cache):
""" Cost function used by minimize """
error_time = 1e20
logging.debug('_cost_func called')
logging.debug('x: ' + str(x))
x_key = ",".join([str(i) for i in x])
if x_key in cache:
return cache[x_key]
    # snap values in x to the nearest actual value for each parameter; unscale x if needed
if tuning_options.scaling:
params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps)
else:
params = snap_to_nearest_config(x, tuning_options.tune_params)
logging.debug('params ' + str(params))
x_int = ",".join([str(i) for i in params])
if x_int in cache:
return cache[x_int]
#check if this is a legal (non-restricted) parameter instance
if tuning_options.restrictions:
legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose)
if not legal:
cache[x_int] = error_time
cache[x_key] = error_time
return error_time
#compile and benchmark this instance
res, _ = runner.run([params], kernel_options, tuning_options)
#append to tuning results
if res:
results.append(res[0])
cache[x_int] = res[0]['time']
cache[x_key] = res[0]['time']
return res[0]['time']
cache[x_int] = error_time
cache[x_key] = error_time
    return error_time

def ImportFile(store, filename, start):
"""Import hashes from 'filename' into 'store'."""
with io.open(filename, "r") as fp:
reader = csv.Reader(fp.read())
i = 0
current_row = None
product_code_list = []
op_system_code_list = []
for row in reader:
# Skip first row.
i += 1
if i and i % 5000 == 0:
data_store.DB.Flush()
print("Imported %d hashes" % i)
if i > 1:
if len(row) != 8:
continue
try:
if i < start:
continue
if current_row:
if current_row[0] == row[0]:
# Same hash, add product/system
product_code_list.append(int(row[5]))
op_system_code_list.append(row[6])
continue
# Fall through and add current row.
else:
# First row.
current_row = row
product_code_list = [int(row[5])]
op_system_code_list = [row[6]]
continue
_ImportRow(store, current_row, product_code_list, op_system_code_list)
# Set new hash.
current_row = row
product_code_list = [int(row[5])]
op_system_code_list = [row[6]]
except Exception as e: # pylint: disable=broad-except
print("Failed at %d with %s" % (i, str(e)))
return i - 1
if current_row:
_ImportRow(store, current_row, product_code_list, op_system_code_list)
    return i

def init_app(self, app, config_prefix='MONGOALCHEMY'):
"""This callback can be used to initialize an application for the use with this
MongoDB setup. Never use a database in the context of an application not
initialized that way or connections will leak."""
self.config_prefix = config_prefix
def key(suffix):
return '%s_%s' % (config_prefix, suffix)
if key('DATABASE') not in app.config:
raise ImproperlyConfiguredError("You should provide a database name "
"(the %s setting)." % key('DATABASE'))
uri = _get_mongo_uri(app, key)
rs = app.config.get(key('REPLICA_SET'))
timezone = None
if key('TIMEZONE') in app.config:
timezone = pytz.timezone(app.config.get(key('TIMEZONE')))
self.session = session.Session.connect(app.config.get(key('DATABASE')),
safe=app.config.get(key('SAFE_SESSION'),
False),
timezone = timezone,
host=uri, replicaSet=rs)
self.Document._session = self.session | 0.007886 |
def parse_html(html, cleanup=True):
"""
Parses an HTML fragment, returning an lxml element. Note that the HTML will be
wrapped in a <div> tag that was not in the original document.
If cleanup is true, make sure there's no <head> or <body>, and get
rid of any <ins> and <del> tags.
"""
if cleanup:
# This removes any extra markup or structure like <head>:
html = cleanup_html(html)
return fragment_fromstring(html, create_parent=True) | 0.004141 |
def create_process(cmd, root_helper=None, addl_env=None, log_output=True):
"""Create a process object for the given command.
The return value will be a tuple of the process object and the
list of command arguments used to create it.
"""
if root_helper:
cmd = shlex.split(root_helper) + cmd
cmd = map(str, cmd)
log_output and LOG.info("Running command: %s", cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
return obj, cmd | 0.001486 |
def select_warp_gates(self, shift):
"""Select all warp gates."""
action = sc_pb.Action()
action.action_ui.select_warp_gates.selection_add = shift
return action | 0.005714 |
def scan_pressures(cryst, lo, hi, n=5, eos=None):
'''
Scan the pressure axis from lo to hi (inclusive)
using B-M EOS as the volume predictor.
Pressure (lo, hi) in GPa
'''
# Inverse B-M EOS to get volumes from pressures
# This will work only in limited pressure range p>-B/B'.
# Warning! Relative, the V0 prefactor is removed.
def invbmeos(b, bp, x):
return array([pow(b/(bp*xv+b), 1/(3*bp)) for xv in x])
if eos is None:
raise RuntimeError('Required EOS data missing')
# Limit negative pressures to 90% of the singularity value.
# Beyond this B-M EOS is bound to be wrong anyway.
lo = max(lo, -0.9*eos[1]/eos[2])
scale = (eos[0]/cryst.get_volume())*invbmeos(eos[1], eos[2],
linspace(lo, hi, num=n))
# print(scale)
uc = cryst.get_cell()
systems = [Atoms(cryst) for s in scale]
for n, s in enumerate(scale):
systems[n].set_cell(s*uc, scale_atoms=True)
return systems | 0.000983 |
def _get_snmpv3(self, oid):
"""
Try to send an SNMP GET operation using SNMPv3 for the specified OID.
Parameters
----------
oid : str
The SNMP OID that you want to get.
Returns
-------
string : str
The string as part of the value from the OID you are trying to retrieve.
"""
snmp_target = (self.hostname, self.snmp_port)
cmd_gen = cmdgen.CommandGenerator()
(error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
cmdgen.UsmUserData(
self.user,
self.auth_key,
self.encrypt_key,
authProtocol=self.auth_proto,
privProtocol=self.encryp_proto,
),
cmdgen.UdpTransportTarget(snmp_target, timeout=1.5, retries=2),
oid,
lookupNames=True,
lookupValues=True,
)
if not error_detected and snmp_data[0][1]:
return text_type(snmp_data[0][1])
return "" | 0.003756 |
def generate_msg(filename, msg, key, value):
""" Generate a message for the output log indicating the file/association will not
be processed as the characteristics of the data are known to be inconsistent
with alignment.
"""
log.info('Dataset ' + filename + ' has (keyword = value) of (' + key + ' = ' + str(value) + ').')
if msg == Messages.NOPROC.value:
log.info('Dataset cannot be aligned.')
else:
log.info('Dataset can be aligned, but the result may be compromised.') | 0.007634 |
def battery_status_send(self, id, battery_function, type, temperature, voltages, current_battery, current_consumed, energy_consumed, battery_remaining, force_mavlink1=False):
'''
Battery information
id : Battery ID (uint8_t)
battery_function : Function of the battery (uint8_t)
type : Type (chemistry) of the battery (uint8_t)
temperature : Temperature of the battery in centi-degrees celsius. INT16_MAX for unknown temperature. (int16_t)
voltages : Battery voltage of cells, in millivolts (1 = 1 millivolt). Cells above the valid cell count for this battery should have the UINT16_MAX value. (uint16_t)
current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
current_consumed : Consumed charge, in milliampere hours (1 = 1 mAh), -1: autopilot does not provide mAh consumption estimate (int32_t)
                energy_consumed   : Consumed energy, in 100*Joules (integrated U*I*dt) (1 = 100 Joule), -1: autopilot does not provide energy consumption estimate (int32_t)
battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot does not estimate the remaining battery (int8_t)
'''
return self.send(self.battery_status_encode(id, battery_function, type, temperature, voltages, current_battery, current_consumed, energy_consumed, battery_remaining), force_mavlink1=force_mavlink1) | 0.00652 |
def check_int(val):
r"""Check if input value is an int or a np.ndarray of ints, if not convert.
Parameters
----------
val : any
Input value
Returns
-------
int or np.ndarray of ints
Examples
--------
>>> from modopt.base.types import check_int
>>> a = np.arange(5).astype(float)
>>> a
array([ 0., 1., 2., 3., 4.])
    >>> check_int(a)
array([0, 1, 2, 3, 4])
"""
if not isinstance(val, (int, float, list, tuple, np.ndarray)):
raise TypeError('Invalid input type.')
if isinstance(val, float):
val = int(val)
elif isinstance(val, (list, tuple)):
val = np.array(val, dtype=int)
elif isinstance(val, np.ndarray) and (not np.issubdtype(val.dtype,
np.integer)):
val = val.astype(int)
return val | 0.001138 |
def map_coordinates(data, coordinates, interpolation="linear",
mode='constant'):
"""
Map data to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input.
    Should correspond to scipy.ndimage.map_coordinates.
    Parameters
    ----------
    data : 2d or 3d ndarray
        the input array
    coordinates : ndarray of shape (data.ndim, m)
        the coordinates at which the data is evaluated
    interpolation : str
        interpolation scheme, either "linear" or "nearest"
    mode : str
        how coordinates outside the data are handled: "constant", "wrap" or "edge"
    Returns
    -------
    ndarray of length m holding the interpolated values
"""
if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)):
raise ValueError("input data has to be a 2d or 3d array!")
coordinates = np.asarray(coordinates, np.int32)
if not (coordinates.shape[0] == data.ndim):
raise ValueError("coordinate has to be of shape (data.ndim,m) ")
interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
"nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}
mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
"wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
"edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]
}
if not interpolation in interpolation_defines:
raise KeyError(
"interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys())))
if not mode in mode_defines:
raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys())))
if not data.dtype.type in cl_buffer_datatype_dict:
raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys())))
dtype_defines = ["-D", "DTYPE=%s" % cl_buffer_datatype_dict[data.dtype.type]]
d_im = OCLImage.from_array(data)
coordinates_g = OCLArray.from_array(coordinates.astype(np.float32, copy=False))
res_g = OCLArray.empty(coordinates.shape[1], data.dtype)
prog = OCLProgram(abspath("kernels/map_coordinates.cl")
, build_options=interpolation_defines[interpolation] +
mode_defines[mode] + dtype_defines)
kernel = "map_coordinates{ndim}".format(ndim=data.ndim)
prog.run_kernel(kernel,
(coordinates.shape[-1],), None,
d_im, res_g.data, coordinates_g.data)
return res_g.get() | 0.00579 |
def format_year(year):
"""
Format the year value of the ``YearArchiveView``,
which can be a integer or date object.
This tag is no longer needed, but exists for template compatibility.
It was a compatibility tag for Django 1.4.
"""
if isinstance(year, (date, datetime)):
# Django 1.5 and up, 'year' is a date object, consistent with month+day views.
return unicode(year.year)
else:
# Django 1.4 just passes the kwarg as string.
return unicode(year) | 0.003899 |
def createNetwork(dataSource):
"""Create the Network instance.
The network has a sensor region reading data from `dataSource` and passing
the encoded representation to an Identity Region.
:param dataSource: a RecordStream instance to get data from
:returns: a Network instance ready to run
"""
network = Network()
# Our input is sensor data from the gym file. The RecordSensor region
# allows us to specify a file record stream as the input source via the
# dataSource attribute.
network.addRegion("sensor", "py.RecordSensor",
json.dumps({"verbosity": _VERBOSITY}))
sensor = network.regions["sensor"].getSelf()
# The RecordSensor needs to know how to encode the input values
sensor.encoder = createEncoder()
# Specify the dataSource as a file record stream instance
sensor.dataSource = dataSource
# CUSTOM REGION
# Add path to custom region to PYTHONPATH
# NOTE: Before using a custom region, please modify your PYTHONPATH
# export PYTHONPATH="<path to custom region module>:$PYTHONPATH"
# In this demo, we have modified it using sys.path.append since we need it to
# have an effect on this program.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from custom_region.identity_region import IdentityRegion
# Add custom region class to the network
Network.registerRegion(IdentityRegion)
# Create a custom region
network.addRegion("identityRegion", "py.IdentityRegion",
json.dumps({
"dataWidth": sensor.encoder.getWidth(),
}))
# Link the Identity region to the sensor output
network.link("sensor", "identityRegion", "UniformLink", "")
network.initialize()
return network | 0.016695 |
def to_internal_filter(self, attribute_profile, external_attribute_names):
"""
Converts attribute names from external "type" to internal
:type attribute_profile: str
:type external_attribute_names: list[str]
:type case_insensitive: bool
:rtype: list[str]
:param attribute_profile: From which external type to convert (ex: oidc, saml, ...)
:param external_attribute_names: A list of attribute names
:param case_insensitive: Create a case insensitive filter
:return: A list of attribute names in the internal format
"""
try:
profile_mapping = self.to_internal_attributes[attribute_profile]
except KeyError:
logger.warn("no attribute mapping found for the given attribute profile '%s'", attribute_profile)
# no attributes since the given profile is not configured
return []
internal_attribute_names = set() # use set to ensure only unique values
for external_attribute_name in external_attribute_names:
try:
internal_attribute_name = profile_mapping[external_attribute_name]
internal_attribute_names.add(internal_attribute_name)
except KeyError:
pass
return list(internal_attribute_names) | 0.004484 |
def check_time_extrator(self):
"""将抽取得时间转换为date标准时间格式
Keyword arguments:
string -- 含有时间的文本,str类型
Return:
release_time -- 新闻发布时间
"""
if self.year_check and self.month_check and self.day_check:
time = str(self.year) + '-' + str(self.month) + '-' + str(self.day)
release_time = datetime.datetime.strptime(time, "%Y-%m-%d").date()
return release_time | 0.004274 |
def schedule_enabled():
'''
Check the status of automatic update scheduling.
:return: True if scheduling is enabled, False if disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.schedule_enabled
'''
cmd = ['softwareupdate', '--schedule']
ret = salt.utils.mac_utils.execute_return_result(cmd)
enabled = ret.split()[-1]
return salt.utils.mac_utils.validate_enabled(enabled) == 'on' | 0.002151 |
def migrate_data(ignore: Sequence[str],
new_data_path: str,
old_data_path: str):
""" Copy everything in the app data to the root of the main data part
:param ignore: A list of files that should be ignored in the root of /data
:param new_data_path: Where the new data partition is mounted
    :param old_data_path: Where the old data files are
    """
    # the new 'data' path is actually /var and /data is in /var/data
dest_data = os.path.join(new_data_path, 'data')
LOG.info(f"migrate_data: copying {old_data_path} to {dest_data}")
os.makedirs(dest_data, exist_ok=True)
with os.scandir(old_data_path) as scanner:
for entry in scanner:
if entry.name in ignore:
LOG.info(f"migrate_data: ignoring {entry.name}")
continue
src = os.path.join(old_data_path, entry.name)
dest = os.path.join(dest_data, entry.name)
if os.path.exists(dest):
LOG.info(f"migrate_data: removing dest tree {dest}")
shutil.rmtree(dest, ignore_errors=True)
if entry.is_dir():
LOG.info(f"migrate_data: copying tree {src}->{dest}")
shutil.copytree(src, dest, symlinks=True,
ignore=migrate_files_to_ignore)
else:
LOG.info(f"migrate_data: copying file {src}->{dest}")
shutil.copy2(src, dest) | 0.000685 |
def external_system_identifiers(endpoint):
"""Populate the ``external_system_identifiers`` key.
Also populates the ``new_record`` key through side effects.
"""
@utils.flatten
@utils.for_each_value
def _external_system_identifiers(self, key, value):
new_recid = maybe_int(value.get('d'))
if new_recid:
self['new_record'] = get_record_ref(new_recid, endpoint)
return [
{
'schema': 'SPIRES',
'value': ext_sys_id,
} for ext_sys_id in force_list(value.get('a'))
]
return _external_system_identifiers | 0.0016 |
def _parse_float_vec(vec):
"""
Parse a vector of float values representing IBM 8 byte floats into
native 8 byte floats.
"""
dtype = np.dtype('>u4,>u4')
vec1 = vec.view(dtype=dtype)
xport1 = vec1['f0']
xport2 = vec1['f1']
# Start by setting first half of ieee number to first half of IBM
# number sans exponent
ieee1 = xport1 & 0x00ffffff
# The fraction bit to the left of the binary point in the ieee
# format was set and the number was shifted 0, 1, 2, or 3
# places. This will tell us how to adjust the ibm exponent to be a
# power of 2 ieee exponent and how to shift the fraction bits to
# restore the correct magnitude.
shift = np.zeros(len(vec), dtype=np.uint8)
shift[np.where(xport1 & 0x00200000)] = 1
shift[np.where(xport1 & 0x00400000)] = 2
shift[np.where(xport1 & 0x00800000)] = 3
# shift the ieee number down the correct number of places then
# set the second half of the ieee number to be the second half
# of the ibm number shifted appropriately, ored with the bits
# from the first half that would have been shifted in if we
# could shift a double. All we are worried about are the low
# order 3 bits of the first half since we're only shifting by
# 1, 2, or 3.
ieee1 >>= shift
ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
# clear the 1 bit to the left of the binary point
ieee1 &= 0xffefffff
# set the exponent of the ieee number to be the actual exponent
# plus the shift count + 1023. Or this into the first half of the
# ieee number. The ibm exponent is excess 64 but is adjusted by 65
# since during conversion to ibm format the exponent is
# incremented by 1 and the fraction bits left 4 positions to the
# right of the radix point. (had to add >> 24 because C treats &
# 0x7f as 0x7f000000 and Python doesn't)
ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) +
shift + 1023) << 20) | (xport1 & 0x80000000)
ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
ieee['f0'] = ieee1
ieee['f1'] = ieee2
ieee = ieee.view(dtype='>f8')
ieee = ieee.astype('f8')
return ieee | 0.000452 |
def copy_file(self, from_path, to_path):
""" Copy file. """
if not op.exists(op.dirname(to_path)):
self.make_directory(op.dirname(to_path))
shutil.copy(from_path, to_path)
logging.debug('File copied: {0}'.format(to_path)) | 0.007519 |
def build_for_contour_tree(self, contour_tree, negate=False):
""" A helper function that will reduce duplication of data by
reusing the parent contour tree's parameters and data
"""
if self.debug:
tree_type = "Join"
if negate:
tree_type = "Split"
sys.stdout.write("{} Tree Computation: ".format(tree_type))
start = time.clock()
Y = contour_tree.Y
if negate:
Y = -Y
self.__tree = MergeTreeFloat(
vectorFloat(contour_tree.Xnorm.flatten()),
vectorFloat(Y),
str(contour_tree.gradient),
contour_tree.graph_rep.full_graph(),
self.debug,
)
self._internal_build()
if self.debug:
end = time.clock()
sys.stdout.write("%f s\n" % (end - start)) | 0.002278 |