def make_lines_texture(num_lines=10, resolution=50):
"""Makes a texture consisting of a given number of horizontal lines.
Args:
num_lines (int): the number of lines to draw
resolution (int): the number of midpoints on each line
Returns:
A texture.
"""
x, y = np.meshgrid(
np.hstack([np.linspace(0, 1, resolution), np.nan]),
np.linspace(0, 1, num_lines),
)
y[np.isnan(x)] = np.nan
return x.flatten(), y.flatten()
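# Usage sketch (an assumption: numpy and matplotlib are available). The NaN
# column appended to each row acts as a pen-up marker, so a single plot call
# draws every horizontal line as a separate segment.
import numpy as np
import matplotlib.pyplot as plt

x, y = make_lines_texture(num_lines=4, resolution=10)
plt.plot(x, y)  # NaNs break the path into 4 separate horizontal lines
plt.show()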
def tokens(self):
"""
`generator` : the tokens in this segment
"""
for subsegment_or_token in self:
if isinstance(subsegment_or_token, Segment):
subsegment = subsegment_or_token
for token in subsegment.tokens():
yield token
else:
token = subsegment_or_token
yield token
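# Minimal self-contained sketch of the same recursion, assuming Segment
# subclasses list and leaf items are plain tokens (strings here). Python 3's
# `yield from` expresses the inner loop directly.
class Segment(list):
    def tokens(self):
        for item in self:
            if isinstance(item, Segment):
                yield from item.tokens()
            else:
                yield item

seg = Segment(['a', Segment(['b', 'c']), 'd'])
assert list(seg.tokens()) == ['a', 'b', 'c', 'd']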
def _det_inference(self):
"""
Internal method for determining the inference method
"""
# 2 random effects with complete design -> gp2KronSum
# TODO: add check for low-rankness, use GP3KronSumLR and GP2KronSumLR when possible
if (self.n_randEffs==2) and (~sp.isnan(self.Y).any()):
rv = 'GP2KronSum'
else:
rv = 'GP'
return rv
def get_host(self, hostname):
""" Returns a Host dict with config options, or None if none exists"""
if hostname in self.get_hosts():
return self.load_ssh_conf().lookup(hostname)
logger.warn('Tried to find host with name {0}, but host not found'.format(hostname))
return None
def _ResolutionOrder(self, variables_to_solve):
"""
return a list of lists of tuples (block,output,ndof) to be solved
"""
# Gp=nx.DiGraph()
#
# for i in range(nvar):
# Gp.add_node('v'+str(i),bipartite=0)
#
# for i in range(neq):
# Gp.add_node('e'+str(i),bipartite=1)
# for j in range(nvar):
# if Mo[i,j]==1:
# Gp.add_edge('e'+str(i),'v'+str(j))
Gp = nx.DiGraph()
for variable in self.variables:
Gp.add_node(variable, bipartite=0)
for block in self.blocks:
for iov, output_variable in enumerate(block.outputs):
Gp.add_node((block, iov), bipartite=1)
Gp.add_edge((block, iov), output_variable)
Gp.add_edge(output_variable, (block, iov))
for input_variable in block.inputs:
if not isinstance(input_variable, Signal):
Gp.add_edge(input_variable, (block, iov))
# for n1,n2 in M.items():
# Gp.add_edge(n1,n2)
sinks = []
sources = []
for node in Gp.nodes():
if Gp.out_degree(node) == 0:
sinks.append(node)
elif Gp.in_degree(node) == 0:
sources.append(node)
G2 = sources[:]
for node in sources:
for node2 in nx.descendants(Gp, node):
if node2 not in G2:
G2.append(node2)
if G2 != []:
print(G2)
raise ModelError('Overconstrained variables')
G3 = sinks[:]
for node in sinks:
for node2 in nx.ancestors(Gp, node):
if node2 not in G3:
G3.append(node2)
if G3 != []:
raise ModelError('Underconstrained variables')
# vars_resolvables=[]
# for var in vars_resoudre:
# if not 'v'+str(var) in G2+G3:
# vars_resolvables.append(var)
# G1=Gp.copy()
# G1.remove_nodes_from(G2+G3)
#
# M1=nx.bipartite.maximum_matching(G1)
# G1p=nx.DiGraph()
#
# G1p.add_nodes_from(G1.nodes())
# for e in G1.edges():
# # equation vers variable
# if e[0][0]=='v':
# G1p.add_edge(e[0],e[1])
# else:
# G1p.add_edge(e[1],e[0])
# # print(len(M))
# for n1,n2 in M1.items():
# # print(n1,n2)
# if n1[0]=='e':
# G1p.add_edge(n1,n2)
# else:
# G1p.add_edge(n2,n1)
scc = list(nx.strongly_connected_components(Gp))
# pos=nx.spring_layout(G1p)
# plt.figure()
# nx.draw(G1p,pos)
# nx.draw_networkx_labels(G1p,pos)
# print(scc)
if scc != []:
C = nx.condensation(Gp, scc)
isc_vars = []
for isc, sc in enumerate(scc):
for var in variables_to_solve:
if var in sc:
isc_vars.append(isc)
break
ancestors_vars = isc_vars[:]
for isc_var in isc_vars:
for ancetre in nx.ancestors(C, isc_var):
if ancetre not in ancestors_vars:
ancestors_vars.append(ancetre)
order_sc = [sc for sc in nx.topological_sort(
C) if sc in ancestors_vars]
order_ev = []
for isc in order_sc:
# list of equations and variables, sorted so they can be separated
evs = list(scc[isc])
# print(evs)
# levs=int(len(evs)/2)
eqs = []
var = []
for element in evs:
if type(element) == tuple:
eqs.append(element)
else:
var.append(element)
order_ev.append((len(eqs), eqs, var))
return order_ev
raise ModelError
def cross_dir(self, forcex86=False):
r"""
Cross platform specific subfolder.
Parameters
----------
forcex86: bool
Use 'x86' as current architecture even if the current architecture is
not x86.
Return
------
subfolder: str
'' if target architecture is current architecture,
'\current_target' if not.
"""
current = 'x86' if forcex86 else self.current_cpu
return (
'' if self.target_cpu == current else
self.target_dir().replace('\\', '\\%s_' % current)
)
def get_interpolation_function(self, times, data):
""" Initializes interpolation model
:param times: Array of reference times in seconds relative to the first timestamp
:type times: numpy.array
:param data: One dimensional array of time series
:type data: numpy.array
:return: Initialized interpolation model class
"""
return self.interpolation_object(times, data, axis=0, **self.interpolation_parameters)
def enterContainer(self, entry, query):
"""
Enters a new container for the given entry widget.
:param entry | <XOrbQueryEntryWidget> || None
"""
self._compoundStack.append(entry)
self.addContainer(query)
def delete_rows(self, row, no_rows=1):
"""Deletes no_rows rows and marks grid as changed"""
# Mark content as changed
post_command_event(self.main_window, self.ContentChangedMsg)
tab = self.grid.current_table
try:
self.code_array.delete(row, no_rows, axis=0, tab=tab)
except ValueError as err:
post_command_event(self.main_window, self.StatusBarMsg,
text=str(err))
def avail_locations(call=None):
'''
List all available locations
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
conn = get_conn()
response = conn.getCreateObjectOptions()
#return response
for datacenter in response['datacenters']:
#return data center
ret[datacenter['template']['datacenter']['name']] = {
'name': datacenter['template']['datacenter']['name'],
}
return ret
def mutualReceptions(self, idA, idB):
""" Returns all pairs of dignities in mutual reception. """
AB = self.receives(idA, idB)
BA = self.receives(idB, idA)
# Returns a product of both lists
return [(a,b) for a in AB for b in BA]
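# Illustration of the cartesian product this returns (the dignity names are
# hypothetical, not from the source):
AB = ['ruler', 'exaltation']   # dignities in which A receives B
BA = ['triplicity']            # dignities in which B receives A
pairs = [(a, b) for a in AB for b in BA]
assert pairs == [('ruler', 'triplicity'), ('exaltation', 'triplicity')]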
def decrease_exponent_to(self, new_exp):
"""Return an EncryptedNumber with same value but lower exponent.
If we multiply the encoded value by :attr:`EncodedNumber.BASE` and
decrement :attr:`exponent`, then the decoded value does not change.
Thus we can almost arbitrarily ratchet down the exponent of an
`EncryptedNumber` - we only run into trouble when the encoded
integer overflows. There may not be a warning if this happens.
When adding `EncryptedNumber` instances, their exponents must
match.
This method is also useful for hiding information about the
precision of numbers - e.g. a protocol can fix the exponent of
all transmitted `EncryptedNumber` instances to some lower bound(s).
Args:
new_exp (int): the desired exponent.
Returns:
EncryptedNumber: Instance with the same plaintext and desired
exponent.
Raises:
ValueError: You tried to increase the exponent.
"""
if new_exp > self.exponent:
raise ValueError('New exponent %i should be more negative than '
'old exponent %i' % (new_exp, self.exponent))
multiplied = self * pow(EncodedNumber.BASE, self.exponent - new_exp)
multiplied.exponent = new_exp
return multiplied
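# Plain-integer sketch of the invariant the method relies on (BASE and the
# encoding here are simplified stand-ins for EncodedNumber):
BASE = 16
significand, exponent = 3, -1                # represents 3 * 16**-1
value = significand * BASE ** exponent
significand, exponent = significand * BASE, exponent - 1   # ratchet down
assert value == significand * BASE ** exponent  # decoded value is unchanged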
def drag_and_release(self, start_x, start_y, end_x, end_y, pre_dl=None, post_dl=None):
"""Drag something from (start_x, start_y) to (end_x, endy)
**中文文档**
从start的坐标处鼠标左键单击拖曳到end的坐标处
start, end是tuple. 格式是(x, y)
"""
self.delay(pre_dl)
self.m.press(start_x, start_y, 1)
self.m.drag(end_x, end_y)
self.m.release(end_x, end_y, 1)
self.delay(post_dl)
def create_signed_value(
self, name: str, value: Union[str, bytes], version: int = None
) -> bytes:
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
secret = self.application.settings["cookie_secret"]
key_version = None
if isinstance(secret, dict):
if self.application.settings.get("key_version") is None:
raise Exception("key_version setting must be used for secret_key dicts")
key_version = self.application.settings["key_version"]
return create_signed_value(
secret, name, value, version=version, key_version=key_version
)
def run_query(self, body):
"""
Run a query for entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery
:param body: the body of the query request.
:type body: dict
:return: the batch of query results.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.runQuery(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp['batch']
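# A hedged example body for the runQuery request; field names follow the
# Datastore REST reference linked above, values are illustrative only.
body = {
    'query': {
        'kind': [{'name': 'Task'}],   # entity kind to query
        'limit': 10,                  # cap the batch size
    }
}
# batch = hook.run_query(body)  # `hook` is a hypothetical instance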
def open_hierarchy(self, path, relative_to_object_id, object_id, create_file_type=0):
"""
CreateFileType
0 - Creates no new object.
1 - Creates a notebook with the specified name at the specified location.
2 - Creates a section group with the specified name at the specified location.
3 - Creates a section with the specified name at the specified location.
"""
try:
return(self.process.OpenHierarchy(path, relative_to_object_id, "", create_file_type))
except Exception as e:
print(e)
print("Could not Open Hierarchy") | 0.012559 |
def toggle_pac(self):
"""Enable and disable PAC options."""
if Pac is not None:
pac_on = self.pac['pac_on'].get_value()
self.pac['prep'].setEnabled(pac_on)
self.pac['box_metric'].setEnabled(pac_on)
self.pac['box_complex'].setEnabled(pac_on)
self.pac['box_surro'].setEnabled(pac_on)
self.pac['box_opts'].setEnabled(pac_on)
if not pac_on:
self.pac['prep'].set_value(False)
if Pac is not None and pac_on:
pac = self.pac
hilb_on = pac['hilbert_on'].isChecked()
wav_on = pac['wavelet_on'].isChecked()
for button in pac['hilbert'].values():
button[0].setEnabled(hilb_on)
if button[1] is not None:
button[1].setEnabled(hilb_on)
pac['wav_width'][0].setEnabled(wav_on)
pac['wav_width'][1].setEnabled(wav_on)
if pac['metric'].get_value() in [
'Kullback-Leibler Distance',
'Heights ratio']:
pac['nbin'][0].setEnabled(True)
pac['nbin'][1].setEnabled(True)
else:
pac['nbin'][0].setEnabled(False)
pac['nbin'][1].setEnabled(False)
if pac['metric'] == 'ndPac':
for button in pac['surro'].values():
button[0].setEnabled(False)
if button[1] is not None:
button[1].setEnabled(False)
pac['surro']['pval'][0].setEnabled(True)
ndpac_on = pac['metric'].get_value() == 'ndPac'
surro_on = logical_and(pac['surro_method'].get_value() != 'No surrogates',
not ndpac_on)
norm_on = pac['surro_norm'].get_value() != 'No normalization'
blocks_on = 'across time' in pac['surro_method'].get_value()
pac['surro_method'].setEnabled(not ndpac_on)
for button in pac['surro'].values():
button[0].setEnabled(surro_on and norm_on)
if button[1] is not None:
button[1].setEnabled(surro_on and norm_on)
pac['surro']['nblocks'][0].setEnabled(blocks_on)
pac['surro']['nblocks'][1].setEnabled(blocks_on)
if ndpac_on:
pac['surro_method'].set_value('No surrogates')
pac['surro']['pval'][0].setEnabled(True)
def _add_crud(self, model_data, object_type, results):
"""
Creates a menu entry for given model data.
Updates results in place.
Args:
model_data: Model data.
object_type: Relation name.
results: Results dict.
"""
model = model_registry.get_model(model_data['name'])
field_name = model_data.get('field')
verbose_name = model_data.get('verbose_name', model.Meta.verbose_name_plural)
category = model_data.get('category', settings.DEFAULT_OBJECT_CATEGORY_NAME)
wf_dict = {"text": verbose_name,
"wf": model_data.get('wf', "crud"),
"model": model_data['name'],
"kategori": category}
if field_name:
wf_dict['param'] = field_name
results[object_type].append(wf_dict)
self._add_to_quick_menu(wf_dict['model'], wf_dict)
def save_report(session):
'''
Saves the session to a temp file, and returns that path.
Also prunes the number of reports to 10 so there aren't loads building up.
'''
# prune this folder to contain the last 10 sessions
previous_reports = glob.glob(os.path.join(report_dir(), '*.pyireport'))
previous_reports.sort(reverse=True)
while len(previous_reports) > 10:
report_file = previous_reports.pop()
os.remove(report_file)
identifier = time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime(session.start_time))
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
session.save(path)
return path, identifier
def eval_string(self, s):
"""
Returns the tristate value of the expression 's', represented as 0, 1,
and 2 for n, m, and y, respectively. Raises KconfigError if syntax
errors are detected in 's'. Warns if undefined symbols are referenced.
As an example, if FOO and BAR are tristate symbols at least one of
which has the value y, then config.eval_string("y && (FOO || BAR)")
returns 2 (y).
To get the string value of non-bool/tristate symbols, use
Symbol.str_value. eval_string() always returns a tristate value, and
all non-bool/tristate symbols have the tristate value 0 (n).
The expression parsing is consistent with how parsing works for
conditional ('if ...') expressions in the configuration, and matches
the C implementation. m is rewritten to 'm && MODULES', so
eval_string("m") will return 0 (n) unless modules are enabled.
"""
# The parser is optimized to be fast when parsing Kconfig files (where
# an expression can never appear at the beginning of a line). We have
# to monkey-patch things a bit here to reuse it.
self._filename = None
# Don't include the "if " from below to avoid giving confusing error
# messages
self._line = s
self._tokens = self._tokenize("if " + s)
self._tokens_i = 1 # Skip the 'if' token
return expr_value(self._expect_expr_and_eol())
def validateNodeMsg(self, wrappedMsg):
"""
Validate another node's message sent to this node.
:param wrappedMsg: Tuple of message and the name of the node that sent
the message
:return: Tuple of message from node and name of the node
"""
msg, frm = wrappedMsg
if self.isNodeBlacklisted(frm):
self.discard(str(msg)[:256], "received from blacklisted node {}".format(frm), logger.display)
return None
with self.metrics.measure_time(MetricsName.INT_VALIDATE_NODE_MSG_TIME):
try:
message = node_message_factory.get_instance(**msg)
except (MissingNodeOp, InvalidNodeOp) as ex:
raise ex
except Exception as ex:
raise InvalidNodeMsg(str(ex))
try:
self.verifySignature(message)
except BaseExc as ex:
raise SuspiciousNode(frm, ex, message) from ex
logger.debug("{} received node message from {}: {}".format(self, frm, message), extra={"cli": False})
return message, frm
def _mark_lines(lines, sender):
"""Mark message lines with markers to distinguish signature lines.
Markers:
* e - empty line
* s - line identified as signature
* t - other i.e. ordinary text line
>>> _mark_lines(['Some text', '', 'Bob'], 'Bob')
'tes'
"""
global EXTRACTOR
candidate = get_signature_candidate(lines)
# at first consider everything to be text no signature
markers = list('t' * len(lines))
# mark lines starting from bottom up
# mark only lines that belong to candidate
# no need to mark all lines of the message
for i, line in reversed(list(enumerate(candidate))):
# markers correspond to lines not candidate
# so we need to recalculate our index to be
# relative to lines not candidate
j = len(lines) - len(candidate) + i
if not line.strip():
markers[j] = 'e'
elif is_signature_line(line, sender, EXTRACTOR):
markers[j] = 's'
return "".join(markers) | 0.000982 |
def parse_hex_color(value):
"""
Convert a CSS color in hexadecimal notation into its R, G, B components.
:param value: A CSS color in hexadecimal notation (a string like '#000000').
:return: A tuple with three integers (with values between 0 and 255)
corresponding to the R, G and B components of the color.
:raises: :exc:`~exceptions.ValueError` on values that can't be parsed.
"""
if value.startswith('#'):
value = value[1:]
if len(value) == 3:
return (
int(value[0] * 2, 16),
int(value[1] * 2, 16),
int(value[2] * 2, 16),
)
elif len(value) == 6:
return (
int(value[0:2], 16),
int(value[2:4], 16),
int(value[4:6], 16),
)
else:
raise ValueError()
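# Quick sanity checks covering both the 3- and 6-digit forms:
assert parse_hex_color('#fff') == (255, 255, 255)
assert parse_hex_color('000000') == (0, 0, 0)
assert parse_hex_color('#1e90ff') == (30, 144, 255)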
def get_paths_cfg(
sys_file='pythran.cfg',
platform_file='pythran-{}.cfg'.format(sys.platform),
user_file='.pythranrc'
):
"""
>>> os.environ['HOME'] = '/tmp/test'
>>> get_paths_cfg()['user']
'/tmp/test/.pythranrc'
>>> os.environ['HOME'] = '/tmp/test'
>>> os.environ['XDG_CONFIG_HOME'] = '/tmp/test2'
>>> get_paths_cfg()['user']
'/tmp/test2/.pythranrc'
>>> os.environ['HOME'] = '/tmp/test'
>>> os.environ['XDG_CONFIG_HOME'] = '/tmp/test2'
>>> os.environ['PYTHRANRC'] = '/tmp/test3/pythranrc'
>>> get_paths_cfg()['user']
'/tmp/test3/pythranrc'
"""
sys_config_dir = os.path.dirname(__file__)
sys_config_path = os.path.join(sys_config_dir, sys_file)
platform_config_path = os.path.join(sys_config_dir, platform_file)
user_config_path = os.environ.get('PYTHRANRC', None)
if not user_config_path:
user_config_dir = os.environ.get('XDG_CONFIG_HOME', '~')
user_config_path = os.path.expanduser(
os.path.join(user_config_dir, user_file))
return {"sys": sys_config_path, "platform": platform_config_path, "user": user_config_path} | 0.001751 |
def set_value(self, option, value, index=None):
"""
Sets the value on the given option.
:param option: The name of the option as it appears in the config file
:param value: The value that is being applied. If this section is indexed then the
value must be a list (to be applied directly) or you must supply the index parameter,
which will cause the value to be inserted into an existing list.
:param index: If the attribute is indexed, we will use this index to insert
the value you have supplied.
:return: an instance of itself so that you can chain setting values together.
"""
if self.is_indexed and index is None and not isinstance(value, list):
raise TypeError("Value should be a list when not giving an index in an indexed header")
self.values[option].set_value(value=value, index=index)
return self
def unwrap(self, value):
"""
A helper method for unwrapping the loaderplugin fragment out of
the provided value (typically a modname) and return it.
Note that the filter chaining is very implementation specific to
each and every loader plugin and their specific toolchain, so
this default implementation is not going to attempt to consume
everything in one go.
Another note: if this is to be subclassed and the return value does
not actually remove the loaderplugin fragment, methods like the
default implementation of ``modname_source_to_target`` in this class
may recurse forever.
"""
globs = value.split('!', 1)
if globs[0].split('?', 1)[0] == self.name:
return globs[-1]
else:
return value
def enterprise_customer_required(view):
"""
Ensure the user making the API request is associated with an EnterpriseCustomer.
This decorator attempts to find an EnterpriseCustomer associated with the requesting
user and passes that EnterpriseCustomer to the view as a parameter. It will return a
PermissionDenied error if an EnterpriseCustomer cannot be found.
Usage::
@enterprise_customer_required()
def my_view(request, enterprise_customer):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_customer_required)
def get(self, request, enterprise_customer):
# Some functionality ...
"""
@wraps(view)
def wrapper(request, *args, **kwargs):
"""
Checks for an enterprise customer associated with the user, calls the view function
if one exists, raises PermissionDenied if not.
"""
user = request.user
enterprise_customer = get_enterprise_customer_for_user(user)
if enterprise_customer:
args = args + (enterprise_customer,)
return view(request, *args, **kwargs)
else:
raise PermissionDenied(
'User {username} is not associated with an EnterpriseCustomer.'.format(
username=user.username
)
)
return wrapper
def connectDropzone( self,
rect,
slot,
color = None,
style = None,
name = '',
toolTip = '' ):
"""
Connects the inputed dropzone to the given slot at the defined rect.
:param rect | <QRectF>
slot | <method> || <function>
:return <XNodeHotspot>
"""
if not color:
color = self.hotspotColor()
if not style:
style = self.hotspotStyle()
hotspot = XNodeHotspot(rect,
slot,
name,
toolTip)
hotspot.setColor(color)
hotspot.setStyle(style)
self._dropzones.append(hotspot)
return hotspot
def nnz_obs_names(self):
""" wrapper around pyemu.Pst.nnz_obs_names for listing non-zero
observation names
Returns
-------
nnz_obs_names : list
pyemu.Pst.nnz_obs_names
"""
if self.__pst is not None:
return self.pst.nnz_obs_names
else:
return self.jco.obs_names
def hyphen(self):
'''
Returns the ISBN number with segment hyphenation.
Data obtained from https://www.isbn-international.org/
https://www.isbn-international.org/export_rangemessage.xml
@return: ISBN formatted as ISBN13 with hyphens
'''
if not ISBN.hyphenRange:
ISBN.hyphenRange = hyphen.ISBNRange()
return ISBN.hyphenRange.hyphenformat(self._id)
def genCaCert(self, name, signas=None, outp=None, save=True):
'''
Generates a CA keypair.
Args:
name (str): The name of the CA keypair.
signas (str): The CA keypair to sign the new CA with.
outp (synapse.lib.output.Output): The output buffer.
Examples:
Make a CA named "myca":
mycakey, mycacert = cdir.genCaCert('myca')
Returns:
((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the private key and certificate objects.
'''
pkey, cert = self._genBasePkeyCert(name)
ext0 = crypto.X509Extension(b'basicConstraints', False, b'CA:TRUE')
cert.add_extensions([ext0])
if signas is not None:
self.signCertAs(cert, signas)
else:
self.selfSignCert(cert, pkey)
if save:
keypath = self._savePkeyTo(pkey, 'cas', '%s.key' % name)
if outp is not None:
outp.printf('key saved: %s' % (keypath,))
crtpath = self._saveCertTo(cert, 'cas', '%s.crt' % name)
if outp is not None:
outp.printf('cert saved: %s' % (crtpath,))
return pkey, cert
def find_version(*paths):
'''reads a file and returns the defined __version__ value'''
version_match = re.search(r"^__version__ ?= ?['\"]([^'\"]*)['\"]",
read(*paths), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.") | 0.003049 |
def _uprint(dest, text):
"""
Write text to dest, adding a newline character.
Text may be a unicode string, or a byte string in UTF-8 encoding.
It must not be None.
If dest is None, the text is encoded to a codepage suitable for the current
stdout and is written to stdout.
Otherwise, dest must be a file path, and the text is encoded to a UTF-8
Byte sequence and is appended to the file (opening and closing the file).
"""
if isinstance(text, six.text_type):
text = text + u'\n'
elif isinstance(text, six.binary_type):
text = text + b'\n'
else:
raise TypeError(
"text must be a unicode or byte string, but is {0}".
format(type(text)))
if dest is None:
if six.PY2:
# On py2, stdout.write() requires byte strings
if isinstance(text, six.text_type):
text = text.encode(STDOUT_ENCODING, 'replace')
else:
# On py3, stdout.write() requires unicode strings
if isinstance(text, six.binary_type):
text = text.decode('utf-8')
sys.stdout.write(text)
elif isinstance(dest, (six.text_type, six.binary_type)):
if isinstance(text, six.text_type):
open_kwargs = dict(mode='a', encoding='utf-8')
else:
open_kwargs = dict(mode='ab')
if six.PY2:
# Open with codecs to be able to set text mode
with codecs.open(dest, **open_kwargs) as f:
f.write(text)
else:
with open(dest, **open_kwargs) as f:
f.write(text)
else:
raise TypeError(
"dest must be None or a string, but is {0}".
format(type(dest)))
def msg2usernames(msg, **config):
''' Return cached fedmsg.meta.msg2usernames(...) '''
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
key = "|".join(['usernames', msg['msg_id']]).encode('utf-8')
creator = lambda: fedmsg.meta.msg2usernames(msg, **config)
return _cache.get_or_create(key, creator)
def boot(zone, single=False, altinit=None, smf_options=None):
'''
Boot (or activate) the specified zone.
zone : string
name or uuid of the zone
single : boolean
boots only to milestone svc:/milestone/single-user:default.
altinit : string
valid path to an alternative executable to be the primordial process.
smf_options : string
include two categories of options to control booting behavior of
the service management facility: recovery options and messages options.
CLI Example:
.. code-block:: bash
salt '*' zoneadm.boot clementine
salt '*' zoneadm.boot maeve single=True
salt '*' zoneadm.boot teddy single=True smf_options=verbose
'''
ret = {'status': True}
## build boot_options
boot_options = ''
if single:
boot_options = '-s {0}'.format(boot_options)
if altinit: # note: we cannot validate the path, as this is local to the zonepath.
boot_options = '-i {0} {1}'.format(altinit, boot_options)
if smf_options:
boot_options = '-m {0} {1}'.format(smf_options, boot_options)
if boot_options != '':
boot_options = ' -- {0}'.format(boot_options.strip())
## execute boot
res = __salt__['cmd.run_all']('zoneadm {zone} boot{boot_opts}'.format(
zone='-u {0}'.format(zone) if _is_uuid(zone) else '-z {0}'.format(zone),
boot_opts=boot_options,
))
ret['status'] = res['retcode'] == 0
ret['message'] = res['stdout'] if ret['status'] else res['stderr']
ret['message'] = ret['message'].replace('zoneadm: ', '')
if ret['message'] == '':
del ret['message']
return ret
def from_genes(cls, genes: List[ExpGene]):
"""Initialize instance using a list of `ExpGene` objects."""
data = [g.to_dict() for g in genes]
index = [d.pop('ensembl_id') for d in data]
table = cls(data, index=index)
return table
def exists(self, path_or_index):
"""
Checks if a path exists in the document. This is meant to be used
for a corresponding :meth:`~couchbase.subdocument.exists` request.
:param path_or_index: The path (or index) to check
:return: `True` if the path exists, `False` if the path does not exist
:raise: An exception if the server-side check failed for a reason other
than the path not existing.
"""
result = self._resolve(path_or_index)
if not result[0]:
return True
elif E.SubdocPathNotFoundError._can_derive(result[0]):
return False
else:
raise E.exc_from_rc(result[0])
def load_freesurfer_geometry(filename, to='mesh', warn=False):
'''
load_freesurfer_geometry(filename) yields the data stored at the freesurfer geometry file given
by filename. The optional argument 'to' may be used to change the kind of data that is
returned.
The following are valid settings for the 'to' keyword argument:
* 'mesh' (the default) yields a mesh object
* 'tess' yields a tess object (discarding coordinates)
* 'raw' yields a tuple of numpy arrays, identical to the read_geometry return value.
'''
if not warn:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
category=UserWarning,
module='nibabel')
(xs, fs, info) = fsio.read_geometry(filename, read_metadata=True)
else:
(xs, fs, info) = fsio.read_geometry(filename, read_metadata=True)
# see if there's chirality data here...
filename = os.path.split(filename)[1]
filename = filename.lower()
if filename.startswith('lh'): info['chirality'] = 'lh.'
elif filename.startswith('rh'): info['chirality'] = 'rh.'
# parse it into something
to = to.lower()
if to in ['mesh', 'auto', 'automatic']:
return geo.Mesh(fs, xs, meta_data=info)
elif to in ['tess', 'tesselation']:
return geo.Tesselation(fs, meta_data=info)
elif to in ['coords', 'coordinates']:
return xs
elif to in ['triangles', 'faces']:
return fs
elif to in ['meta', 'meta_data']:
return info
elif to =='raw':
return (xs, fs)
else:
raise ValueError('Could not understand \'to\' argument: %s' % to)
def fix_logging_path(config, main_section):
"""
Expand environment variables and user home (~) in the log.file and return
as relative path.
"""
log_file = config.get(main_section, 'log.file')
if log_file:
log_file = os.path.expanduser(os.path.expandvars(log_file))
if os.path.isabs(log_file):
log_file = os.path.relpath(log_file)
return log_file
def parse_message(
self, body, timestamp=None, nonce=None, msg_signature=None
):
"""
Parse the received raw XML, decrypting it if necessary, and return a
WeRoBot Message.
:param body: the body of the request sent by the WeChat server.
"""
message_dict = parse_xml(body)
if "Encrypt" in message_dict:
xml = self.crypto.decrypt_message(
timestamp=timestamp,
nonce=nonce,
msg_signature=msg_signature,
encrypt_msg=message_dict["Encrypt"]
)
message_dict = parse_xml(xml)
return process_message(message_dict)
def _convert_value(val):
"""Handle multiple input type values.
"""
def _is_number(x, op):
try:
op(x)
return True
except ValueError:
return False
if isinstance(val, (list, tuple)):
return [_convert_value(x) for x in val]
elif val is None:
return val
elif _is_number(val, int):
return int(val)
elif _is_number(val, float):
return float(val)
elif val.find(";;") >= 0:
return [_convert_value(v) for v in val.split(";;")]
elif val.startswith(("{", "[")):
# Can get ugly JSON output from CWL with unicode and ' instead of "
# This tries to fix it so parsed correctly by json loader
return json.loads(val.replace("u'", "'").replace("'", '"'))
elif val.lower() == "true":
return True
elif val.lower() == "false":
return False
else:
return val
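# Behaviour sketch, one case per branch (inputs are illustrative):
assert _convert_value('42') == 42
assert _convert_value('3.14') == 3.14
assert _convert_value('a;;b') == ['a', 'b']
assert _convert_value("{'key': u'val'}") == {'key': 'val'}
assert _convert_value('TRUE') is True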
def protected_resource_view(scopes=None):
"""
View decorator. The client accesses protected resources by presenting the
access token to the resource server.
https://tools.ietf.org/html/rfc6749#section-7
"""
if scopes is None:
scopes = []
def wrapper(view):
def view_wrapper(request, *args, **kwargs):
access_token = extract_access_token(request)
try:
try:
kwargs['token'] = Token.objects.get(access_token=access_token)
except Token.DoesNotExist:
logger.debug('[UserInfo] Token does not exist: %s', access_token)
raise BearerTokenError('invalid_token')
if kwargs['token'].has_expired():
logger.debug('[UserInfo] Token has expired: %s', access_token)
raise BearerTokenError('invalid_token')
if not set(scopes).issubset(set(kwargs['token'].scope)):
logger.debug('[UserInfo] Missing openid scope.')
raise BearerTokenError('insufficient_scope')
except BearerTokenError as error:
response = HttpResponse(status=error.status)
response['WWW-Authenticate'] = 'error="{0}", error_description="{1}"'.format(
error.code, error.description)
return response
return view(request, *args, **kwargs)
return view_wrapper
return wrapper
def matches(self, node, value):
"""
Returns whether the given node matches the filter rule with the given value.
Args:
node (Element): The node to filter.
value (object): The desired value with which the node should be evaluated.
Returns:
bool: Whether the given node matches.
"""
if self.skip(value):
return True
if not self._valid_value(value):
msg = "Invalid value {value} passed to filter {name} - ".format(
value=repr(value),
name=self.name)
if self.default is not None:
warn(msg + "defaulting to {}".format(self.default))
value = self.default
else:
warn(msg + "skipping")
return True
return self.func(node, value)
def debugTreePrint(node,pfx="->"):
"""Purely a debugging aid: Ascii-art picture of a tree descended from node"""
print(pfx, node.item)
for c in node.children:
debugTreePrint(c, " " + pfx)
def _Complete(self):
"""Marks the hunt as completed."""
self._RemoveForemanRule()
if "w" in self.hunt_obj.mode:
self.hunt_obj.Set(self.hunt_obj.Schema.STATE("COMPLETED"))
self.hunt_obj.Flush()
def selectnotin(table, field, value, complement=False):
"""Select rows where the given field is not a member of the given value."""
return select(table, field, lambda v: v not in value,
complement=complement)
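# Hypothetical petl-style usage (the first row of a table is the header):
table = [['id', 'name'], [1, 'a'], [2, 'b'], [3, 'c']]
filtered = selectnotin(table, 'id', {2, 3})
# -> keeps only the row with id 1 (plus the header row)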
def git_checkout(branch_name, create=False):
# type: (str, bool) -> None
""" Checkout or create a given branch
Args:
branch_name (str):
The name of the branch to checkout or create.
create (bool):
If set to **True** it will create the branch instead of checking it
out.
"""
log.info("Checking out <33>{}".format(branch_name))
shell.run('git checkout {} {}'.format('-b' if create else '', branch_name))
def feed_index(service, opts):
"""Feed the named index in a specific manner."""
indexname = opts.args[0]
itype = opts.kwargs['ingest']
# get index handle
try:
index = service.indexes[indexname]
except KeyError:
print("Index %s not found" % indexname)
return
if itype in ["stream", "submit"]:
stream = index.attach()
else:
# create a tcp input if one doesn't exist
input_host = opts.kwargs.get("inputhost", SPLUNK_HOST)
input_port = int(opts.kwargs.get("inputport", SPLUNK_PORT))
input_name = "tcp:%s" % (input_port)
if input_name not in service.inputs.list():
service.inputs.create("tcp", input_port, index=indexname)
# connect to socket
ingest = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ingest.connect((input_host, input_port))
count = 0
lastevent = ""
try:
for i in range(0, 10):
for j in range(0, 5000):
lastevent = "%s: event bunch %d, number %d\n" % \
(datetime.datetime.now().isoformat(), i, j)
if itype == "stream":
stream.write(lastevent + "\n")
elif itype == "submit":
index.submit(lastevent + "\n")
else:
ingest.send(lastevent + "\n")
count = count + 1
print("submitted %d events, sleeping 1 second" % count)
time.sleep(1)
except KeyboardInterrupt:
print("^C detected, last event written:")
print(lastevent)
def _set_nameserver_cos(self, v, load=False):
"""
Setter method for nameserver_cos, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail/output/show_nameserver/nameserver_cos (nameserver-cos-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_nameserver_cos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nameserver_cos() directly.
YANG Description: Indicates the Fibre Channel Class of
Service supported by the device.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'F|1|2|3|,', 'length': [u'0..8']}), is_leaf=True, yang_name="nameserver-cos", rest_name="nameserver-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'class of service'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='nameserver-cos-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """nameserver_cos must be of a type compatible with nameserver-cos-type""",
'defined-type': "brocade-nameserver:nameserver-cos-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'F|1|2|3|,', 'length': [u'0..8']}), is_leaf=True, yang_name="nameserver-cos", rest_name="nameserver-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'class of service'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='nameserver-cos-type', is_config=True)""",
})
self.__nameserver_cos = t
if hasattr(self, '_set'):
self._set()
def process_config_section(cls, config_section, storage):
"""Process the config section and store the extracted data in
the param:`storage` (as outgoing param).
"""
# -- CONCEPT:
# if not storage:
# # -- INIT DATA: With default parts.
# storage.update(dict(_PERSONS={}))
schema = cls.select_config_schema_for(config_section.name)
if not schema:
message = "No schema found for: section=%s"
raise LookupError(message % config_section.name)
# -- PARSE AND STORE CONFIG SECTION:
section_storage = cls.select_storage_for(config_section.name, storage)
section_data = parse_config_section(config_section, schema)
section_storage.update(section_data)
def zcat_make_temps(data, raws, num, tmpdir, optim, njobs, start):
"""
Call bash command 'cat' and 'split' to split large files. The goal
is to create N splitfiles where N is a multiple of the number of processors
so that each processor can work on a file in parallel.
"""
printstr = ' chunking large files | {} | s1 |'
## split args
tmpdir = os.path.realpath(tmpdir)
LOGGER.info("zcat is using optim = %s", optim)
## read it, is it gzipped?
catcmd = ["cat"]
if raws[0].endswith(".gz"):
catcmd = ["gunzip", "-c"]
## get reading commands for r1s, r2s
cmd1 = catcmd + [raws[0]]
cmd2 = catcmd + [raws[1]]
## second command splits and writes with name prefix
cmd3 = ["split", "-a", "4", "-l", str(int(optim)), "-",
os.path.join(tmpdir, "chunk1_"+str(num)+"_")]
cmd4 = ["split", "-a", "4", "-l", str(int(optim)), "-",
os.path.join(tmpdir, "chunk2_"+str(num)+"_")]
### run splitter
proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
proc3 = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc1.stdout)
## wrap the actual call so we can kill it if anything goes awry
while 1:
try:
if not isinstance(proc3.poll(), int):
elapsed = datetime.timedelta(seconds=int(time.time()-start))
done = len(glob.glob(os.path.join(tmpdir, 'chunk1_*')))
progressbar(njobs, min(njobs, done), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
else:
res = proc3.communicate()[0]
proc1.stdout.close()
break
except KeyboardInterrupt:
proc1.kill()
proc3.kill()
raise KeyboardInterrupt()
if proc3.returncode:
raise IPyradWarningExit(" error in %s: %s", cmd3, res)
## grab output handles
chunks1 = glob.glob(os.path.join(tmpdir, "chunk1_"+str(num)+"_*"))
chunks1.sort()
if "pair" in data.paramsdict["datatype"]:
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc2.stdout)
## wrap the actual call so we can kill it if anything goes awry
while 1:
try:
if not isinstance(proc4.poll(), int):
elapsed = datetime.timedelta(seconds=int(time.time()-start))
done = len(glob.glob(os.path.join(tmpdir, 'chunk1_*')))
progressbar(njobs, min(njobs, done), printstr.format(elapsed), data._spacer)
time.sleep(0.1)
else:
res = proc4.communicate()[0]
proc2.stdout.close()
break
except KeyboardInterrupt:
proc2.kill()
proc4.kill()
raise KeyboardInterrupt()
if proc4.returncode:
raise IPyradWarningExit(" error in %s: %s", cmd4, res)
## grab output handles
chunks2 = glob.glob(os.path.join(tmpdir, "chunk2_"+str(num)+"_*"))
chunks2.sort()
else:
chunks2 = [0]*len(chunks1)
assert len(chunks1) == len(chunks2), \
"R1 and R2 files are not the same length."
## ensure full progress bar b/c estimates njobs could be off
progressbar(10, 10, printstr.format(elapsed), spacer=data._spacer)
return zip(chunks1, chunks2)
def _add_data(self, eopatch, data):
""" Adds downloaded data to EOPatch """
valid_mask = data[..., -1]
data = data[..., :-1]
if data.ndim == 3:
data = data.reshape(data.shape + (1,))
if not self.feature_type.is_time_dependent():
if data.shape[0] > 1:
raise ValueError('Cannot save time dependent data to time independent feature')
data = data.squeeze(axis=0)
if self.feature_type.is_discrete():
data = data.astype(np.int32)
eopatch[self.feature_type][self.feature_name] = data
mask_feature_type, mask_feature_name = next(self.valid_data_mask_feature())
max_value = self.image_format.get_expected_max_value()
valid_data = (valid_mask == max_value).astype(np.bool).reshape(valid_mask.shape + (1,))
if mask_feature_name not in eopatch[mask_feature_type]:
eopatch[mask_feature_type][mask_feature_name] = valid_data
def sort_menus(c):
"""
sort_menus goes through the items and sorts them based on
their weight
"""
for name in c.items:
if not c.sorted[name]:
c.items[name].sort(key=lambda x: x.weight)
c.sorted[name] = True
def get_gradebook_column(self):
"""Gets the ``GradebookColumn``.
return: (osid.grading.GradebookColumn) - the ``GradebookColumn``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective
if not bool(self._my_map['gradebookColumnId']):
raise errors.IllegalState('gradebook_column empty')
mgr = self._get_provider_manager('GRADING')
if not mgr.supports_gradebook_column_lookup():
raise errors.OperationFailed('Grading does not support GradebookColumn lookup')
lookup_session = mgr.get_gradebook_column_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_gradebook_view()
return lookup_session.get_gradebook_column(self.get_gradebook_column_id())
def add(self, data, overwrite=False):
"""Add given data string by guessing its format. The format must be
Motorola S-Records, Intel HEX or TI-TXT. Set `overwrite` to
``True`` to allow already added data to be overwritten.
"""
if is_srec(data):
self.add_srec(data, overwrite)
elif is_ihex(data):
self.add_ihex(data, overwrite)
elif is_ti_txt(data):
self.add_ti_txt(data, overwrite)
else:
raise UnsupportedFileFormatError()
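# Self-contained sketch of the same guess-by-format dispatch, using naive
# detectors (the real is_srec/is_ihex/is_ti_txt checks are stricter):
def guess_format(data):
    if data.lstrip().startswith('S'):
        return 'srec'
    if data.lstrip().startswith(':'):
        return 'ihex'
    if data.lstrip().startswith('@'):
        return 'ti_txt'
    raise ValueError('unsupported file format')

assert guess_format('S9030000FC\n') == 'srec'
assert guess_format(':0100000001FE\n') == 'ihex'
assert guess_format('@0100\nFF\nq\n') == 'ti_txt'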
def get_body_region(defined):
"""Return the start and end offsets of function body"""
scope = defined.get_scope()
pymodule = defined.get_module()
lines = pymodule.lines
node = defined.get_ast()
start_line = node.lineno
if defined.get_doc() is None:
start_line = node.body[0].lineno
elif len(node.body) > 1:
start_line = node.body[1].lineno
start = lines.get_line_start(start_line)
scope_start = pymodule.logical_lines.logical_line_in(scope.start)
if scope_start[1] >= start_line:
# a one-liner!
# XXX: what if colon appears in a string
start = pymodule.source_code.index(':', start) + 1
while pymodule.source_code[start].isspace():
start += 1
end = min(lines.get_line_end(scope.end) + 1, len(pymodule.source_code))
return start, end
def compile_flags(args):
"""
Build a dictionary with an entry for cppflags, ldflags, and cxxflags.
These options are filled according to the command line defined options
"""
compiler_options = {
'define_macros': args.defines,
'undef_macros': args.undefs,
'include_dirs': args.include_dirs,
'extra_compile_args': args.extra_flags,
'library_dirs': args.libraries_dir,
'extra_link_args': args.extra_flags,
}
for param in ('opts', ):
val = getattr(args, param, None)
if val:
compiler_options[param] = val
return compiler_options
def lmfit_jacobian(pars, x, y, errs=None, B=None, emp=False):
"""
Wrapper around :func:`AegeanTools.fitting.jacobian` and :func:`AegeanTools.fitting.emp_jacobian`
which gives the output in a format that is required for lmfit.
Parameters
----------
pars : lmfit.Model
The model parameters
x, y : list
Locations at which the jacobian is being evaluated
errs : list
a vector of 1\sigma errors (optional). Default = None
B : 2d-array
a B-matrix (optional) see :func:`AegeanTools.fitting.Bmatrix`
emp : bool
If true the use the empirical Jacobian, otherwise use the analytical one.
Default = False.
Returns
-------
j : 2d-array
A Jacobian.
See Also
--------
:func:`AegeanTools.fitting.Bmatrix`
:func:`AegeanTools.fitting.jacobian`
:func:`AegeanTools.fitting.emp_jacobian`
"""
if emp:
matrix = emp_jacobian(pars, x, y)
else:
# calculate in the normal way
matrix = jacobian(pars, x, y)
# now munge this to be as expected for lmfit
matrix = np.vstack(matrix)
if errs is not None:
matrix /= errs
# matrix = matrix.dot(errs)
if B is not None:
matrix = matrix.dot(B)
matrix = np.transpose(matrix)
return matrix
def update_loan_entry(database, entry):
"""Update a record of a loan report in the provided database.
@param database: The MongoDB database to operate on. The loans collection
will be used from this database.
@type database: pymongo.database.Database
@param entry: The entry to insert into the database, updating the entry with
the same recordID if one exists.
@type entry: dict
"""
entry = clean_entry(entry)
database.loans.update(
{'recordID': entry['recordID']},
{'$set': entry},
upsert=True
)
def updateGeometry(self):
"""Move widget to point under cursor
"""
WIDGET_BORDER_MARGIN = 5
SCROLLBAR_WIDTH = 30 # just a guess
sizeHint = self.sizeHint()
width = sizeHint.width()
height = sizeHint.height()
cursorRect = self._qpart.cursorRect()
parentSize = self.parentWidget().size()
spaceBelow = parentSize.height() - cursorRect.bottom() - WIDGET_BORDER_MARGIN
spaceAbove = cursorRect.top() - WIDGET_BORDER_MARGIN
if height <= spaceBelow or \
spaceBelow > spaceAbove:
yPos = cursorRect.bottom()
if height > spaceBelow and \
spaceBelow > self.minimumHeight():
height = spaceBelow
width = width + SCROLLBAR_WIDTH
else:
if height > spaceAbove and \
spaceAbove > self.minimumHeight():
height = spaceAbove
width = width + SCROLLBAR_WIDTH
yPos = max(3, cursorRect.top() - height)
xPos = cursorRect.right() - self._horizontalShift()
if xPos + width + WIDGET_BORDER_MARGIN > parentSize.width():
xPos = max(3, parentSize.width() - WIDGET_BORDER_MARGIN - width)
self.setGeometry(xPos, yPos, width, height)
self._closeIfNotUpdatedTimer.stop()
def _completion_checker(async_id, context_id):
"""Check if all Async jobs within a Context have been run."""
if not context_id:
logging.debug("Context for async %s does not exist", async_id)
return
context = FuriousContext.from_id(context_id)
marker = FuriousCompletionMarker.get_by_id(context_id)
if marker and marker.complete:
logging.info("Context %s already complete" % context_id)
return True
task_ids = context.task_ids
if async_id in task_ids:
task_ids.remove(async_id)
logging.debug("Loaded context.")
logging.debug(task_ids)
done, has_errors = _check_markers(task_ids)
if not done:
return False
_mark_context_complete(marker, context, has_errors)
return True
def locate_arcgis():
'''
Find the path to the ArcGIS Desktop installation.
Keys to check:
HKLM/SOFTWARE/ESRI/ArcGIS 'RealVersion' - will give the version, then we can use
that to go to
HKLM/SOFTWARE/ESRI/DesktopXX.X 'InstallDir'. Where XX.X is the version
We may need to check HKLM/SOFTWARE/Wow6432Node/ESRI instead
'''
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Wow6432Node\\ESRI\\ArcGIS', 0)
version = _winreg.QueryValueEx(key, "RealVersion")[0][:4]
key_string = "SOFTWARE\\Wow6432Node\\ESRI\\Desktop{0}".format(version)
desktop_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
key_string, 0)
install_dir = _winreg.QueryValueEx(desktop_key, "InstallDir")[0]
return install_dir
except WindowsError:
raise ImportError("Could not locate the ArcGIS directory on this machine") | 0.005482 |
def QA_indicator_PBX(DataFrame, N1=3, N2=5, N3=8, N4=13, N5=18, N6=24):
'Waterfall line (PBX) indicator'
C = DataFrame['close']
PBX1 = (EMA(C, N1) + EMA(C, 2 * N1) + EMA(C, 4 * N1)) / 3
PBX2 = (EMA(C, N2) + EMA(C, 2 * N2) + EMA(C, 4 * N2)) / 3
PBX3 = (EMA(C, N3) + EMA(C, 2 * N3) + EMA(C, 4 * N3)) / 3
PBX4 = (EMA(C, N4) + EMA(C, 2 * N4) + EMA(C, 4 * N4)) / 3
PBX5 = (EMA(C, N5) + EMA(C, 2 * N5) + EMA(C, 4 * N5)) / 3
PBX6 = (EMA(C, N6) + EMA(C, 2 * N6) + EMA(C, 4 * N6)) / 3
DICT = {'PBX1': PBX1, 'PBX2': PBX2, 'PBX3': PBX3,
'PBX4': PBX4, 'PBX5': PBX5, 'PBX6': PBX6}
return pd.DataFrame(DICT)
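# Hedged sketch of the EMA building block, assuming pandas and that
# EMA(C, N) behaves like a span-N exponential moving average:
import pandas as pd

def EMA(series, n):
    return series.ewm(span=n, adjust=False).mean()

close = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0])
pbx1 = (EMA(close, 3) + EMA(close, 6) + EMA(close, 12)) / 3  # PBX1 with N1=3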
def settings_view_for_block(block_wrapper, settings_view_factory):
"""
Returns the settings view for an arbitrary block.
Args:
block_wrapper (BlockWrapper): The block for which a settings
view is to be returned
settings_view_factory (SettingsViewFactory): The settings
view factory used to create the SettingsView object
Returns:
SettingsView object associated with the block
"""
state_root_hash = \
block_wrapper.state_root_hash \
if block_wrapper is not None else None
return settings_view_factory.create_settings_view(state_root_hash)
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
function to select an action given an observation.
See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
# We have to wrap this code into a function due to the way tf.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))
else:
# Do not perturb, just assign.
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
def to_output(self, value):
"""Convert value to process output format."""
return json.loads(resolwe_runtime_utils.save(self.name, str(value)))
def plot_vs_mass(dataset, vars, filename, bins=60):
""" Plot 2D marginalised posteriors of the 'vars' vs the dark matter mass.
We plot the one-sigma and two-sigma filled contours. More contours can be plotted,
which produces something more akin to a heatmap.
If one requires more complicated plotting, it is recommended to write a custom
plotting function by extending the default plot() method.
"""
n = len(vars)
fig, axes = plt.subplots(nrows=n,
ncols=1,
sharex='col',
sharey=False)
plt.subplots_adjust(wspace=0, hspace=0)
m = 'log(m_{\chi})'
for i, y in enumerate(vars):
ax = axes[i]
P = posterior.twoD(dataset+'.h5', m, y,
xlimits=limits(m), ylimits=limits(y), xbins=bins, ybins=bins)
# apply some gaussian smoothing to make the contours slightly smoother
sigmas = (np.diff(P.ycenters)[0], np.diff(P.xcenters)[0])
P.pdf = gaussian_filter(P.pdf, sigmas, mode='nearest')
P.plot(ax, levels=np.linspace(0.9, 0.1, 9))
ax.set_xlabel(labels('log(m_{\chi})'))
ax.set_ylabel(labels(y))
fig.set_size_inches(4,n*3)
fig.savefig(filename, dpi=200, bbox_inches='tight')
plt.close(fig)
def init_UI(self):
"""
Builds User Interface for the interpretation Editor
"""
#set fonts
FONT_WEIGHT=1
if sys.platform.startswith('win'): FONT_WEIGHT=-1
font1 = wx.Font(9+FONT_WEIGHT, wx.SWISS, wx.NORMAL, wx.NORMAL, False, self.font_type)
font2 = wx.Font(12+FONT_WEIGHT, wx.SWISS, wx.NORMAL, wx.NORMAL, False, self.font_type)
#if you're on mac do some funny stuff to make it look okay
is_mac = False
if sys.platform.startswith("darwin"):
is_mac = True
self.search_bar = wx.SearchCtrl(self.panel, size=(350*self.GUI_RESOLUTION,25) ,style=wx.TE_PROCESS_ENTER | wx.TE_PROCESS_TAB | wx.TE_NOHIDESEL)
self.Bind(wx.EVT_TEXT_ENTER, self.on_enter_search_bar,self.search_bar)
self.Bind(wx.EVT_SEARCHCTRL_SEARCH_BTN, self.on_enter_search_bar,self.search_bar)
self.search_bar.SetHelpText(dieh.search_help)
# self.Bind(wx.EVT_TEXT, self.on_complete_search_bar,self.search_bar)
#build logger
self.logger = wx.ListCtrl(self.panel, -1, size=(100*self.GUI_RESOLUTION,475*self.GUI_RESOLUTION),style=wx.LC_REPORT)
self.logger.SetFont(font1)
self.logger.InsertColumn(0, 'specimen',width=75*self.GUI_RESOLUTION)
self.logger.InsertColumn(1, 'fit name',width=65*self.GUI_RESOLUTION)
self.logger.InsertColumn(2, 'max',width=55*self.GUI_RESOLUTION)
self.logger.InsertColumn(3, 'min',width=55*self.GUI_RESOLUTION)
self.logger.InsertColumn(4, 'n',width=25*self.GUI_RESOLUTION)
self.logger.InsertColumn(5, 'fit type',width=60*self.GUI_RESOLUTION)
self.logger.InsertColumn(6, 'dec',width=45*self.GUI_RESOLUTION)
self.logger.InsertColumn(7, 'inc',width=45*self.GUI_RESOLUTION)
self.logger.InsertColumn(8, 'mad',width=45*self.GUI_RESOLUTION)
self.logger.InsertColumn(9, 'dang',width=45*self.GUI_RESOLUTION)
self.logger.InsertColumn(10, 'a95',width=45*self.GUI_RESOLUTION)
self.logger.InsertColumn(11, 'K',width=45*self.GUI_RESOLUTION)
self.logger.InsertColumn(12, 'R',width=45*self.GUI_RESOLUTION)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnClick_listctrl, self.logger)
self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK,self.OnRightClickListctrl,self.logger)
self.logger.SetHelpText(dieh.logger_help)
#set fit attributes boxsizers
self.display_sizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "display options"), wx.HORIZONTAL)
self.name_sizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "fit name/color"), wx.VERTICAL)
self.bounds_sizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "fit bounds"), wx.VERTICAL)
self.buttons_sizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY), wx.VERTICAL)
#logger display selection box
UPPER_LEVEL = self.parent.level_box.GetValue()
if UPPER_LEVEL=='sample':
name_choices = self.parent.samples
if UPPER_LEVEL=='site':
name_choices = self.parent.sites
if UPPER_LEVEL=='location':
name_choices = self.parent.locations
if UPPER_LEVEL=='study':
name_choices = ['this study']
self.level_box = wx.ComboBox(self.panel, -1, size=(110*self.GUI_RESOLUTION, 25), value=UPPER_LEVEL, choices=['sample','site','location','study'], style=wx.CB_DROPDOWN|wx.TE_READONLY)
self.Bind(wx.EVT_COMBOBOX, self.on_select_high_level,self.level_box)
self.level_box.SetHelpText(dieh.level_box_help)
self.level_names = wx.ComboBox(self.panel, -1, size=(110*self.GUI_RESOLUTION, 25), value=self.parent.level_names.GetValue(), choices=name_choices, style=wx.CB_DROPDOWN|wx.TE_READONLY)
self.Bind(wx.EVT_COMBOBOX, self.on_select_level_name,self.level_names)
self.level_names.SetHelpText(dieh.level_names_help)
#mean type and plot display boxes
self.mean_type_box = wx.ComboBox(self.panel, -1, size=(110*self.GUI_RESOLUTION, 25), value=self.parent.mean_type_box.GetValue(), choices=['Fisher','Fisher by polarity','None'], style=wx.CB_DROPDOWN|wx.TE_READONLY, name="high_type")
self.Bind(wx.EVT_COMBOBOX, self.on_select_mean_type_box,self.mean_type_box)
self.mean_type_box.SetHelpText(dieh.mean_type_help)
self.mean_fit_box = wx.ComboBox(self.panel, -1, size=(110*self.GUI_RESOLUTION, 25), value=self.parent.mean_fit, choices=(['None','All'] + self.parent.fit_list), style=wx.CB_DROPDOWN|wx.TE_READONLY, name="high_type")
self.Bind(wx.EVT_COMBOBOX, self.on_select_mean_fit_box,self.mean_fit_box)
self.mean_fit_box.SetHelpText(dieh.mean_fit_help)
#show box
if UPPER_LEVEL == "study" or UPPER_LEVEL == "location":
show_box_choices = ['specimens','samples','sites']
if UPPER_LEVEL == "site":
show_box_choices = ['specimens','samples']
if UPPER_LEVEL == "sample":
show_box_choices = ['specimens']
self.show_box = wx.ComboBox(self.panel, -1, size=(110*self.GUI_RESOLUTION, 25), value='specimens', choices=show_box_choices, style=wx.CB_DROPDOWN|wx.TE_READONLY,name="high_elements")
self.Bind(wx.EVT_COMBOBOX, self.on_select_show_box,self.show_box)
self.show_box.SetHelpText(dieh.show_help)
#coordinates box
self.coordinates_box = wx.ComboBox(self.panel, -1, size=(110*self.GUI_RESOLUTION, 25), choices=self.parent.coordinate_list, value=self.parent.coordinates_box.GetValue(), style=wx.CB_DROPDOWN|wx.TE_READONLY, name="coordinates")
self.Bind(wx.EVT_COMBOBOX, self.on_select_coordinates,self.coordinates_box)
self.coordinates_box.SetHelpText(dieh.coordinates_box_help)
#bounds select boxes
self.tmin_box = wx.ComboBox(self.panel, -1, size=(80*self.GUI_RESOLUTION, 25), choices=[''] + self.parent.T_list, style=wx.CB_DROPDOWN|wx.TE_READONLY, name="lower bound")
self.tmin_box.SetHelpText(dieh.tmin_box_help)
self.tmax_box = wx.ComboBox(self.panel, -1, size=(80*self.GUI_RESOLUTION, 25), choices=[''] + self.parent.T_list, style=wx.CB_DROPDOWN|wx.TE_READONLY, name="upper bound")
self.tmax_box.SetHelpText(dieh.tmax_box_help)
#color box
self.color_dict = self.parent.color_dict
self.color_box = wx.ComboBox(self.panel, -1, size=(80*self.GUI_RESOLUTION, 25), choices=[''] + sorted(self.color_dict.keys()), style=wx.CB_DROPDOWN|wx.TE_PROCESS_ENTER, name="color")
self.Bind(wx.EVT_TEXT_ENTER, self.add_new_color, self.color_box)
self.color_box.SetHelpText(dieh.color_box_help)
#name box
self.name_box = wx.TextCtrl(self.panel, -1, size=(80*self.GUI_RESOLUTION, 25), name="name")
self.name_box.SetHelpText(dieh.name_box_help)
#more mac stuff
h_size_buttons,button_spacing = 25,5.5
if is_mac: h_size_buttons,button_spacing = 18,0.
#buttons
self.add_all_button = wx.Button(self.panel, id=-1, label='add new fit to all specimens',size=(160*self.GUI_RESOLUTION,h_size_buttons))
self.add_all_button.SetFont(font1)
self.Bind(wx.EVT_BUTTON, self.add_fit_to_all, self.add_all_button)
self.add_all_button.SetHelpText(dieh.add_all_help)
self.add_fit_button = wx.Button(self.panel, id=-1, label='add fit to highlighted specimens',size=(160*self.GUI_RESOLUTION,h_size_buttons))
self.add_fit_button.SetFont(font1)
self.Bind(wx.EVT_BUTTON, self.add_highlighted_fits, self.add_fit_button)
self.add_fit_button.SetHelpText(dieh.add_fit_btn_help)
self.delete_fit_button = wx.Button(self.panel, id=-1, label='delete highlighted fits',size=(160*self.GUI_RESOLUTION,h_size_buttons))
self.delete_fit_button.SetFont(font1)
self.Bind(wx.EVT_BUTTON, self.delete_highlighted_fits, self.delete_fit_button)
self.delete_fit_button.SetHelpText(dieh.delete_fit_btn_help)
self.apply_changes_button = wx.Button(self.panel, id=-1, label='apply changes to highlighted fits',size=(160*self.GUI_RESOLUTION,h_size_buttons))
self.apply_changes_button.SetFont(font1)
self.Bind(wx.EVT_BUTTON, self.apply_changes, self.apply_changes_button)
self.apply_changes_button.SetHelpText(dieh.apply_changes_help)
#windows
display_window_0 = wx.GridSizer(2, 1, 10*self.GUI_RESOLUTION, 19*self.GUI_RESOLUTION)
display_window_1 = wx.GridSizer(2, 1, 10*self.GUI_RESOLUTION, 19*self.GUI_RESOLUTION)
display_window_2 = wx.GridSizer(2, 1, 10*self.GUI_RESOLUTION, 19*self.GUI_RESOLUTION)
name_window = wx.GridSizer(2, 1, 10*self.GUI_RESOLUTION, 19*self.GUI_RESOLUTION)
bounds_window = wx.GridSizer(2, 1, 10*self.GUI_RESOLUTION, 19*self.GUI_RESOLUTION)
buttons1_window = wx.GridSizer(4, 1, 5*self.GUI_RESOLUTION, 19*self.GUI_RESOLUTION)
display_window_0.AddMany( [(self.coordinates_box, wx.ALIGN_LEFT),
(self.show_box, wx.ALIGN_LEFT)] )
display_window_1.AddMany( [(self.level_box, wx.ALIGN_LEFT),
(self.level_names, wx.ALIGN_LEFT)] )
display_window_2.AddMany( [(self.mean_type_box, wx.ALIGN_LEFT),
(self.mean_fit_box, wx.ALIGN_LEFT)] )
name_window.AddMany( [(self.name_box, wx.ALIGN_LEFT),
(self.color_box, wx.ALIGN_LEFT)] )
bounds_window.AddMany( [(self.tmin_box, wx.ALIGN_LEFT),
(self.tmax_box, wx.ALIGN_LEFT)] )
buttons1_window.AddMany( [(self.add_fit_button, wx.ALL|wx.ALIGN_CENTER|wx.SHAPED, 0),
(self.add_all_button, wx.ALL|wx.ALIGN_CENTER|wx.SHAPED, 0),
(self.delete_fit_button, wx.ALL|wx.ALIGN_CENTER|wx.SHAPED, 0),
(self.apply_changes_button, wx.ALL|wx.ALIGN_CENTER|wx.SHAPED, 0)])
self.display_sizer.Add(display_window_0, 1, wx.TOP|wx.EXPAND, 8)
self.display_sizer.Add(display_window_1, 1, wx.TOP | wx.LEFT|wx.EXPAND, 8)
self.display_sizer.Add(display_window_2, 1, wx.TOP | wx.LEFT|wx.EXPAND, 8)
self.name_sizer.Add(name_window, 1, wx.TOP, 5.5)
self.bounds_sizer.Add(bounds_window, 1, wx.TOP, 5.5)
self.buttons_sizer.Add(buttons1_window, 1, wx.TOP, 0)
#duplicate high levels plot
self.fig = Figure((2.5*self.GUI_RESOLUTION, 2.5*self.GUI_RESOLUTION), dpi=100)
self.canvas = FigCanvas(self.panel, -1, self.fig, )
self.toolbar = NavigationToolbar(self.canvas)
self.toolbar.Hide()
self.toolbar.zoom()
self.high_EA_setting = "Zoom"
self.canvas.Bind(wx.EVT_LEFT_DCLICK,self.on_equalarea_high_select)
self.canvas.Bind(wx.EVT_MOTION,self.on_change_high_mouse_cursor)
self.canvas.Bind(wx.EVT_MIDDLE_DOWN,self.home_high_equalarea)
self.canvas.Bind(wx.EVT_RIGHT_DOWN,self.pan_zoom_high_equalarea)
self.canvas.SetHelpText(dieh.eqarea_help)
self.eqarea = self.fig.add_subplot(111)
draw_net(self.eqarea)
#Higher Level Statistics Box
self.stats_sizer = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY,"mean statistics" ), wx.VERTICAL)
        #build one read-only text field and label row per statistic (no exec needed)
        for parameter in ['mean_type','dec','inc','alpha95','K','R','n_lines','n_planes']:
            window = wx.TextCtrl(self.panel, style=wx.TE_CENTER|wx.TE_READONLY, size=(100*self.GUI_RESOLUTION,25))
            window.SetBackgroundColour(wx.WHITE)
            window.SetFont(font2)
            setattr(self, "%s_window"%parameter, window)
            outer_window = wx.GridSizer(1, 2, 5*self.GUI_RESOLUTION, 15*self.GUI_RESOLUTION)
            outer_window.AddMany([
                (wx.StaticText(self.panel, label=parameter, style=wx.TE_CENTER), wx.EXPAND),
                (window, wx.EXPAND)])
            setattr(self, "%s_outer_window"%parameter, outer_window)
            self.stats_sizer.Add(outer_window, 1, wx.ALIGN_LEFT|wx.EXPAND, 0)
self.switch_stats_button = wx.SpinButton(self.panel, id=wx.ID_ANY, style=wx.SP_HORIZONTAL|wx.SP_ARROW_KEYS|wx.SP_WRAP, name="change stats")
self.Bind(wx.EVT_SPIN, self.on_select_stats_button,self.switch_stats_button)
self.switch_stats_button.SetHelpText(dieh.switch_stats_btn_help)
#construct panel
hbox0 = wx.BoxSizer(wx.HORIZONTAL)
hbox0.Add(self.name_sizer,flag=wx.ALIGN_TOP|wx.EXPAND,border=8)
hbox0.Add(self.bounds_sizer,flag=wx.ALIGN_TOP|wx.EXPAND,border=8)
vbox0 = wx.BoxSizer(wx.VERTICAL)
vbox0.Add(hbox0,flag=wx.ALIGN_TOP,border=8)
vbox0.Add(self.buttons_sizer,flag=wx.ALIGN_TOP,border=8)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(vbox0,flag=wx.ALIGN_TOP,border=8)
hbox1.Add(self.stats_sizer,flag=wx.ALIGN_TOP,border=8)
hbox1.Add(self.switch_stats_button,flag=wx.ALIGN_TOP|wx.EXPAND,border=8)
vbox1 = wx.BoxSizer(wx.VERTICAL)
vbox1.Add(self.display_sizer,flag=wx.ALIGN_TOP,border=8)
vbox1.Add(hbox1,flag=wx.ALIGN_TOP,border=8)
vbox1.Add(self.canvas,proportion=1,flag=wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL | wx.EXPAND,border=8)
vbox2 = wx.BoxSizer(wx.VERTICAL)
vbox2.Add(self.search_bar,proportion=.5,flag=wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.EXPAND, border=8)
vbox2.Add(self.logger,proportion=1,flag=wx.ALIGN_LEFT|wx.EXPAND,border=8)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox2.Add(vbox2,proportion=1,flag=wx.ALIGN_LEFT|wx.EXPAND)
hbox2.Add(vbox1,flag=wx.ALIGN_TOP|wx.EXPAND)
self.panel.SetSizerAndFit(hbox2)
hbox2.Fit(self) | 0.016602 |
def dump_stats(self, filename):
"""
        Similar to profile.Profile.dump_stats, but with a different output format!
"""
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out) | 0.005435 |
def append_to_list(self, source, start=None, hasIndex=False):
        '''Appends a new list to self.nameDict
        Arguments:
source -- source of new name list (filename or list)
start -- starting index of new list
hasIndex -- the file is already indexed
'''
nfy = Numberify()
try:
if start is None:
if type(source) is str:
if hasIndex is True:
newList = self.get_from_indexedFile(source)
else:
newList = nfy.numberify_data(source,
len(self.nameDict) + 1)
else:
newList = nfy.numberify_data(source, start)
            self.nameDict.update(newList)
self.totalCount = len(self.nameDict)
        except Exception:
            print('Unknown error:', exc_info()[0]) | 0.003236 |
def wait_for(self, timeout):
"""
A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on.
"""
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
        # Expose underlying function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator | 0.001597 |
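A hedged usage sketch in the style of the crochet library this appears to come from (setup() and the module-level wait_for are assumptions):

from crochet import setup, wait_for
setup()  # start the Twisted reactor in a background thread

@wait_for(timeout=5.0)
def delayed_value(value):
    # return a Deferred that fires shortly after being called
    from twisted.internet import reactor
    from twisted.internet.defer import Deferred
    d = Deferred()
    reactor.callLater(0.1, d.callback, value)
    return d

print(delayed_value(42))  # blocks briefly, then prints 42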
def calcAcceptanceRatio(self, V, W):
"""
        Given an order vector V and a proposed order vector W, calculate the acceptance ratio for
        changing to W when using MCMC.
        :ivar dict<int,dict<int,int>> wmg: A two-dimensional dictionary that associates integer
            representations of each pair of candidates, cand1 and cand2, with the number of times
            cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
            dictionary represents a weighted majority graph for an election.
        :ivar float phi: A value for phi such that 0 <= phi <= 1.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
:ivar list<int> W: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the proposed sample.
"""
acceptanceRatio = 1.0
for comb in itertools.combinations(V, 2):
#Check if comb[0] is ranked before comb[1] in V and W
vIOverJ = 1
wIOverJ = 1
if V.index(comb[0]) > V.index(comb[1]):
vIOverJ = 0
if W.index(comb[0]) > W.index(comb[1]):
wIOverJ = 0
acceptanceRatio = acceptanceRatio * self.phi**(self.wmg[comb[0]][comb[1]]*(vIOverJ-wIOverJ))
return acceptanceRatio | 0.008713 |
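A small worked example: with phi = 0.5 and a two-candidate election in which candidate 1 beats candidate 2 by a margin of 3, flipping the pair is accepted with ratio phi**3 = 0.125 ('sampler' is an assumed instance name):

sampler.wmg = {1: {2: 3}, 2: {1: -3}}
sampler.phi = 0.5
# only the (1, 2) pair changes order, so the exponent is 3 * (1 - 0)
assert sampler.calcAcceptanceRatio([1, 2], [2, 1]) == 0.5 ** 3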
def slicedIterator(sourceList, sliceSize):
"""
    :param sourceList: list which needs to be sliced
    :type sourceList: list
    :param sliceSize: size of each slice
    :type sliceSize: int
    :return: iterator over the slices of the list
"""
start = 0
end = 0
while len(sourceList) > end:
end = start + sliceSize
yield sourceList[start: end]
start = end | 0.002681 |
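For example, slicing a five-element list into pairs yields a trailing short slice:

>>> list(slicedIterator([1, 2, 3, 4, 5], 2))
[[1, 2], [3, 4], [5]]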
def bivconvolve (sx_a, sy_a, cxy_a, sx_b, sy_b, cxy_b):
"""Given two independent bivariate distributions, compute a bivariate
distribution corresponding to their convolution.
I'm sure this is worked out in a ton of places, but I got the equations
from Pineau+ (2011A&A...527A.126P).
Returns: (sx_c, sy_c, cxy_c), the parameters of the convolved
distribution.
"""
_bivcheck (sx_a, sy_a, cxy_a)
_bivcheck (sx_b, sy_b, cxy_b)
sx_c = np.sqrt (sx_a**2 + sx_b**2)
sy_c = np.sqrt (sy_a**2 + sy_b**2)
cxy_c = cxy_a + cxy_b
return _bivcheck (sx_c, sy_c, cxy_c) | 0.011513 |
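The variances add in quadrature and the covariances add linearly, so a quick numerical check (hypothetical values, and assuming _bivcheck passes its arguments through, as the return statement implies):

>>> bivconvolve(3.0, 1.0, 0.0, 4.0, 1.0, 0.0)
(5.0, 1.4142135623730951, 0.0)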
def on_resize(self, event):
"""Resize handler
Parameters
----------
event : instance of Event
The event.
"""
if self._aspect is None:
return
w, h = self._canvas.size
aspect = self._aspect / (w / h)
self.scale = (self.scale[0], self.scale[0] / aspect)
self.shader_map() | 0.005362 |
def format_formula(formula):
"""
    Converts a chemical formula string into
    LaTeX format for labelling purposes.
Args:
formula (str): Chemical formula
"""
formatted_formula = ""
number_format = ""
for i, s in enumerate(formula):
if s.isdigit():
if not number_format:
number_format = "_{"
number_format += s
if i == len(formula) - 1:
number_format += "}"
formatted_formula += number_format
else:
if number_format:
number_format += "}"
formatted_formula += number_format
number_format = ""
formatted_formula += s
return r"$%s$" % (formatted_formula) | 0.001316 |
def activationFunctionASIG(self, x):
"""
        Determine the activation of a node based on that node's net input.
"""
def act(v):
if v < -15.0: return 0.0
elif v > 15.0: return 1.0
else: return 1.0 / (1.0 + Numeric.exp(-v))
return Numeric.array(list(map(act, x)), 'f') | 0.020588 |
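Numeric is the ancient predecessor of NumPy; an approximately equivalent modern sketch (it clips the input rather than snapping the output exactly to 0/1, so extreme values differ by at most ~3e-7):

import numpy as np

def asig(x):
    # clipped logistic sigmoid, vectorized over the net input
    v = np.clip(np.asarray(x, dtype='f'), -15.0, 15.0)
    return 1.0 / (1.0 + np.exp(-v))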
def parse(self, rec):
"""Retrieve row data from files associated with the ISATabRecord.
"""
final_studies = []
for study in rec.studies:
source_data = self._parse_study(study.metadata["Study File Name"],
["Source Name", "Sample Name", "Comment[ENA_SAMPLE]"])
if source_data:
study.nodes = source_data
final_assays = []
for assay in study.assays:
cur_assay = ISATabAssayRecord(assay)
assay_data = self._parse_study(assay["Study Assay File Name"],
["Sample Name","Extract Name","Raw Data File","Derived Data File", "Image File", "Acquisition Parameter Data File", "Free Induction Decay Data File"])
cur_assay.nodes = assay_data
self._get_process_nodes(assay["Study Assay File Name"], cur_assay)
final_assays.append(cur_assay)
study.assays = final_assays
#get process nodes
self._get_process_nodes(study.metadata["Study File Name"], study)
final_studies.append(study)
rec.studies = final_studies
return rec | 0.008574 |
def write_wonambi(data, filename, subj_id='', dtype='float64'):
"""Write file in simple Wonambi format.
Parameters
----------
data : instance of ChanTime
data with only one trial
filename : path to file
file to export to (the extensions .won and .dat will be added)
subj_id : str
subject id
dtype : str
numpy dtype in which you want to save the data
Notes
-----
Wonambi format creates two files, one .won with the dataset info as json
file and one .dat with the memmap recordings.
It will happily overwrite any existing file with the same name.
Memory-mapped matrices are column-major, Fortran-style, to be compatible
with Matlab.
"""
filename = Path(filename)
json_file = filename.with_suffix('.won')
memmap_file = filename.with_suffix('.dat')
start_time = data.start_time + timedelta(seconds=data.axis['time'][0][0])
start_time_str = start_time.strftime('%Y-%m-%d %H:%M:%S.%f')
dataset = {'subj_id': subj_id,
'start_time': start_time_str,
's_freq': data.s_freq,
'chan_name': list(data.axis['chan'][0]),
'n_samples': int(data.number_of('time')[0]),
'dtype': dtype,
}
with json_file.open('w') as f:
dump(dataset, f, sort_keys=True, indent=4)
memshape = (len(dataset['chan_name']),
dataset['n_samples'])
mem = memmap(str(memmap_file), dtype, mode='w+', shape=memshape, order='F')
mem[:, :] = data.data[0]
mem.flush() | 0.000635 |
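Reading the pair of files back follows directly from the format described above (a hedged sketch; 'recording' is an illustrative base name):

import json
from numpy import memmap

with open('recording.won') as f:
    dataset = json.load(f)

# column-major memmap shaped (n_channels, n_samples), matching the writer
data = memmap('recording.dat', dtype=dataset['dtype'], mode='r',
              shape=(len(dataset['chan_name']), dataset['n_samples']),
              order='F')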
def new_project():
"""New Project."""
form = NewProjectForm()
if not form.validate_on_submit():
return jsonify(errors=form.errors), 400
data = form.data
data['slug'] = slugify(data['name'])
data['owner_id'] = get_current_user_id()
id = add_instance('project', **data)
if not id:
return jsonify(errors={'name': ['duplicated slug.']}), 400
project = get_data_or_404('project', id)
return jsonify(**project) | 0.002151 |
def validate_examples(example_file):
"""Validate that examples are well formed.
Pi should sum to 1.0
value should be {-1,1}
Usage:
validate_examples("../data/300.tfrecord.zz")
"""
def test_example(raw):
example = tf.train.Example()
example.ParseFromString(raw)
pi = np.frombuffer(example.features.feature['pi'].bytes_list.value[0], np.float32)
value = example.features.feature['outcome'].float_list.value[0]
assert abs(pi.sum() - 1) < 1e-4, pi.sum()
assert value in (-1, 1), value
opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
for record in tqdm(tf.python_io.tf_record_iterator(example_file, opts)):
test_example(record) | 0.003979 |
def pending_assignment(self):
"""Return the pending partition assignment that this state represents."""
return {
self.partitions[pid].name: [
self.brokers[bid].id for bid in self.replicas[pid]
]
for pid in set(self.pending_partitions)
} | 0.009615 |
def get_active_window_pos():
    '''Screen coordinates, massaged so that the movewindow command can
    restore the window to the same position.
    Returns x, y.
    '''
# http://stackoverflow.com/questions/26050788/in-bash-on-ubuntu-14-04-unity-how-can-i-get-the-total-size-of-an-open-window-i/26060527#26060527
cmd = ['xdotool','getactivewindow', 'getwindowgeometry']
res = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate()
stdout = res[0].decode('utf-8').splitlines()
pos = stdout[1].split(':')[1].split(',')
geo = stdout[2].split(':')[1].split('x')
x, y = int(pos[0].strip()), int(pos[1].split('(')[0].strip())
w, h = int(geo[0].strip()), int(geo[1].strip())
# get the window decorations
window_id = get_window_id()
cmd = ['xprop', '_NET_FRAME_EXTENTS', '-id', window_id]
res = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate()
decos = res[0].decode('utf-8').split('=')[1].split(',')
l, r = int(decos[0].strip()), int(decos[1].strip())
t, b = int(decos[2].strip()), int(decos[3].strip())
return x-l, y-t | 0.008726 |
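The returned coordinates are already corrected for the window-manager frame, so a hedged sketch of restoring the position later with xdotool:

import subprocess

x, y = get_active_window_pos()
# ... move or resize the window in between ...
subprocess.call(['xdotool', 'getactivewindow', 'windowmove', str(x), str(y)])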
def configparser(self):
"""
Adapter to dump/load INI format strings and files using standard library's
``ConfigParser`` (or the backported configparser module in Python 2).
Returns:
ConfigPersistenceAdapter
"""
if self._configparser_adapter is None:
self._configparser_adapter = ConfigPersistenceAdapter(
config=self,
reader_writer=ConfigParserReaderWriter(
config_parser_factory=self.settings.configparser_factory,
),
)
return self._configparser_adapter | 0.006421 |
def check(self):
"""
Basic checks that don't depend on any context.
        Adapted from Bitcoin code: main.cpp
"""
self._check_tx_inout_count()
self._check_txs_out()
self._check_txs_in()
# Size limits
self._check_size_limit() | 0.00692 |
def randomise_labels(
self,
inplace=False,
):
""" Shuffles the leaf labels, but doesn't alter the tree structure """
if not inplace:
t = self.copy()
else:
t = self
names = list(t.labels)
random.shuffle(names)
for l in t._tree.leaf_node_iter():
l.taxon._label = names.pop()
t._dirty = True
return t | 0.009368 |
def load_config_yaml(self, flags, config_dict):
""" Load config dict and yaml dict and then override both with flags dict. """
if config_dict is None:
print('Config File not specified. Using only input flags.')
return flags
try:
config_yaml_dict = self.cfg_from_file(flags['YAML_FILE'], config_dict)
except KeyError:
print('Yaml File not specified. Using only input flags and config file.')
return config_dict
print('Using input flags, config file, and yaml file.')
config_yaml_flags_dict = self._merge_a_into_b_simple(flags, config_yaml_dict)
return config_yaml_flags_dict | 0.008708 |
def validate_config_key(ctx, param, value):
"""Validate a configuration key according to `section.item`."""
if not value:
return value
try:
section, item = value.split(".", 1)
except ValueError:
raise click.BadArgumentUsage("Given key does not contain a section name.")
else:
return section, item | 0.005731 |
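A quick check of the splitting rule (the first two click-callback arguments are unused here; the key is hypothetical):

>>> validate_config_key(None, None, "core.editor")
('core', 'editor')
>>> validate_config_key(None, None, "editor")  # no dot -> BadArgumentUsage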
def _any(self, memory, addr, **kwargs):
"""
Gets any solution of an address.
"""
return memory.state.solver.eval(addr, exact=kwargs.pop('exact', self._exact), **kwargs) | 0.015 |
def get_content_object(self, page, language, ctype):
"""Gets the latest published :class:`Content <pages.models.Content>`
for a particular page, language and placeholder type."""
params = {
'language': language,
'type': ctype,
'page': None if page is fake_page else page
}
if page.freeze_date:
params['creation_date__lte'] = page.freeze_date
return self.filter(**params).latest() | 0.004211 |
def render_string(self, template_name, **kwargs):
"""
添加注入模板的自定义参数等信息
"""
if hasattr(self, "session"): kwargs["session"] = self.session
return super(BaseHandler, self).render_string(template_name, **kwargs) | 0.011765 |
def config_delete(args):
""" Remove a method config from a workspace """
r = fapi.delete_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, [200,204])
return r.text if r.text else None | 0.010204 |
def dasonw(fname, ftype, ifname, ncomch):
"""
Internal undocumented command for creating a new DAS file
:param fname: filename
:type fname: str
:param ftype: type
:type ftype: str
:param ifname: internal file name
:type ifname: str
:param ncomch: amount of comment area
:type ncomch: int
:return: Handle to new DAS file
:rtype: int
"""
fnamelen = ctypes.c_int(len(fname))
ftypelen = ctypes.c_int(len(ftype))
ifnamelen = ctypes.c_int(len(ifname))
ncomch = ctypes.c_int(ncomch)
handle = ctypes.c_int()
fname = stypes.stringToCharP(fname)
ftype = stypes.stringToCharP(ftype)
ifname = stypes.stringToCharP(ifname)
libspice.dasonw_(fname, ftype, ifname, ctypes.byref(ncomch), ctypes.byref(handle), fnamelen, ftypelen, ifnamelen)
return handle.value | 0.009445 |
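A hedged usage sketch, assuming this wrapper lives in a SpiceyPy-style module that also exposes dascls for closing the handle (file names are illustrative):

# create an empty DAS file with no reserved comment area, then close it
handle = dasonw('new.das', 'TEST', 'new.das/internal-name', 0)
dascls(handle)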
def gen_nf_quick_check(output, ascii_props=False, append=False, prefix=""):
"""Generate quick check properties."""
categories = []
nf = {}
all_chars = ALL_ASCII if ascii_props else ALL_CHARS
file_name = os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedNormalizationProps.txt')
with codecs.open(file_name, 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split('#')[0].split(';')
if len(data) < 2:
continue
if not data[1].strip().lower().endswith('_qc'):
continue
span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
if span is None:
continue
name = format_name(data[1][:-3] + 'quickcheck')
subvalue = format_name(data[2])
if name not in nf:
nf[name] = {}
categories.append(name)
if subvalue not in nf[name]:
nf[name][subvalue] = []
nf[name][subvalue].extend(span)
for k1, v1 in nf.items():
temp = set()
for k2 in list(v1.keys()):
temp |= set(v1[k2])
v1['y'] = list(all_chars - temp)
for k1, v1 in nf.items():
for name in list(v1.keys()):
s = set(nf[k1][name])
nf[k1][name] = sorted(s)
# Convert characters values to ranges
char2range(nf, is_bytes=ascii_props)
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
for key, value in sorted(nf.items()):
# Write out the Unicode properties
f.write('%s_%s = {\n' % (prefix, key.replace('quickcheck', '_quick_check')))
count = len(value) - 1
i = 0
for k1, v1 in sorted(value.items()):
f.write(' "%s": "%s"' % (k1, v1))
if i == count:
f.write('\n}\n')
else:
f.write(',\n')
i += 1
return categories | 0.001841 |
def get_market_most_active(*args, **kwargs):
"""
MOVED to iexfinance.stocks.get_market_most_active
"""
import warnings
    # WNG_MSG is assumed to be a %-format string with two placeholders
    warnings.warn(WNG_MSG % ("get_market_most_active",
                             "stocks.get_market_most_active"))
return stocks.get_market_most_active(*args, **kwargs) | 0.003175 |
def show_dependencies(self, stream=sys.stdout):
"""Writes to the given stream the ASCII representation of the dependency tree."""
def child_iter(node):
return [d.node for d in node.deps]
def text_str(node):
return colored(str(node), color=node.status.color_opts["color"])
for task in self.iflat_tasks():
print(draw_tree(task, child_iter, text_str), file=stream) | 0.006961 |
def get_all_maintenance_window(self, **kwargs): # noqa: E501
"""Get all maintenance windows for a customer # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_maintenance_window(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedMaintenanceWindow
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_maintenance_window_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_maintenance_window_with_http_info(**kwargs) # noqa: E501
return data | 0.002101 |
def from_dict(data, ctx):
"""
Instantiate a new MarketOrderTransaction from a dict (generally from
loading a JSON response). The data used to instantiate the
MarketOrderTransaction is a shallow copy of the dict passed in, with
any complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('units') is not None:
data['units'] = ctx.convert_decimal_number(
data.get('units')
)
if data.get('priceBound') is not None:
data['priceBound'] = ctx.convert_decimal_number(
data.get('priceBound')
)
if data.get('tradeClose') is not None:
data['tradeClose'] = \
ctx.transaction.MarketOrderTradeClose.from_dict(
data['tradeClose'], ctx
)
if data.get('longPositionCloseout') is not None:
data['longPositionCloseout'] = \
ctx.transaction.MarketOrderPositionCloseout.from_dict(
data['longPositionCloseout'], ctx
)
if data.get('shortPositionCloseout') is not None:
data['shortPositionCloseout'] = \
ctx.transaction.MarketOrderPositionCloseout.from_dict(
data['shortPositionCloseout'], ctx
)
if data.get('marginCloseout') is not None:
data['marginCloseout'] = \
ctx.transaction.MarketOrderMarginCloseout.from_dict(
data['marginCloseout'], ctx
)
if data.get('delayedTradeClose') is not None:
data['delayedTradeClose'] = \
ctx.transaction.MarketOrderDelayedTradeClose.from_dict(
data['delayedTradeClose'], ctx
)
if data.get('clientExtensions') is not None:
data['clientExtensions'] = \
ctx.transaction.ClientExtensions.from_dict(
data['clientExtensions'], ctx
)
if data.get('takeProfitOnFill') is not None:
data['takeProfitOnFill'] = \
ctx.transaction.TakeProfitDetails.from_dict(
data['takeProfitOnFill'], ctx
)
if data.get('stopLossOnFill') is not None:
data['stopLossOnFill'] = \
ctx.transaction.StopLossDetails.from_dict(
data['stopLossOnFill'], ctx
)
if data.get('trailingStopLossOnFill') is not None:
data['trailingStopLossOnFill'] = \
ctx.transaction.TrailingStopLossDetails.from_dict(
data['trailingStopLossOnFill'], ctx
)
if data.get('tradeClientExtensions') is not None:
data['tradeClientExtensions'] = \
ctx.transaction.ClientExtensions.from_dict(
data['tradeClientExtensions'], ctx
)
return MarketOrderTransaction(**data) | 0.00066 |
def dict(self):
"""A dict that holds key/values for all of the properties in the
object.
:return:
"""
d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs
if p.key not in ('contents', 'dataset')}
d['modified_datetime'] = self.modified_datetime
d['modified_ago'] = self.modified_ago
return d | 0.005236 |
def standardize_cell(cell,
to_primitive=False,
no_idealize=False,
symprec=1e-5,
angle_tolerance=-1.0):
"""Return standardized cell.
Args:
cell, symprec, angle_tolerance:
See the docstring of get_symmetry.
to_primitive:
bool: If True, the standardized primitive cell is created.
no_idealize:
            bool: If True, idealization of the lengths and angles of the
            basis vectors, and of the atomic positions, according to the
            crystal symmetry is disabled.
Return:
The standardized unit cell or primitive cell is returned by a tuple of
(lattice, positions, numbers).
If it fails, None is returned.
"""
_set_no_error()
lattice, _positions, _numbers, _ = _expand_cell(cell)
if lattice is None:
return None
# Atomic positions have to be specified by scaled positions for spglib.
num_atom = len(_positions)
positions = np.zeros((num_atom * 4, 3), dtype='double', order='C')
positions[:num_atom] = _positions
numbers = np.zeros(num_atom * 4, dtype='intc')
numbers[:num_atom] = _numbers
num_atom_std = spg.standardize_cell(lattice,
positions,
numbers,
num_atom,
to_primitive * 1,
no_idealize * 1,
symprec,
angle_tolerance)
_set_error_message()
if num_atom_std > 0:
return (np.array(lattice.T, dtype='double', order='C'),
np.array(positions[:num_atom_std], dtype='double', order='C'),
np.array(numbers[:num_atom_std], dtype='intc'))
else:
return None | 0.00052 |
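A hedged usage sketch with a conventional bcc cell of two atoms, which the call reduces to its one-atom primitive cell:

import numpy as np

lattice = np.eye(3) * 4.0                       # cubic conventional cell
positions = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]  # bcc basis, scaled coordinates
numbers = [26, 26]                              # e.g. two Fe atoms
cell = (lattice, positions, numbers)

primitive = standardize_cell(cell, to_primitive=True, symprec=1e-5)
if primitive is not None:
    prim_lattice, prim_positions, prim_numbers = primitive  # one atom for bcc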