text (stringlengths 78–104k) | score (float64 0–0.18) |
---|---|
def detectUbuntuTablet(self):
"""Return detection of an Ubuntu Mobile OS tablet
Detects a tablet running the Ubuntu Mobile OS.
"""
if UAgentInfo.deviceUbuntu in self.__userAgent \
and UAgentInfo.deviceTablet in self.__userAgent:
return True
return False | 0.006289 |
def get_text(self, index=None):
"""
Gets the text at the given index. If index is None, returns the text for
the current value, i.e. self.get_text(self.get_value()).
"""
if index is None:
return self.get_text(self.get_value())
else:
return str(self._widget.itemText(index)) | 0.012658 |
def get_outputs_from_cm(index, cm):
"""Return indices of the outputs of node with the given index."""
return tuple(i for i in range(cm.shape[0]) if cm[index][i]) | 0.005917 |
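A minimal usage sketch of the row scan above, using a hypothetical 3-node NumPy connectivity matrix where cm[i][j] is truthy when node i feeds node j:
import numpy as np
cm = np.array([[0, 1, 1],
               [0, 0, 1],
               [1, 0, 0]])
# Same row scan as get_outputs_from_cm, inlined for node 0.
outputs = tuple(j for j in range(cm.shape[0]) if cm[0][j])
print(outputs)  # (1, 2): node 0 connects to nodes 1 and 2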
def cycle(self):
"""
Request one batch of events from Skype, calling :meth:`onEvent` with each event in turn.
Subclasses may override this method to alter loop functionality.
"""
try:
events = self.getEvents()
except requests.ConnectionError:
return
for event in events:
self.onEvent(event)
if self.autoAck:
event.ack() | 0.006818 |
def _partition_group(self, group):
"""
Args:
group (set): A group of states
Returns:
tuple: The two resulting sub-groups and the distinguishing string
"""
for (group1, group2, distinguish_string) in self.bookeeping:
if group & group1 != set() and not group.issubset(group1):
new_g1 = group & group1
new_g2 = group - group1
return (new_g1, new_g2, distinguish_string)
if group & group2 != set() and not group.issubset(group2):
new_g1 = group & group2
new_g2 = group - group2
return (new_g1, new_g2, distinguish_string)
assert False, "Unmatched group partition" | 0.002805 |
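A standalone sketch of the set arithmetic above, with hypothetical state names and a single bookkeeping entry; it shows how a group that straddles group1 is split into intersection and difference:
group = {"s1", "s2", "s3"}
group1, group2, dstring = {"s1", "s4"}, {"s2", "s3", "s5"}, "ab"
if group & group1 and not group.issubset(group1):
    new_g1, new_g2 = group & group1, group - group1
    print(new_g1, new_g2, dstring)  # {'s1'} and {'s2', 's3'}, with string 'ab'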
async def service_info(self, name):
"""Pull descriptive info of a service by name.
Information returned includes the service's user friendly
name and whether it was preregistered or added dynamically.
Returns:
dict: A dictionary of service information with the following keys
set:
long_name (string): The user friendly name of the service
preregistered (bool): Whether the service was explicitly
called out as a preregistered service.
"""
return await self.send_command(OPERATIONS.CMD_QUERY_INFO, {'name': name},
MESSAGES.QueryInfoResponse, timeout=5.0) | 0.004149 |
def _misalign_split(self,alns):
"""Requires alignment strings have been set so for each exon we have
query, target and query_quality
_has_quality will specify whether or not the quality is meaningful
"""
total = []
z = 0
for x in alns:
z += 1
exon_num = z
if self._alignment.strand == '-':
exon_num = (len(alns)-z)+1
buffer = {'query':x['query'][0],'target':x['target'][0],'query_quality':x['query_quality'][0],'exon':exon_num}
if buffer['query'] == '-': buffer['nt'] = buffer['target']
elif buffer['target'] == '-': buffer['nt'] = buffer['query']
elif buffer['query'] == buffer['target']: buffer['nt'] = buffer['query']
elif buffer['query'] != buffer['target']: buffer['nt'] = '*'
else:
sys.stderr.write("WARNING unkonwn case\n")
for i in range(1,len(x['query'])):
qchar = x['query'][i]
tchar = x['target'][i]
qualchar = x['query_quality'][i]
if qchar != tchar and (qchar != '-' and tchar != '-'):
#classic mismatch
#print 'mismatch'
#print buffer
total.append(buffer)
buffer = {'query':qchar,'target':tchar,'query_quality':qualchar,'exon':exon_num}
buffer['nt'] = '*'
elif qchar == buffer['nt'] or tchar == buffer['nt']:
# its a homopolymer match
buffer['query'] += qchar
buffer['target'] += tchar
buffer['query_quality'] += qualchar
#print 'homopoly'
else:
#print 'new thing'
#print buffer
total.append(buffer)
buffer = {'query':qchar,'target':tchar,'query_quality':qualchar,'exon':exon_num}
if qchar == '-': buffer['nt'] = tchar
else: buffer['nt'] = qchar
total.append(buffer)
result = [AlignmentErrors.HPAGroup(self,y) for y in total]
return result | 0.037566 |
def p_function_call_variable(p):
'function_call : variable_without_objects LPAREN function_call_parameter_list RPAREN'
p[0] = ast.FunctionCall(p[1], p[3], lineno=p.lineno(2)) | 0.010989 |
def get_pages(url):
"""
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
"""
while True:
yield url
doc = html.parse(url).find("body")
links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")]
if not links:
break
url = urljoin(url, links[0].get('href')) | 0.004843 |
def create_SpikeGeneratorGroup(self,time_label=0,index_label=1,reorder_indices=False,index_offset=True):
"""
Creates a brian2 SpikeGeneratorGroup object that contains the spikes in this container.
time_label: Name or number of the label that contains the spike times (default: 0 / first column)
index_label: Name or number of the label that contains the cell indices (default: 1 / the second column)
reorder_indices: If the cell indices do not matter, the SpikeGeneratorGroup can be created with only as many unique neurons as necessary (default: False / The indices are preserved)
index_offset: If set to a number, this will be subtracted from every index (default: True)
If set to True, the `.min` of the label dimension will be subtracted.
If set to False, nothing will be subtracted.
"""
import brian2
spike_times = self.spike_times.convert(time_label,'s')[time_label]*brian2.second
indices = [0] * len(spike_times)
if len(self.spike_times.find_labels(index_label)):
indices = self.spike_times[index_label]
if index_offset is not False:
if index_offset is True:
indices = indices - self.spike_times.get_label(index_label).min
else:
indices = indices - index_offset
N = np.max(indices)
else:
N = self.spike_times.get_label(index_label).max
if reorder_indices:
indices_levels = np.sort(np.unique(indices)).tolist()
indices = np.array([indices_levels.index(i) for i in indices])
N = len(indices_levels)
return brian2.SpikeGeneratorGroup(N+1,indices = indices,
times = spike_times) | 0.011041 |
def remove_alert_tag(self, id, tag_value, **kwargs): # noqa: E501
"""Remove a tag from a specific alert # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_alert_tag(id, tag_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str tag_value: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_alert_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
else:
(data) = self.remove_alert_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
return data | 0.002075 |
def get_installed_extension(name,
user=None,
host=None,
port=None,
maintenance_db=None,
password=None,
runas=None):
'''
Get info about an installed postgresql extension
CLI Example:
.. code-block:: bash
salt '*' postgres.get_installed_extension plpgsql
'''
return installed_extensions(user=user,
host=host,
port=port,
maintenance_db=maintenance_db,
password=password,
runas=runas).get(name, None) | 0.001316 |
def _get_max_subplot_ids(fig):
"""
Given an input figure, return a dict containing the max subplot number
for each subplot type in the figure
Parameters
----------
fig: dict
A plotly figure dict
Returns
-------
dict
A dict from subplot type strings to integers indicating the largest
subplot number in the figure of that subplot type
"""
max_subplot_ids = {subplot_type: 0
for subplot_type in _subplot_types}
max_subplot_ids['xaxis'] = 0
max_subplot_ids['yaxis'] = 0
for trace in fig.get('data', []):
trace_type = trace.get('type', 'scatter')
subplot_types = _trace_to_subplot.get(trace_type, [])
for subplot_type in subplot_types:
subplot_prop_name = _get_subplot_prop_name(subplot_type)
subplot_val_prefix = _get_subplot_val_prefix(subplot_type)
subplot_val = trace.get(subplot_prop_name, subplot_val_prefix)
# extract trailing number (if any)
subplot_number = _get_subplot_number(subplot_val)
max_subplot_ids[subplot_type] = max(
max_subplot_ids[subplot_type], subplot_number)
return max_subplot_ids | 0.000814 |
def handle_emphasis(self, start, tag_style, parent_style):
"""handles various text emphases"""
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
bold = 'bold' in tag_emphasis and 'bold' not in parent_emphasis
italic = 'italic' in tag_emphasis and 'italic' not in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1 | 0.00317 |
def from_xdr(cls, xdr):
"""Create an :class:`Asset` object from its base64 encoded XDR
representation.
:param bytes xdr: The base64 encoded XDR Asset object.
:return: A new :class:`Asset` object from its encoded XDR
representation.
"""
xdr_decoded = base64.b64decode(xdr)
asset = Xdr.StellarXDRUnpacker(xdr_decoded)
asset_xdr_object = asset.unpack_Asset()
asset = Asset.from_xdr_object(asset_xdr_object)
return asset | 0.003914 |
def _send(self, packet):
"""Add packet to send queue."""
fut = self.loop.create_future()
self.waiters.append((fut, packet))
if self.waiters and self.in_transaction is False:
self.protocol.send_packet()
return fut | 0.007576 |
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg | 0.00565 |
def _default_ns_prefix(nsmap):
"""
An XML doc may have several prefix:namespace_url pairs, and can also specify
a namespace_url as the default; tags in that namespace don't need a prefix.
NOTE:
we rely on the default namespace also being present in prefixed form; I'm not
sure if this is an XML certainty or a quirk of the eBay WSDLs
in our case the WSDL contains:
<wsdl:documentation>
<Version>1.0.0</Version>
</wsdl:documentation>
...but our query needs to give a prefix to the path of `Version` so we need
to determine the default namespace of the doc, find the matching prefix and
return it
"""
if None in nsmap:
default_url = nsmap[None]
prefix = None
for key, val in nsmap.iteritems():
if val == default_url and key is not None:
prefix = key
break
else:
raise ValueError(
"Default namespace {url} not found as a prefix".format(
url=default_url
)
)
return prefix
raise ValueError("No default namespace found in map") | 0.000877 |
def parse_raxml(handle):
"""Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned.
"""
s = ''.join(handle.readlines())
result = {}
try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s)
try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s)
result['empirical_frequencies'] = (
result['datatype'] != 'AA' or
re.search('empirical base frequencies', s, re.IGNORECASE) is not None)
try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s)
rates = {}
if result['datatype'] != 'AA':
try_set_fields(rates,
(r"rates\[0\] ac ag at cg ct gt: "
r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) "
r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"),
s, hook=float)
try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float)
if len(rates) > 0:
result['subs_rates'] = rates
result['gamma'] = {'n_cats': 4}
try_set_fields(result['gamma'],
r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float)
result['ras_model'] = 'gamma'
return result | 0.0006 |
def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):
""" basic line plot
dataframe to json for a line plot
Args:
df (pandas.DataFrame): input dataframe
xypairs (list): list of tuples containing column names
mode (str): plotly.js mode (e.g. lines)
layout (dict): layout parameters
config (dict): config parameters
"""
if df.empty:
return {
"x": [],
"y": [],
"mode": mode
}
_data = []
for x, y in xypairs:
if (x in df.columns) and (y in df.columns):
_data.append(
{
"x": df[x].values.tolist(),
"y": df[y].values.tolist(),
"mode": mode
}
)
return {
"data": _data,
"layout": layout,
"config": config
} | 0.001919 |
def last_event(request, slug):
"Displays a list of all services and their current status."
try:
service = Service.objects.get(slug=slug)
except Service.DoesNotExist:
return HttpResponseRedirect(reverse('overseer:index'))
try:
evt = service.event_set.order_by('-date_created')[0]
except IndexError:
return HttpResponseRedirect(service.get_absolute_url())
return event(request, evt.pk) | 0.008811 |
def close_others(self):
"""
Closes every editor tab except the current one.
"""
current_widget = self.currentWidget()
self._try_close_dirty_tabs(exept=current_widget)
i = 0
while self.count() > 1:
widget = self.widget(i)
if widget != current_widget:
self.removeTab(i)
else:
i = 1 | 0.004938 |
def consume_length_prefix(rlp, start):
"""Read a length prefix from an RLP string.
:param rlp: the rlp byte string to read from
:param start: the position at which to start reading
:returns: a tuple ``(prefix, type, length, end)``, where ``type`` is either ``str``
or ``list`` depending on the type of the following payload,
``length`` is the length of the payload in bytes, and ``end`` is
the position of the first payload byte in the rlp string
"""
b0 = rlp[start]
if b0 < 128: # single byte
return (b'', bytes, 1, start)
elif b0 < SHORT_STRING: # short string
if b0 - 128 == 1 and rlp[start + 1] < 128:
raise DecodingError('Encoded as short string although single byte was possible', rlp)
return (rlp[start:start + 1], bytes, b0 - 128, start + 1)
elif b0 < 192: # long string
ll = b0 - 183 # - (128 + 56 - 1)
if rlp[start + 1:start + 2] == b'\x00':
raise DecodingError('Length starts with zero bytes', rlp)
len_prefix = rlp[start + 1:start + 1 + ll]
l = big_endian_to_int(len_prefix) # noqa: E741
if l < 56:
raise DecodingError('Long string prefix used for short string', rlp)
return (rlp[start:start + 1] + len_prefix, bytes, l, start + 1 + ll)
elif b0 < 192 + 56: # short list
return (rlp[start:start + 1], list, b0 - 192, start + 1)
else: # long list
ll = b0 - 192 - 56 + 1
if rlp[start + 1:start + 2] == b'\x00':
raise DecodingError('Length starts with zero bytes', rlp)
len_prefix = rlp[start + 1:start + 1 + ll]
l = big_endian_to_int(len_prefix) # noqa: E741
if l < 56:
raise DecodingError('Long list prefix used for short list', rlp)
return (rlp[start:start + 1] + len_prefix, list, l, start + 1 + ll) | 0.003168 |
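A worked example of the short-string branch above (illustrative only): the prefix byte 0x83 announces a 3-byte string payload, so b'\x83dog' is the RLP encoding of b'dog'.
rlp = b'\x83dog'
b0 = rlp[0]                  # 0x83 == 131
assert 128 <= b0 < 128 + 56  # short-string range
length = b0 - 128            # 3
print(rlp[1:1 + length])     # b'dog'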
def edges(self, nodes=None):
"""Iterator for node values.
Yield:
node: the node.
"""
for source_node, dest_node, edge_data in self._multi_graph.edges(nodes, data=True):
yield source_node, dest_node, edge_data | 0.011321 |
def connected_grid_lines(network, busids):
""" Get grid lines connected to given buses.
Parameters
----------
network : :class:`pypsa.Network`
Overall container of PyPSA
busids : list
List containing bus-ids.
Returns
-------
:class:`pandas.DataFrame`
PyPSA lines.
"""
mask = network.lines.bus1.isin(busids) |\
network.lines.bus0.isin(busids)
return network.lines[mask] | 0.002237 |
def get_file_listing_sha(listing_paths: Iterable) -> str:
"""Return sha256 string for group of FTP listings."""
return sha256(''.join(sorted(listing_paths)).encode('utf-8')).hexdigest() | 0.005155 |
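A small standalone check of the idea above, with hypothetical listing paths: sorting before joining makes the digest independent of input order.
from hashlib import sha256

def listing_sha(paths):
    return sha256(''.join(sorted(paths)).encode('utf-8')).hexdigest()

assert listing_sha(["/pub/b.txt", "/pub/a.txt"]) == listing_sha(["/pub/a.txt", "/pub/b.txt"])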
def s2dctmat(nfilt,ncep,freqstep):
"""Return the 'legacy' not-quite-DCT matrix used by Sphinx"""
melcos = numpy.empty((ncep, nfilt), 'double')
for i in range(0,ncep):
freq = numpy.pi * float(i) / nfilt
melcos[i] = numpy.cos(freq * numpy.arange(0.5, float(nfilt)+0.5, 1.0, 'double'))
melcos[:,0] = melcos[:,0] * 0.5
return melcos | 0.019231 |
def clear(name):
'''
Clear the namespace from the register
USAGE:
.. code-block:: yaml
clearns:
reg.clear:
- name: myregister
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if name in __reg__:
__reg__[name].clear()
return ret | 0.002817 |
def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
# Items should be an array of type CoinState, not of ints!
corrected_items = list(map(lambda i: CoinState(i), self.Items))
return super(UnspentCoinState, self).Size() + GetVarSize(corrected_items) | 0.008523 |
def get_end_time_str(self):
"""
:return:
|attr_end_datetime| as a |str| formatted with
|attr_end_time_format|.
Return |NaT| if invalid datetime or format.
:rtype: str
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
print(time_range.get_end_time_str())
time_range.end_time_format = "%Y/%m/%d %H:%M:%S"
print(time_range.get_end_time_str())
:Output:
.. parsed-literal::
2015-03-22T10:10:00+0900
2015/03/22 10:10:00
"""
try:
return self.end_datetime.strftime(self.end_time_format)
except AttributeError:
return self.NOT_A_TIME_STR | 0.003348 |
def chunked(records, single=True):
""" Memory and performance friendly method to iterate over a potentially
large number of records. Yields either a whole chunk or a single record
at a time. Don't nest calls to this method. """
if version_info[0] > 10:
invalidate = records.env.cache.invalidate
elif version_info[0] > 7:
invalidate = records.env.invalidate_all
else:
raise Exception('Not supported Odoo version for this method.')
size = core.models.PREFETCH_MAX
model = records._name
ids = records.with_context(prefetch_fields=False).ids
for i in range(0, len(ids), size):
invalidate()
chunk = records.env[model].browse(ids[i:i + size])
if single:
for record in chunk:
yield record
continue
yield chunk | 0.00119 |
def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary() | 0.004608 |
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered | 0.002481 |
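Two illustrative calls, assuming the function above is in scope; the "pieces" dicts are hypothetical versioneer output.
print(render_pep440_pre({"closest-tag": "1.2.0", "distance": 3}))  # 1.2.0.post.dev3
print(render_pep440_pre({"closest-tag": None, "distance": 7}))     # 0.post.dev7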
def backup(mongo_username, mongo_password, local_backup_directory_path, database=None,
attached_directory_path=None, custom_prefix="backup",
mongo_backup_directory_path="/tmp/mongo_dump",
s3_bucket=None, s3_access_key_id=None, s3_secret_key=None,
purge_local=None, purge_attached=None, cleanup=True, silent=False):
"""
Runs a backup operation to at least a local directory.
You must provide MongoDB credentials along with a directory for a dump
operation and a directory to contain your compressed backup.
custom_prefix: optionally provide a prefix to be prepended to your backups;
by default the prefix is "backup".
database: optionally provide the name of one specific database to back up
(instead of backing up all databases on the MongoDB server)
attached_directory_path: makes a second copy of the backup to a different
directory. This directory is checked before other operations and
will raise an error if it cannot be found.
s3_bucket: if you have an Amazon Web Services S3 account you can
automatically upload the backup to an S3 Bucket you provide;
requires s3_access_key_id and s3_secret key to be passed as well
s3_access_key_id, s3_secret_key: credentials for your AWS account.
purge_local: An integer value, the number of days of backups to purge
from local_backup_directory_path after operations have completed.
purge_attached: An integer value, the number of days of backups to purge
from attached_directory_path after operations have completed.
cleanup: set to False to leave the mongo_backup_directory_path after operations
have completed.
"""
if attached_directory_path:
if not path.exists(attached_directory_path):
raise Exception("ERROR. Would have to create %s for your attached storage, make sure that file paths already exist and re-run"
% (attached_directory_path))
# Dump mongo, tarbz, copy to attached storage, upload to s3, purge, clean.
full_file_name_path = local_backup_directory_path + custom_prefix + time_string()
mongodump(mongo_username, mongo_password, mongo_backup_directory_path, database, silent=silent)
local_backup_file = tarbz(mongo_backup_directory_path, full_file_name_path, silent=silent)
if attached_directory_path:
copy(local_backup_file, attached_directory_path + local_backup_file.split("/")[-1])
if s3_bucket:
s3_upload(local_backup_file, s3_bucket, s3_access_key_id, s3_secret_key)
if purge_local:
purge_date = (datetime.utcnow().replace(second=0, microsecond=0) -
timedelta(days=purge_local))
purge_old_files(purge_date, local_backup_directory_path, custom_prefix=custom_prefix)
if purge_attached and attached_directory_path:
purge_date = (datetime.utcnow().replace(second=0, microsecond=0) -
timedelta(days=purge_attached))
purge_old_files(purge_date, attached_directory_path, custom_prefix=custom_prefix)
if cleanup:
rmtree(mongo_backup_directory_path) | 0.007787 |
def opensignals_hierarchy(root=None, update=False, clone=False):
"""
Function that generates the OpenSignalsTools Notebooks File Hierarchy programmatically.
----------
Parameters
----------
root : None or str
The file path where the OpenSignalsTools Environment will be stored.
update : bool
If True the old files will be replaced by the new ones.
clone : bool
If True then all the available Notebooks will be stored in the users computer.
If False only the folder hierarchy of OpenSignalsTools will be generated, giving to the
user a blank template for creating his own Notebook Environment.
Returns
-------
out : str
The root file path of OpenSignalsTools Environment is returned.
"""
if root is None:
root = os.getcwd()
categories = list(NOTEBOOK_KEYS.keys())
# ============================ Creation of the main directory ==================================
current_dir = root + "\\opensignalstools_environment"
if not os.path.isdir(current_dir):
os.makedirs(current_dir)
# ================== Copy of 'images' 'styles' and 'signal_samples' folders ====================
for var in ["images", "styles", "signal_samples"]:
if not os.path.isdir(root + "\\opensignalstools_environment\\" + var):
src = os.getcwd() + "\\" + var
destination = current_dir + "\\" + var
shutil.copytree(src, destination)
elif update is True:
shutil.rmtree(root + "\\opensignalstools_environment\\" + var)
src = os.getcwd() + "\\" + var
destination = current_dir + "\\" + var
shutil.copytree(src, destination)
# =========================== Generation of 'Categories' folder ================================
current_dir = root + "\\opensignalstools_environment\\Categories"
if not os.path.isdir(current_dir):
os.makedirs(current_dir)
for category in categories:
if not os.path.isdir(current_dir + "\\" + category):
os.makedirs(current_dir + "\\" + category)
if clone is True:
# Fill each folder inside "Categories" directory with the respective notebooks.
# Each notebook will be created by a specific function.
dir_path = root + "\\notebook_code"
list_of_code_dirs = os.listdir(dir_path)
for folder in list_of_code_dirs:
folder_path = root + "\\notebook_code\\" + folder
if folder != "MainFiles" and folder != "__pycache__":
list_of_code_files = os.listdir(folder_path)
for file in list_of_code_files:
if file != "__pycache__":
spec = importlib.util.spec_from_file_location(file, folder_path +
"\\" + file)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
foo.run(root + "\\opensignalstools_environment")
# Generation of opensignalstools environment main files.
main_page = notebook("Main_Files_By_Category")
main_page.write_to_file(root + "\\opensignalstools_environment", "opensignalstools",
footer=False)
by_difficulty = notebook("Main_Files_By_Difficulty", "Notebooks Grouped by Difficulty", notebook_description=DESCRIPTION_GROUP_BY)
by_difficulty.write_to_file(root + "\\opensignalstools_environment", "by_diff",
footer=False)
by_tags = notebook("Main_Files_By_Tag", "Notebooks Grouped by Tag Values",
notebook_description=DESCRIPTION_GROUP_BY)
by_tags.write_to_file(root + "\\opensignalstools_environment", "by_tag",
footer=False)
by_signal_type = notebook("Main_Files_By_Signal_Type", "Notebooks Grouped by Signal Type",
notebook_description=DESCRIPTION_GROUP_BY)
by_signal_type.write_to_file(root + "\\opensignalstools_environment",
"by_signal_type", footer=False)
signal_samples = notebook("Main_Files_Signal_Samples", "Signal Samples Library",
notebook_description=DESCRIPTION_SIGNAL_SAMPLES)
signal_samples.write_to_file(root + "\\opensignalstools_environment",
"signal_samples", footer=False)
return root + "\\opensignalstools_environment" | 0.003916 |
def send_update_port_statuses(self, context, port_ids, status):
"""Call the pluging to update the port status which updates the DB.
:param context: contains user information
:param port_ids: list of ids of the ports associated with the status
:param status: value of the status for the given port list (port_ids)
"""
cctxt = self.client.prepare(version='1.1')
return cctxt.call(context, 'update_port_statuses_cfg',
port_ids=port_ids, status=status) | 0.003766 |
def config_get(pattern='*', host=None, port=None, db=None, password=None):
'''
Get redis server configuration values
CLI Example:
.. code-block:: bash
salt '*' redis.config_get
salt '*' redis.config_get port
'''
server = _connect(host, port, db, password)
return server.config_get(pattern) | 0.002976 |
def _init_logging(anteater_log):
""" Setup root logger for package """
LOG.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
# create the directory if it does not exist
path = os.path.dirname(anteater_log)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
handler = logging.FileHandler(anteater_log)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
del logging.root.handlers[:]
logging.root.addHandler(ch)
logging.root.addHandler(handler) | 0.001323 |
def path_complete(self, text: str, line: str, begidx: int, endidx: int,
path_filter: Optional[Callable[[str], bool]] = None) -> List[str]:
"""Performs completion of local file system paths
:param text: the string prefix we are attempting to match (all returned matches must begin with it)
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param path_filter: optional filter function that determines if a path belongs in the results
this function takes a path as its argument and returns True if the path should
be kept in the results
:return: a list of possible tab completions
"""
# Used to complete ~ and ~user strings
def complete_users() -> List[str]:
# We are returning ~user strings that resolve to directories,
# so don't append a space or quote in the case of a single result.
self.allow_appended_space = False
self.allow_closing_quote = False
users = []
# Windows lacks the pwd module so we can't get a list of users.
# Instead we will return a result once the user enters text that
# resolves to an existing home directory.
if sys.platform.startswith('win'):
expanded_path = os.path.expanduser(text)
if os.path.isdir(expanded_path):
user = text
if add_trailing_sep_if_dir:
user += os.path.sep
users.append(user)
else:
import pwd
# Iterate through a list of users from the password database
for cur_pw in pwd.getpwall():
# Check if the user has an existing home dir
if os.path.isdir(cur_pw.pw_dir):
# Add a ~ to the user to match against text
cur_user = '~' + cur_pw.pw_name
if cur_user.startswith(text):
if add_trailing_sep_if_dir:
cur_user += os.path.sep
users.append(cur_user)
return users
# Determine if a trailing separator should be appended to directory completions
add_trailing_sep_if_dir = False
if endidx == len(line) or (endidx < len(line) and line[endidx] != os.path.sep):
add_trailing_sep_if_dir = True
# Used to replace cwd in the final results
cwd = os.getcwd()
cwd_added = False
# Used to replace expanded user path in final result
orig_tilde_path = ''
expanded_tilde_path = ''
# If the search text is blank, then search in the CWD for *
if not text:
search_str = os.path.join(os.getcwd(), '*')
cwd_added = True
else:
# Purposely don't match any path containing wildcards
wildcards = ['*', '?']
for wildcard in wildcards:
if wildcard in text:
return []
# Start the search string
search_str = text + '*'
# Handle tilde expansion and completion
if text.startswith('~'):
sep_index = text.find(os.path.sep, 1)
# If there is no slash, then the user is still completing the user after the tilde
if sep_index == -1:
return complete_users()
# Otherwise expand the user dir
else:
search_str = os.path.expanduser(search_str)
# Get what we need to restore the original tilde path later
orig_tilde_path = text[:sep_index]
expanded_tilde_path = os.path.expanduser(orig_tilde_path)
# If the search text does not have a directory, then use the cwd
elif not os.path.dirname(text):
search_str = os.path.join(os.getcwd(), search_str)
cwd_added = True
# Set this to True for proper quoting of paths with spaces
self.matches_delimited = True
# Find all matching path completions
matches = glob.glob(search_str)
# Filter out results that don't belong
if path_filter is not None:
matches = [c for c in matches if path_filter(c)]
# Don't append a space or closing quote to directory
if len(matches) == 1 and os.path.isdir(matches[0]):
self.allow_appended_space = False
self.allow_closing_quote = False
# Sort the matches before any trailing slashes are added
matches.sort(key=self.matches_sort_key)
self.matches_sorted = True
# Build display_matches and add a slash to directories
for index, cur_match in enumerate(matches):
# Display only the basename of this path in the tab-completion suggestions
self.display_matches.append(os.path.basename(cur_match))
# Add a separator after directories if the next character isn't already a separator
if os.path.isdir(cur_match) and add_trailing_sep_if_dir:
matches[index] += os.path.sep
self.display_matches[index] += os.path.sep
# Remove cwd if it was added to match the text readline expects
if cwd_added:
if cwd == os.path.sep:
to_replace = cwd
else:
to_replace = cwd + os.path.sep
matches = [cur_path.replace(to_replace, '', 1) for cur_path in matches]
# Restore the tilde string if we expanded one to match the text readline expects
if expanded_tilde_path:
matches = [cur_path.replace(expanded_tilde_path, orig_tilde_path, 1) for cur_path in matches]
return matches | 0.00248 |
def show_keyword_help(cur, arg):
"""
Call the built-in "show <command>", to display help for an SQL keyword.
:param cur: cursor
:param arg: string
:return: list
"""
keyword = arg.strip('"').strip("'")
query = "help '{0}'".format(keyword)
log.debug(query)
cur.execute(query)
if cur.description and cur.rowcount > 0:
headers = [x[0] for x in cur.description]
return [(None, cur.fetchall(), headers, '')]
else:
return [(None, None, None, 'No help found for {0}.'.format(keyword))] | 0.001821 |
def sedfile(fpath, regexpr, repl, force=False, verbose=True, veryverbose=False):
"""
Executes sed on a specific file
Args:
fpath (str): file path string
regexpr (str):
repl (str):
force (bool): (default = False)
verbose (bool): verbosity flag(default = True)
veryverbose (bool): (default = False)
Returns:
list: changed_lines
CommandLine:
python -m utool.util_path --exec-sedfile --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> fpath = ut.get_modpath(ut.util_path)
>>> regexpr = 'sedfile'
>>> repl = 'saidfile'
>>> force = False
>>> verbose = True
>>> veryverbose = False
>>> changed_lines = sedfile(fpath, regexpr, repl, force, verbose, veryverbose)
>>> result = ('changed_lines = %s' % (ut.repr3(changed_lines),))
>>> print(result)
"""
# TODO: move to util_edit
path, name = split(fpath)
new_file_lines = []
if veryverbose:
print('[sedfile] fpath=%r' % fpath)
print('[sedfile] regexpr=%r' % regexpr)
print('[sedfile] repl=%r' % repl)
print('[sedfile] force=%r' % force)
import utool as ut
file_lines = ut.readfrom(fpath, aslines=True, verbose=False)
# with open(fpath, 'r') as file:
# import utool
# with utool.embed_on_exception_context:
# file_lines = file.readlines()
# Search each line for the desired regexpr
new_file_lines = [re.sub(regexpr, repl, line) for line in file_lines]
changed_lines = [(newline, line)
for newline, line in zip(new_file_lines, file_lines)
if newline != line]
n_changed = len(changed_lines)
if n_changed > 0:
rel_fpath = relpath(fpath, os.getcwd())
print(' * %s changed %d lines in %r ' %
(['(dry-run)', '(real-run)'][force], n_changed, rel_fpath))
print(' * --------------------')
import utool as ut
new_file_lines = ut.lmap(ut.ensure_unicode, new_file_lines)
new_file = ''.join(new_file_lines)
#print(new_file.replace('\n','\n))
if verbose:
if True:
import utool as ut
old_file = ut.ensure_unicode(
''.join(ut.lmap(ut.ensure_unicode, file_lines)))
ut.print_difftext(old_file, new_file)
else:
changed_new, changed_old = zip(*changed_lines)
prefixold = ' * old (%d, %r): \n | ' % (n_changed, name)
prefixnew = ' * new (%d, %r): \n | ' % (n_changed, name)
print(prefixold + (' | '.join(changed_old)).strip('\n'))
print(' * ____________________')
print(prefixnew + (' | '.join(changed_new)).strip('\n'))
print(' * --------------------')
print(' * =====================================================')
# Write back to file
if force:
print(' ! WRITING CHANGES')
ut.writeto(fpath, new_file)
# with open(fpath, 'w') as file:
# file.write(new_file.encode('utf8'))
else:
print(' dry run')
return changed_lines
#elif verbose:
# print('Nothing changed')
return None | 0.001762 |
def get_left_ngrams(mention, window=3, attrib="words", n_min=1, n_max=1, lower=True):
"""Get the ngrams within a window to the *left* from the sentence Context.
For higher-arity Candidates, defaults to the *first* argument.
:param mention: The Mention to evaluate. If a candidate is given, default
to its first Mention.
:param window: The number of tokens to the left of the first argument to
return.
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a *generator* of ngrams
"""
span = _to_span(mention)
i = span.get_word_start_index()
for ngram in tokens_to_ngrams(
getattr(span.sentence, attrib)[max(0, i - window) : i],
n_min=n_min,
n_max=n_max,
lower=lower,
):
yield ngram | 0.002962 |
def density(self, r, rho0, Rs):
"""
computes the density
:param r: radius
:param rho0: characteristic density
:param Rs: scale radius
:return: density at radius r
"""
rho = rho0 / (r/Rs * (1 + (r/Rs))**3)
return rho | 0.007722 |
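A quick numeric check of the profile above: at r = Rs the density is rho0 / (1 * 2**3) = rho0 / 8.
rho0, Rs = 1.0, 2.0
r = Rs
print(rho0 / (r / Rs * (1 + r / Rs) ** 3))  # 0.125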
def read_file_offset(self, id, offset, limit, path="/"):
""" Read contents of a file in an allocation directory.
https://www.nomadproject.io/docs/http/client-fs-cat.html
arguments:
- id: (str) allocation_id required
- offset: (int) required
- limit: (int) required
- path: (str) optional
returns: (str) text
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.BadRequestNomadException
"""
params = {
"path": path,
"offset": offset,
"limit": limit
}
return self.request(id, params=params, method="get").text | 0.002717 |
def _loadable_get_(name, self):
"Used to lazily-evaluate & memoize an attribute."
func = getattr(self._attr_func_, name)
ret = func()
setattr(self._attr_data_, name, ret)
setattr(
type(self),
name,
property(
functools.partial(self._simple_get_, name)
)
)
delattr(self._attr_func_, name)
return ret | 0.004695 |
def overlap(self, other):
"""Determine whether this range overlaps with another."""
if self._start < other.end and self._end > other.start:
return True
return False | 0.01 |
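A standalone sketch of the same strict-inequality overlap test, with hypothetical endpoints; ranges that merely touch do not count as overlapping.
def ranges_overlap(a_start, a_end, b_start, b_end):
    return a_start < b_end and a_end > b_start

print(ranges_overlap(0, 10, 5, 15))   # True  (partial overlap)
print(ranges_overlap(0, 10, 10, 20))  # False (touching endpoints only)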
def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):
"""
Call the stripe API's create operation for this model.
:param api_key: The api key to use for this request. Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
"""
return cls.stripe_class.create(api_key=api_key, **kwargs) | 0.026866 |
def calculate_boundingbox(lng, lat, miles):
"""
Given a latitude, longitude and a distance in miles, calculate
the co-ordinates of a bounding box 2*miles long on each side, with the
given co-ordinates at the center.
"""
latChange = change_in_latitude(miles)
latSouth = lat - latChange
latNorth = lat + latChange
lngChange = change_in_longitude(lat, miles)
lngWest = lng + lngChange
lngEast = lng - lngChange
return (lngWest, latSouth, lngEast, latNorth) | 0.001988 |
def build_fred(self):
'''Build a flat recurrent encoder-decoder dialogue model'''
encoder = Encoder(data=self.data, config=self.model_config)
decoder = Decoder(data=self.data, config=self.model_config, encoder=encoder)
return EncoderDecoder(config=self.model_config, encoder=encoder, decoder=decoder) | 0.011976 |
def validate_is_callable_or_none(option, value):
"""Validates that 'value' is a callable."""
if value is None:
return value
if not callable(value):
raise ValueError("%s must be a callable" % (option,))
return value | 0.004065 |
def drop_dims(self, drop_dims):
"""Drop dimensions and associated variables from this dataset.
Parameters
----------
drop_dims : str or list
Dimension or dimensions to drop.
Returns
-------
obj : Dataset
The dataset without the given dimensions (or any variables
containing those dimensions)
"""
if utils.is_scalar(drop_dims):
drop_dims = [drop_dims]
missing_dimensions = [d for d in drop_dims if d not in self.dims]
if missing_dimensions:
raise ValueError('Dataset does not contain the dimensions: %s'
% missing_dimensions)
drop_vars = set(k for k, v in self._variables.items()
for d in v.dims if d in drop_dims)
variables = OrderedDict((k, v) for k, v in self._variables.items()
if k not in drop_vars)
coord_names = set(k for k in self._coord_names if k in variables)
return self._replace_with_new_dims(variables, coord_names) | 0.00182 |
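A minimal usage sketch with a hypothetical two-variable dataset: dropping the 'y' dimension also removes every variable defined on it.
import numpy as np
import xarray as xr

ds = xr.Dataset({"a": (("x", "y"), np.zeros((2, 3))), "b": ("x", np.ones(2))})
print(list(ds.drop_dims("y").data_vars))  # ['b']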
def close_trackbacks(self, request, queryset):
"""
Close the trackbacks for selected entries.
"""
queryset.update(trackback_enabled=False)
self.message_user(
request, _('Trackbacks are now closed for selected entries.')) | 0.007353 |
def serialize_query(func):
""" Ensure any SQLExpression instances are serialized"""
@functools.wraps(func)
def wrapper(self, query, *args, **kwargs):
if hasattr(query, 'serialize'):
query = query.serialize()
assert isinstance(query, basestring), 'Expected query to be string'
if self.debug:
print('SQL:', query)
return func(self, query, *args, **kwargs)
return wrapper | 0.004149 |
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table name
:param catalog: the catalog name
:param schema: the schema name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut | 0.004264 |
def document_delete(index, doc_type, id, hosts=None, profile=None):
'''
Delete a document from an index
index
Index name where the document resides
doc_type
Type of the document
id
Document identifier
CLI example::
salt myminion elasticsearch.document_delete testindex doctype1 AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
return es.delete(index=index, doc_type=doc_type, id=id)
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot delete document {0} in index {1}, server returned code {2} with message {3}".format(id, index, e.status_code, e.error)) | 0.003953 |
def _set_slave_enabled(self, dpid, port, enabled):
"""set whether a slave i/f at some port of some datapath is
enable or not."""
slave = self._get_slave(dpid, port)
if slave:
slave['enabled'] = enabled | 0.008163 |
def read_header(self):
"""
Reads VPK file header from the file
"""
with fopen(self.vpk_path, 'rb') as f:
(self.signature,
self.version,
self.tree_length
) = struct.unpack("3I", f.read(3*4))
# original format - headerless
if self.signature != 0x55aa1234:
raise ValueError("File is not VPK (invalid magic)")
# v1
elif self.version == 1:
self.header_length += 4*3
# v2 with extended header
#
# according to http://forum.xentax.com/viewtopic.php?f=10&t=11208
# struct VPKDirHeader_t
# {
# int32 m_nHeaderMarker;
# int32 m_nVersion;
# int32 m_nDirectorySize;
# int32 m_nEmbeddedChunkSize;
# int32 m_nChunkHashesSize;
# int32 m_nSelfHashesSize;
# int32 m_nSignatureSize;
# }
elif self.version == 2:
(self.embed_chunk_length,
self.chunk_hashes_length,
self.self_hashes_length,
self.signature_length
) = struct.unpack("4I", f.read(4*4))
self.header_length += 4*7
f.seek(self.tree_length + self.embed_chunk_length + self.chunk_hashes_length, 1)
assert self.self_hashes_length == 48, "Self hashes section size mismatch"
(self.tree_checksum,
self.chunk_hashes_checksum,
self.file_checksum,
) = struct.unpack("16s16s16s", f.read(16*3))
else:
raise ValueError("Invalid header, or unsupported version") | 0.002259 |
def match(self, path):
"""Return route handler with arguments if path matches this route.
Arguments:
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if the route does not match the path.
"""
match = self._re.search(path)
if match is None:
return None
kwargs_indexes = match.re.groupindex.values()
args_indexes = [i for i in range(1, match.re.groups + 1)
if i not in kwargs_indexes]
args = [match.group(i) for i in args_indexes]
kwargs = {}
for name, index in match.re.groupindex.items():
kwargs[name] = match.group(index)
return self._callback, args, kwargs | 0.003356 |
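A standalone sketch of the same group-splitting idea using a hypothetical regex route: named groups become keyword arguments, the remaining groups positional arguments.
import re

pattern = re.compile(r'^/users/(?P<name>\w+)/posts/(\d+)$')
m = pattern.search('/users/alice/posts/42')
kwargs = {k: m.group(i) for k, i in pattern.groupindex.items()}
args = [m.group(i) for i in range(1, pattern.groups + 1)
        if i not in pattern.groupindex.values()]
print(args, kwargs)  # ['42'] {'name': 'alice'}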
def workflow_remove_tags(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /workflow-xxxx/removeTags API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FremoveTags
"""
return DXHTTPRequest('/%s/removeTags' % object_id, input_params, always_retry=always_retry, **kwargs) | 0.010526 |
def extendMarkdown(self, md, md_globals):
""" Add an instance of FigcaptionProcessor to BlockParser. """
# def_list = 'def_list' in md.registeredExtensions
md.parser.blockprocessors.add(
'figcaption', FigcaptionProcessor(md.parser), '<ulist') | 0.007168 |
def execution(execution: Dict[str, Any], filler: Dict[str, Any]) -> Dict[str, Any]:
"""
For VM tests, specify the code that is being run as well as the current state of
the EVM. State tests don't support this object. The parameter is a dictionary specifying some
or all of the following keys:
+--------------------+------------------------------------------------------------+
| key | description |
+====================+============================================================+
| ``"address"`` | the address of the account executing the code |
+--------------------+------------------------------------------------------------+
| ``"caller"`` | the caller address |
+--------------------+------------------------------------------------------------+
| ``"origin"`` | the origin address (defaulting to the caller address) |
+--------------------+------------------------------------------------------------+
| ``"value"`` | the value of the call |
+--------------------+------------------------------------------------------------+
| ``"data"`` | the data passed with the call |
+--------------------+------------------------------------------------------------+
| ``"gasPrice"`` | the gas price of the call |
+--------------------+------------------------------------------------------------+
| ``"gas"`` | the amount of gas allocated for the call |
+--------------------+------------------------------------------------------------+
| ``"code"`` | the bytecode to execute |
+--------------------+------------------------------------------------------------+
| ``"vyperLLLCode"`` | the code in Vyper LLL (compiled to bytecode automatically) |
+--------------------+------------------------------------------------------------+
"""
execution = normalize_execution(execution or {})
# use caller as origin if not explicitly given
if "caller" in execution and "origin" not in execution:
execution = assoc(execution, "origin", execution["caller"])
if "vyperLLLCode" in execution:
code = compile_vyper_lll(execution["vyperLLLCode"])
if "code" in execution:
if code != execution["code"]:
raise ValueError("Compiled Vyper LLL code does not match")
execution = assoc(execution, "code", code)
execution = merge(DEFAULT_EXECUTION, execution)
test_name = get_test_name(filler)
return deep_merge(
filler,
{
test_name: {
"exec": execution,
}
}
) | 0.004783 |
def convert_schema(raml_schema, mime_type):
""" Restructure `raml_schema` to a dictionary that has 'properties'
as well as other schema keys/values.
The resulting dictionary looks like this::
{
"properties": {
"field1": {
"required": boolean,
"type": ...,
...more field options
},
...more properties
},
"public_fields": [...],
"auth_fields": [...],
...more schema options
}
:param raml_schema: RAML request body schema.
:param mime_type: ContentType of the schema as a string from RAML
file. Only JSON is currently supported.
"""
if mime_type == ContentTypes.JSON:
if not isinstance(raml_schema, dict):
raise TypeError(
'Schema is not a valid JSON. Please check your '
'schema syntax.\n{}...'.format(str(raml_schema)[:60]))
return raml_schema
if mime_type == ContentTypes.TEXT_XML:
# Process XML schema
pass | 0.000945 |
def update_layers_warper(service):
"""
Update layers for a Warper service.
Sample endpoint: http://warp.worldmap.harvard.edu/maps
"""
params = {'field': 'title', 'query': '', 'show_warped': '1', 'format': 'json'}
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
request = requests.get(service.url, headers=headers, params=params)
try:
records = json.loads(request.content)
total_pages = int(records['total_pages'])
# set srs
# Warper supports only 4326, 900913, 3857
for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']:
srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code)
service.srs.add(srs)
service.update_validity()
for i in range(1, total_pages + 1):
params = {'field': 'title', 'query': '', 'show_warped': '1', 'format': 'json', 'page': i}
request = requests.get(service.url, headers=headers, params=params)
records = json.loads(request.content)
LOGGER.debug('Fetched %s' % request.url)
layers = records['items']
layer_n = 0
total = len(layers)
for layer in layers:
name = layer['id']
title = layer['title']
abstract = layer['description']
bbox = layer['bbox']
# dates
dates = []
if 'published_date' in layer:
dates.append(layer['published_date'])
if 'date_depicted' in layer:
dates.append(layer['date_depicted'])
if 'depicts_year' in layer:
dates.append(layer['depicts_year'])
if 'issue_year' in layer:
dates.append(layer['issue_year'])
layer, created = Layer.objects.get_or_create(name=name, service=service, catalog=service.catalog)
if layer.active:
# update fields
# links = [['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]]
layer.type = 'Hypermap:WARPER'
layer.title = title
layer.abstract = abstract
layer.is_public = True
layer.url = '%s/wms/%s?' % (service.url, name)
layer.page_url = '%s/%s' % (service.url, name)
# bbox
x0 = None
y0 = None
x1 = None
y1 = None
if bbox:
bbox_list = bbox.split(',')
x0 = format_float(bbox_list[0])
y0 = format_float(bbox_list[1])
x1 = format_float(bbox_list[2])
y1 = format_float(bbox_list[3])
layer.bbox_x0 = x0
layer.bbox_y0 = y0
layer.bbox_x1 = x1
layer.bbox_y1 = y1
layer.save()
# dates
add_mined_dates(layer)
add_metadata_dates_to_layer(dates, layer)
layer_n = layer_n + 1
# exits if DEBUG_SERVICES
LOGGER.debug("Updating layer n. %s/%s" % (layer_n, total))
if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER:
return
except Exception as err:
message = "update_layers_warper: {0}. request={1} response={2}".format(
err,
service.url,
request.text
)
check = Check(
content_object=service,
success=False,
response_time=0,
message=message
)
check.save() | 0.001821 |
def read_header(fd, endian):
"""Read and return the matrix header."""
flag_class, nzmax = read_elements(fd, endian, ['miUINT32'])
header = {
'mclass': flag_class & 0x0FF,
'is_logical': (flag_class >> 9 & 1) == 1,
'is_global': (flag_class >> 10 & 1) == 1,
'is_complex': (flag_class >> 11 & 1) == 1,
'nzmax': nzmax
}
header['dims'] = read_elements(fd, endian, ['miINT32'])
header['n_dims'] = len(header['dims'])
if header['n_dims'] != 2:
raise ParseError('Only matrices with dimension 2 are supported.')
header['name'] = read_elements(fd, endian, ['miINT8'], is_name=True)
return header | 0.001497 |
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
"""
if isinstance(other, Context):
return super(RowSparseNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype in ('default', 'row_sparse'):
return super(RowSparseNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other))) | 0.005874 |
def flatten(iterable):
"""
flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
:param iterable: any object that implements the iterable protocol (see: :ref:`typeiter`)
:return: list
Examples:
>>> from adminactions.utils import flatten
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = list()
for el in iterable:
if hasattr(el, "__iter__") and not isinstance(el, str):
result.extend(flatten(el))
else:
result.append(el)
return list(result) | 0.002591 |
def get_html(url,
headers=None,
timeout=None,
errors="strict",
wait_time=None,
driver=None,
zillow_only=False,
cache_only=False,
zillow_first=False,
cache_first=False,
random=False,
**kwargs):
"""
Use Google Cached Url.
:param cache_only: if True, then the real zillow site will never be used.
:param driver: selenium browser driver.
"""
if wait_time is None:
wait_time = Config.Crawler.wait_time
# prepare url
cache_url1 = prefix + url + "/"
cache_url2 = prefix + url
zillow_url = url
only_flags = [zillow_only, cache_only]
if sum(only_flags) == 0:
first_flags = [zillow_first, cache_first]
if sum(first_flags) == 0:
if random:
if randint(0, 1):
all_url = [zillow_url, cache_url1, cache_url2]
else:
all_url = [cache_url1, cache_url2, zillow_url]
else:
all_url = [zillow_url, cache_url1, cache_url2]
elif sum(first_flags) == 1:
if zillow_first:
all_url = [zillow_url, cache_url1, cache_url2]
elif cache_first:
all_url = [cache_url1, cache_url2, zillow_url]
else:
raise ValueError(
"Only zero or one `xxx_first` argument could be `True`!")
elif sum(only_flags) == 1:
if zillow_only:
all_url = [zillow_url, ]
elif cache_only:
all_url = [cache_url1, cache_url2]
else:
raise ValueError(
"Only zero or one `xxx_only` argument could be `True`!")
last_exc = None
for url in all_url:
try:
html = _get_html(url, headers, timeout, errors,
wait_time, driver, **kwargs)
return html
except Exception as e:  # `e` is cleared after the except block, so keep a reference
last_exc = e
raise last_exc
def _checkReplyTo(self, value):
'''WS-Address ReplyTo
value -- ReplyTo address returned by the server in wsa:To
'''
if value != self._replyTo:
raise WSActionException('wrong WS-Address ReplyTo(%s), expecting %s' % (value, self._replyTo))
def install(self, plugin, name=None, **opts):
"""Install plugin to the application."""
source = plugin
if isinstance(plugin, str):
module, _, attr = plugin.partition(':')
module = import_module(module)
plugin = getattr(module, attr or 'Plugin', None)
if isinstance(plugin, types.ModuleType):
plugin = getattr(module, 'Plugin', None)
if plugin is None:
raise MuffinException('Plugin is not found %r' % source)
name = name or plugin.name
if name in self.ps:
raise MuffinException('Plugin with name `%s` is already installed.' % name)
if isinstance(plugin, type):
plugin = plugin(**opts)
if hasattr(plugin, 'setup'):
plugin.setup(self)
if hasattr(plugin, 'middleware') and plugin.middleware not in self.middlewares:
self.middlewares.append(plugin.middleware)
if hasattr(plugin, 'startup'):
self.on_startup.append(plugin.startup)
if hasattr(plugin, 'cleanup'):
self.on_cleanup.append(plugin.cleanup)
# Save plugin links
self.ps[name] = plugin
return plugin | 0.003295 |
def digest(self, data=None):
"""
Finalizes digest operation and return digest value
Optionally hashes more data before finalizing
"""
if self.digest_finalized:
return self.digest_out.raw[:self.digest_size]
if data is not None:
self.update(data)
self.digest_out = create_string_buffer(256)
length = c_long(0)
result = libcrypto.EVP_DigestFinal_ex(self.ctx, self.digest_out,
byref(length))
if result != 1:
raise DigestError("Unable to finalize digest")
self.digest_finalized = True
return self.digest_out.raw[:self.digest_size] | 0.002845 |
def number_to_lower_endian(n, base):
"""Helper function: convert a number to a list of digits in the given base."""
if n < base:
return [n]
return [n % base] + number_to_lower_endian(n // base, base) | 0.023923 |
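Two worked calls, assuming the function above is defined; digits come out least-significant first.
print(number_to_lower_endian(1234, 10))  # [4, 3, 2, 1]
print(number_to_lower_endian(255, 16))   # [15, 15]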
def _missing_required_parameters(rqset, **kwargs):
"""Helper function to do operation on sets.
Checks for any missing required parameters.
Returns non-empty or empty list. With empty
list being False.
::returns list
"""
key_set = set(list(iterkeys(kwargs)))
required_minus_received = rqset - key_set
if required_minus_received != set():
return list(required_minus_received) | 0.002387 |
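A quick standalone illustration of the set difference used above, with a hypothetical required set and supplied kwargs.
required = {"host", "port", "user"}
supplied = {"host": "db1", "user": "admin"}
print(sorted(required - set(supplied)))  # ['port']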
def _create_storage_directories():
"""Create various storage directories, if those do not exist."""
# Create configuration directory
if not os.path.exists(common.CONFIG_DIR):
os.makedirs(common.CONFIG_DIR)
# Create data directory (for log file)
if not os.path.exists(common.DATA_DIR):
os.makedirs(common.DATA_DIR)
# Create run directory (for lock file)
if not os.path.exists(common.RUN_DIR):
os.makedirs(common.RUN_DIR) | 0.003914 |
def focusInEvent(self, event):
"""
Updates the focus in state for this edit.
:param event | <QFocusEvent>
"""
super(XLineEdit, self).focusInEvent(event)
self._focusedIn = True | 0.015748 |
def include_file_cb(include_path, line_ranges, symbol):
"""
Return the contents of include_path (optionally limited to line_ranges), plus a language hint for highlighting.
"""
lang = ''
if include_path.endswith((".md", ".markdown")):
lang = 'markdown'
else:
split = os.path.splitext(include_path)
if len(split) == 2:
ext = split[1].strip('.')
lang = LANG_MAPPING.get(ext) or ext
if line_ranges:
res = []
for line_range in line_ranges:
for lineno in range(line_range[0] + 1, line_range[1] + 1):
line = linecache.getline(include_path, lineno)
if not line:
return None
res.append(line)
return ''.join(res), lang
with io.open(include_path, 'r', encoding='utf-8') as _:
return _.read(), lang | 0.002299 |
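A hedged example: pull the first three lines of a hypothetical file and let the extension drive the language guess.

content, lang = include_file_cb('examples/demo.py', line_ranges=[(0, 3)], symbol=None)
print(lang)     # 'python' if LANG_MAPPING maps 'py', otherwise 'py'
print(content)  # lines 1-3 of examples/demo.py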
def _get_intra_event_phi(self, C, mag, rjb, vs30, num_sites):
"""
Returns the intra-event standard deviation (phi), dependent on
magnitude, distance and vs30
"""
base_vals = np.zeros(num_sites)
# Magnitude Dependent phi (Equation 17)
if mag <= 4.5:
base_vals += C["f1"]
elif mag >= 5.5:
base_vals += C["f2"]
else:
base_vals += (C["f1"] + (C["f2"] - C["f1"]) * (mag - 4.5))
# Distance dependent phi (Equation 16)
idx1 = rjb > C["R2"]
base_vals[idx1] += C["DfR"]
idx2 = np.logical_and(rjb > C["R1"], rjb <= C["R2"])
base_vals[idx2] += (C["DfR"] * (np.log(rjb[idx2] / C["R1"]) /
np.log(C["R2"] / C["R1"])))
# Site-dependent phi (Equation 15)
idx1 = vs30 <= self.CONSTS["v1"]
base_vals[idx1] -= C["DfV"]
idx2 = np.logical_and(vs30 >= self.CONSTS["v1"],
vs30 <= self.CONSTS["v2"])
base_vals[idx2] -= (
C["DfV"] * (np.log(self.CONSTS["v2"] / vs30[idx2]) /
np.log(self.CONSTS["v2"] / self.CONSTS["v1"])))
return base_vals | 0.001645 |
def vm_present(name, vmconfig, config=None):
'''
Ensure vm is present on the computenode
name : string
hostname of vm
vmconfig : dict
options to set for the vm
config : dict
fine grain control over vm_present
.. note::
The following configuration properties can be toggled in the config parameter.
- kvm_reboot (true) - reboots of kvm zones if needed for a config update
- auto_import (false) - automatic importing of missing images
- auto_lx_vars (true) - copy kernel_version and docker:* variables from image
- reprovision (false) - reprovision on image_uuid changes
- enforce_tags (true) - false = add tags only, true = add, update, and remove tags
- enforce_routes (true) - false = add tags only, true = add, update, and remove routes
- enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
- enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata
.. note::
State ID is used as hostname. Hostnames must be unique.
.. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
This allows multiple states to be applied to the same vm.
.. note::
The following instances should have a unique ID.
- nic : mac
- filesystem: target
- disk : path or diskN for zvols
e.g. disk0 will be the first disk added, disk1 the 2nd,...
.. versionchanged:: 2019.2.0
Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
'''
name = name.lower()
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
# config defaults
state_config = config if config else {}
config = {
'kvm_reboot': True,
'auto_import': False,
'auto_lx_vars': True,
'reprovision': False,
'enforce_tags': True,
'enforce_routes': True,
'enforce_internal_metadata': True,
'enforce_customer_metadata': True,
}
config.update(state_config)
log.debug('smartos.vm_present::%s::config - %s', name, config)
# map special vmconfig parameters
# collections have set/remove handlers
# instances have add/update/remove handlers and a unique id
vmconfig_type = {
'collection': [
'tags',
'customer_metadata',
'internal_metadata',
'routes'
],
'instance': {
'nics': 'mac',
'disks': 'path',
'filesystems': 'target'
},
'create_only': [
'filesystems'
]
}
vmconfig_docker_keep = [
'docker:id',
'docker:restartcount',
]
vmconfig_docker_array = [
'docker:env',
'docker:cmd',
'docker:entrypoint',
]
# parse vmconfig
vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)
# set hostname if needed
if 'hostname' not in vmconfig:
vmconfig['hostname'] = name
# prepare image_uuid
if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuids are passed through unmodified)
# we must do this again if we end up importing a missing image later!
docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']
# NOTE: import image (if missing and allowed)
if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
if config['auto_import']:
if not __opts__['test']:
res = __salt__['imgadm.import'](vmconfig['image_uuid'])
vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
if vmconfig['image_uuid'] not in res:
ret['result'] = False
ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
else:
ret['result'] = False
ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])
# prepare disk.*.image_uuid
for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
if config['auto_import']:
if not __opts__['test']:
res = __salt__['imgadm.import'](disk['image_uuid'])
if disk['image_uuid'] not in res:
ret['result'] = False
ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
else:
ret['result'] = False
ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])
# docker json-array handling
if 'internal_metadata' in vmconfig:
for var in vmconfig_docker_array:
if var not in vmconfig['internal_metadata']:
continue
if isinstance(vmconfig['internal_metadata'][var], list):
vmconfig['internal_metadata'][var] = json.dumps(
vmconfig['internal_metadata'][var]
)
# copy lx variables
if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
vmconfig = _copy_lx_vars(vmconfig)
# quick abort if things look wrong
# NOTE: use explicit check for false, otherwise None also matches!
if ret['result'] is False:
return ret
# check if vm exists
if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
# update vm
ret['result'] = True
# expand vmconfig
vmconfig = {
'state': vmconfig,
'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
'changed': {},
'reprovision_uuid': None
}
# prepare reprovision
if 'image_uuid' in vmconfig['state']:
vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']
# disks need some special care
if 'disks' in vmconfig['state']:
new_disks = []
for disk in vmconfig['state']['disks']:
path = False
if 'disks' in vmconfig['current']:
for cdisk in vmconfig['current']['disks']:
if cdisk['path'].endswith(disk['path']):
path = cdisk['path']
break
if not path:
del disk['path']
else:
disk['path'] = path
new_disks.append(disk)
vmconfig['state']['disks'] = new_disks
# process properties
for prop in vmconfig['state']:
# skip special vmconfig_types
if prop in vmconfig_type['instance'] or \
prop in vmconfig_type['collection'] or \
prop in vmconfig_type['create_only']:
continue
# skip unchanged properties
if prop in vmconfig['current']:
if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
if vmconfig['current'][prop] == vmconfig['state'][prop]:
continue
else:
if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
continue
# add property to changeset
vmconfig['changed'][prop] = vmconfig['state'][prop]
# process collections
for collection in vmconfig_type['collection']:
# skip create only collections
if collection in vmconfig_type['create_only']:
continue
# enforcement
enforce = config['enforce_{0}'.format(collection)]
log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)
# dockerinit handling
if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
if 'internal_metadata' not in vmconfig['state']:
vmconfig['state']['internal_metadata'] = {}
# preserve some docker specific metadata (added and needed by dockerinit)
for var in vmconfig_docker_keep:
val = vmconfig['current'].get(collection, {}).get(var, None)
if val is not None:
vmconfig['state']['internal_metadata'][var] = val
# process add and update for collection
if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
for prop in vmconfig['state'][collection]:
# skip unchanged properties
if prop in vmconfig['current'][collection] and \
vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
continue
# skip update if not enforcing
if not enforce and prop in vmconfig['current'][collection]:
continue
# create set_ dict
if 'set_{0}'.format(collection) not in vmconfig['changed']:
vmconfig['changed']['set_{0}'.format(collection)] = {}
# add property to changeset
vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]
# process remove for collection
if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
for prop in vmconfig['current'][collection]:
# skip if exists in state
if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
if prop in vmconfig['state'][collection]:
continue
# create remove_ array
if 'remove_{0}'.format(collection) not in vmconfig['changed']:
vmconfig['changed']['remove_{0}'.format(collection)] = []
# remove property
vmconfig['changed']['remove_{0}'.format(collection)].append(prop)
# process instances
for instance in vmconfig_type['instance']:
# skip create only instances
if instance in vmconfig_type['create_only']:
continue
# add or update instances
if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
for state_cfg in vmconfig['state'][instance]:
add_instance = True
# find instance with matching ids
for current_cfg in vmconfig['current'][instance]:
if vmconfig_type['instance'][instance] not in state_cfg:
continue
if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
# ids have matched, disable add instance
add_instance = False
changed = _get_instance_changes(current_cfg, state_cfg)
update_cfg = {}
# handle changes
for prop in changed:
update_cfg[prop] = state_cfg[prop]
# handle new properties
for prop in state_cfg:
# skip empty props like ips, options,..
if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
continue
if prop not in current_cfg:
update_cfg[prop] = state_cfg[prop]
# update instance
if update_cfg:
# create update_ array
if 'update_{0}'.format(instance) not in vmconfig['changed']:
vmconfig['changed']['update_{0}'.format(instance)] = []
update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)
if add_instance:
# create add_ array
if 'add_{0}'.format(instance) not in vmconfig['changed']:
vmconfig['changed']['add_{0}'.format(instance)] = []
# add instance
vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)
# remove instances
if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
for current_cfg in vmconfig['current'][instance]:
remove_instance = True
# find instance with matching ids
if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
for state_cfg in vmconfig['state'][instance]:
if vmconfig_type['instance'][instance] not in state_cfg:
continue
if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
# keep instance if matched
remove_instance = False
if remove_instance:
# create remove_ array
if 'remove_{0}'.format(instance) not in vmconfig['changed']:
vmconfig['changed']['remove_{0}'.format(instance)] = []
# remove instance
vmconfig['changed']['remove_{0}'.format(instance)].append(
current_cfg[vmconfig_type['instance'][instance]]
)
# update vm if we have pending changes
kvm_needs_start = False
if not __opts__['test'] and vmconfig['changed']:
# stop kvm if disk updates and kvm_reboot
if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
if 'add_disks' in vmconfig['changed'] or \
'update_disks' in vmconfig['changed'] or \
'remove_disks' in vmconfig['changed']:
if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
kvm_needs_start = True
__salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')
# do update
rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
if not isinstance(rret, (bool)) and 'Error' in rret:
ret['result'] = False
ret['comment'] = "{0}".format(rret['Error'])
else:
ret['result'] = True
ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
if ret['result']:
if __opts__['test']:
ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
__salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
if kvm_needs_start:
__salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
else:
ret['changes'] = {}
ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])
# reprovision (if required and allowed)
if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
if config['reprovision']:
rret = __salt__['vmadm.reprovision'](
vm=vmconfig['state']['hostname'],
key='hostname',
image=vmconfig['reprovision_uuid']
)
if not isinstance(rret, (bool)) and 'Error' in rret:
ret['result'] = False
ret['comment'] = 'vm {0} updated, reprovision failed'.format(
vmconfig['state']['hostname']
)
else:
ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
if vmconfig['state']['hostname'] not in ret['changes']:
ret['changes'][vmconfig['state']['hostname']] = {}
ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
else:
log.warning('smartos.vm_present::%s::reprovision - '
'image_uuid in state does not match current, '
'reprovision not allowed',
name)
else:
ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
if not isinstance(rret, (bool)) and 'Error' in rret:
ret['comment'] = "{0}".format(rret['Error'])
else:
# check required image installed
ret['result'] = True
# disks need some special care
if 'disks' in vmconfig:
new_disks = []
for disk in vmconfig['disks']:
if 'path' in disk:
del disk['path']
new_disks.append(disk)
vmconfig['disks'] = new_disks
# create vm
if ret['result']:
uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
if not isinstance(uuid, (bool)) and 'Error' in uuid:
ret['result'] = False
ret['comment'] = "{0}".format(uuid['Error'])
else:
ret['result'] = True
ret['changes'][vmconfig['hostname']] = vmconfig
ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])
return ret | 0.003902 |
def arrays(self):
""" Returns symbol instances corresponding to arrays
of the current scope.
"""
return [x for x in self[self.current_scope].values() if x.class_ == CLASS.array] | 0.014354 |
def _save_results(options, module, core_results, fit_results):
"""
Save results of analysis as tables and figures
Parameters
----------
options : dict
Option names and values for analysis
module : str
Module that contained function used to generate core_results
core_results : dataframe, array, value, list of tuples
Results of main analysis
fit_results : list or None
Results of comparing emp analysis to models, None if not applicable
"""
logging.info("Saving all results")
# Use custom plot format
mpl.rcParams.update(misc.rcparams.ggplot_rc)
# Make run directory
os.makedirs(options['run_dir'])
# Write core results
_write_core_tables(options, module, core_results)
# Write additional results if analysis from emp
if module == 'emp':
_write_subset_index_file(options, core_results)
# Write model/data comparison if models were given
if fit_results:
models = options['models'].replace(' ','').split(';')
for i, core_result in enumerate(core_results):
_write_fitted_params(i, models, options, fit_results)
_write_test_statistics(i, models, options, fit_results)
_write_comparison_plot_table(i, models, options,
core_results, fit_results) | 0.001471 |
def grok_for_node(element, default_vars):
"""Properly parses a For loop element"""
if isinstance(element.iter, jinja2.nodes.Filter):
if element.iter.name == 'default' \
and element.iter.node.name not in default_vars:
default_vars.append(element.iter.node.name)
default_vars = default_vars + grok_vars(element)
return default_vars | 0.002618 |
def new(self):
# type: () -> None
'''
Create a new Rock Ridge Child Link record.
Parameters:
None.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('CL record already initialized!')
self.child_log_block_num = 0 # This gets set later
self._initialized = True | 0.00995 |
def get_execution_state(cluster, environ, topology, role=None):
'''
Get the execution state of a topology in a cluster
:param cluster:
:param environ:
:param topology:
:param role:
:return:
'''
params = dict(cluster=cluster, environ=environ, topology=topology)
if role is not None:
params['role'] = role
request_url = tornado.httputil.url_concat(create_url(EXECUTION_STATE_URL_FMT), params)
raise tornado.gen.Return((yield fetch_url_as_json(request_url))) | 0.014493 |
def _make_dir(self, client_kwargs):
"""
Make a directory.
args:
client_kwargs (dict): Client arguments.
"""
with _handle_oss_error():
bucket = self._get_bucket(client_kwargs)
# Object
if 'key' in client_kwargs:
return bucket.put_object(
key=client_kwargs['key'], data=b'')
# Bucket
return bucket.create_bucket() | 0.004329 |
def requestOpenOrders(self, all_clients=False):
"""
        Request open orders - loads up orders that weren't created using this session
"""
if all_clients:
self.ibConn.reqAllOpenOrders()
self.ibConn.reqOpenOrders() | 0.011583 |
def bind_device_to_gateway(
self,
parent,
gateway_id,
device_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Associates the device with the gateway.
Example:
>>> from google.cloud import iot_v1
>>>
>>> client = iot_v1.DeviceManagerClient()
>>>
>>> parent = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]')
>>>
>>> # TODO: Initialize `gateway_id`:
>>> gateway_id = ''
>>>
>>> # TODO: Initialize `device_id`:
>>> device_id = ''
>>>
>>> response = client.bind_device_to_gateway(parent, gateway_id, device_id)
Args:
parent (str): The name of the registry. For example,
``projects/example-project/locations/us-central1/registries/my-registry``.
gateway_id (str): The value of ``gateway_id`` can be either the device numeric ID or the
user-defined device identifier.
device_id (str): The device to associate with the specified gateway. The value of
``device_id`` can be either the device numeric ID or the user-defined
device identifier.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.iot_v1.types.BindDeviceToGatewayResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "bind_device_to_gateway" not in self._inner_api_calls:
self._inner_api_calls[
"bind_device_to_gateway"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.bind_device_to_gateway,
default_retry=self._method_configs["BindDeviceToGateway"].retry,
default_timeout=self._method_configs["BindDeviceToGateway"].timeout,
client_info=self._client_info,
)
request = device_manager_pb2.BindDeviceToGatewayRequest(
parent=parent, gateway_id=gateway_id, device_id=device_id
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["bind_device_to_gateway"](
request, retry=retry, timeout=timeout, metadata=metadata
) | 0.003336 |
def sync_policies(self, vault_client):
"""Synchronizes policies only"""
p_resources = [x for x in self.resources()
if isinstance(x, Policy)]
for resource in p_resources:
resource.sync(vault_client)
return [x for x in self.resources()
if not isinstance(x, Policy)] | 0.005764 |
def successfuly_encodes(msg, raise_err=False):
"""
    Return True if the message contains the information required to serialize successfully
:param msg: <proto object>
:param raise_err: <bool>
:return: <bool>
"""
result = True
try:
msg.SerializeToString()
except EncodeError as encode_error:
if raise_err:
raise encode_error
result = False
return result | 0.002427 |
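A hedged check using a stock well-known type (this assumes the protobuf package is installed and that the snippet above imports EncodeError from google.protobuf.message).

from google.protobuf.timestamp_pb2 import Timestamp
assert successfuly_encodes(Timestamp(seconds=0))
# A proto2 message with an unset required field would instead return False,
# or raise EncodeError when raise_err=True.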
def print_with_header(header, message, color, indent=0):
"""
Use one of the functions below for printing, not this one.
"""
print()
padding = ' ' * indent
print(padding + color + BOLD + header + ENDC + color + message + ENDC) | 0.004016 |
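A minimal sketch of the kind of helper the docstring refers to; OKGREEN and print_success are made-up names, and the ANSI code is plain green.

OKGREEN = '\033[92m'

def print_success(message, indent=0):
    # BOLD and ENDC are the module-level constants used by print_with_header
    print_with_header('SUCCESS: ', message, OKGREEN, indent)

print_success('all checks passed')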
def __field_callback(self, field, event, *args, **kwargs):
# type: (str, str, *Any, **Any) -> Any
"""
Calls the registered method in the component for the given field event
:param field: A field name
:param event: An event (IPOPO_CALLBACK_VALIDATE, ...)
:return: The callback result, or None
:raise Exception: Something went wrong
"""
# Get the field callback info
cb_info = self.context.get_field_callback(field, event)
if not cb_info:
# No registered callback
return True
# Extract information
callback, if_valid = cb_info
if if_valid and self.state != StoredInstance.VALID:
# Don't call the method if the component state isn't satisfying
return True
# Call it
result = callback(self.instance, field, *args, **kwargs)
if result is None:
# Special case, if the call back returns nothing
return True
return result | 0.002901 |
def _get_node_key(self, node_dict_item):
"""Return a tuple of sorted sources and targets given a node dict."""
s = tuple(sorted(node_dict_item['sources']))
t = tuple(sorted(node_dict_item['targets']))
return (s, t) | 0.00813 |
def get_name_from_abbrev(abbrev, case_sensitive=False):
"""
Given a country code abbreviation, get the full name from the table.
abbrev: (str) Country code to retrieve the full name of.
case_sensitive: (bool) When True, enforce case sensitivity.
"""
if case_sensitive:
country_code = abbrev
else:
country_code = abbrev.upper()
for code, full_name in COUNTRY_TUPLES:
if country_code == code:
return full_name
raise KeyError('No country with that country code.') | 0.001869 |
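Hypothetical lookups, assuming COUNTRY_TUPLES contains the pair ('US', 'United States'):

print(get_name_from_abbrev('us'))      # 'United States' (input is upper-cased first)
try:
    get_name_from_abbrev('us', case_sensitive=True)
except KeyError:
    print('case-sensitive lookup needs the exact code, e.g. "US"')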
def org_find_members(org_id=None, level=None, describe=False):
"""
:param org_id: ID of the organization
:type org_id: string
:param level: The membership level in the org that each member in the result set must have (one of "MEMBER" or
"ADMIN")
:type level: string
:param describe: Whether or not to return the response of ``dxpy.api.user_describe`` for each result. False omits
the describe response; True includes it; a dict will be used as the input to ``dxpy.api.user_describe`` (to
customize the desired set of fields in the describe response).
:type describe: bool or dict
Returns a generator that yields all org members that match the query formed by intersecting all specified
constraints. The search is not restricted by any parameters that were unspecified.
"""
query = {}
if level is not None:
query["level"] = level
query["describe"] = describe
return _org_find(dxpy.api.org_find_members, org_id, query) | 0.005964 |
def gp_rdiff(version, nomed, noxerr, diffRel, divdNdy):
"""example for ratio or difference plots using QM12 data (see gp_panel)
- uses uncertainties package for easier error propagation and rebinning
- stat. error for medium = 0!
- stat. error for cocktail ~ 0!
- statistical error bar on data stays the same for diff
- TODO: implement ratio!
- TODO: adjust statistical error on data for ratio!
- TODO: adjust name and ylabel for ratio
.. image:: pics/diffAbsQM12.png
:width: 450 px
:param version: plot version
:type version: str
:param nomed: don't plot medium
:type nomed: bool
:param noxerr: don't plot x-errors
:type noxerr: bool
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
data, cocktail, medium, rhofo, vacrho = \
OrderedDict(), OrderedDict(), OrderedDict(), OrderedDict(), OrderedDict()
#scale = { # QM14 (19 GeV skip later, factor here only informational)
# '19.6': 1.0340571932983775, '200': 1.0, '39': 0.7776679085382481,
# '27': 0.6412140408244136, '62.4': 0.9174700031778402
#}
scale = {
'19.6': 1.3410566491548412, '200': 1.1051002240771077,
'39': 1.2719203877292842, '27': 1.350873678084769,
'62.4': 1.2664666321635087
}
yunit = 1.0e-3 if not diffRel else 1.
for infile in os.listdir(inDir):
if infile == "cocktail_contribs": continue
energy = re.compile('\d+').search(infile).group()
data_type = re.sub('%s\.dat' % energy, '', infile)
energy = getEnergy4Key(energy)
file_url = os.path.join(inDir, infile)
data_import = np.loadtxt(open(file_url, 'rb'))
if data_type != 'data' and (
(version == 'QM14' and energy != '19.6') or version == 'LatestPatrickJieYi'
):
data_import[:,(1,3,4)] /= scale[energy]
if version == 'LatestPatrickJieYi':
if data_type == 'data':
data_import = data_import[(data_import[:,0] > 0.14) & (data_import[:,0] < 1.0)]
else:
data_import = data_import[data_import[:,0] < 1.0]
if data_type == 'data': data[energy] = data_import
elif data_type == 'cocktail': cocktail[energy] = data_import
elif data_type == 'rho' or data_type == 'vacRho' or data_type == 'medium':
if noxerr and not diffRel: data_import[:,2:] = 0.
data_import[:,1] /= yunit
if data_type == 'rho':
mask = data_import[:,1] > 0.1
rhofo[energy] = data_import if diffRel else data_import[mask]
elif data_type == 'vacRho':
mask = (data_import[:,0] > 0.35) & (data_import[:,1] > 0.01)
vacrho[energy] = data_import if diffRel else data_import[mask]
elif not nomed and data_type == 'medium':
medium[energy] = data_import
nSetsData = len(data)
shift = { '19.6': '1e0', '27': '1e1', '39': '1e2', '62.4': '1e3', '200': '1e4'
} if not diffRel else {
'19.6': '1', '27': '8', '39': '50', '62.4': '200', '200': '900'
}
dataOrdered = OrderedDict()
for energy in sorted(data, key=float, reverse=True):
# data & bin edges
# getUArray propagates stat/syst errors separately internally but
# errors need to be doubled to retrieve correct errors
uData = getUArray(data[energy])
eData = getEdges(data[energy])
uCocktail = getUArray(cocktail[energy])
eCocktail = getEdges(cocktail[energy])
loop = [eData]
if energy in medium and diffRel:
uMedium = getUArray(medium[energy])
eMedium = getEdges(medium[energy])
loop.append(eMedium)
if energy in rhofo and diffRel:
uRho = getUArray(rhofo[energy])
eRho = getEdges(rhofo[energy])
loop.append(eRho)
if energy in vacrho and diffRel:
uVacRho = getUArray(vacrho[energy])
eVacRho = getEdges(vacrho[energy])
loop.append(eVacRho)
# loop data/medium bins
for l, eArr in enumerate(loop):
for i, (e0, e1) in enumzipEdges(eArr):
logging.debug('%s/%d> %g - %g:' % (energy, l, e0, e1))
# get cocktail sum in data bin range
# value+/-0.5*tot.uncert.
uCocktailSum = getCocktailSum(e0, e1, eCocktail, uCocktail)
if uCocktailSum == 0.: continue
# calc. difference and divide by data binwidth again
# + set data point
if l == 0:
uDiff = uData[i] # value+/-0.5*tot.uncert.
if diffRel:
uDiff /= uCocktailSum # value+/-0.5*tot.uncert.
else:
uDiff -= uCocktailSum
uDiff /= data[energy][i,2] * 2 * yunit
dp = [
data[energy][i,0], uDiff.nominal_value,
data[energy][i,2] if not noxerr else 0.,
getErrorComponent(uDiff, 'stat'),
getErrorComponent(uDiff, 'syst')
]
key = ' '.join([energy, 'GeV'])
if noxerr:
if diffRel:
key += ' {/Symbol \264} %s' % shift[energy]
else:
expon = shift[energy].split('e')[1]
key += ' {/Symbol \264} 10^{%s}' % expon
elif l == 1:
# only done if diffRel
uDiff = uMedium[i]
uDiff /= uCocktailSum
dp = [
medium[energy][i,0], uDiff.nominal_value+1,
medium[energy][i,2] if not noxerr else 0.,
0., 0. # both errors included in data points
]
key = ' '.join([energy, 'GeV (Med.)'])
elif l == 2:
# only done if diffRel
uDiff = uRho[i]
uDiff /= uCocktailSum
dp = [
rhofo[energy][i,0], uDiff.nominal_value+1.,
rhofo[energy][i,2] if not noxerr else 0.,
0., 0. # both errors included in data points
]
key = ' '.join([energy, 'GeV (RhoFO.)'])
elif l == 3:
# only done if diffRel
uDiff = uVacRho[i]
uDiff /= uCocktailSum
dp = [
vacrho[energy][i,0], uDiff.nominal_value+1.,
vacrho[energy][i,2] if not noxerr else 0.,
0., 0. # both errors included in data points
]
key = ' '.join([energy, 'GeV (VacRho.)'])
# build list of data points
if diffRel or l == 0:
if dp[0] > 0.7425 and dp[0] < 0.825: continue # mask out omega region
if dp[0] > 0.97 and dp[0] < 1.0495: continue # mask out phi region
if key in dataOrdered:
dataOrdered[key] = np.vstack([dataOrdered[key], dp])
else:
dataOrdered[key] = np.array([ dp ])
if not diffRel:
if energy in medium:
dataOrdered[' '.join([energy, 'GeV (Med.)'])] = medium[energy]
if energy in rhofo:
dataOrdered[' '.join([energy, 'GeV (RhoFO.)'])] = rhofo[energy]
if energy in vacrho:
dataOrdered[' '.join([energy, 'GeV (VacRho.)'])] = vacrho[energy]
# make plot
nSets = len(dataOrdered)
nCats = 4
nSetsPlot = nSets/nCats if nSets > nSetsData else nSets
props = [
'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[i]
for i in reversed(range(nSetsPlot))
]
titles = dataOrdered.keys()
if nSets > nSetsData:
props = zip_flat(props, *[
[
'with lines lt %d lw 4 lc %s' % (j+1, default_colors[i])
for i in reversed(range(nSetsPlot))
]
for j in xrange(nCats-1)
])
titles = zip_flat(dataOrdered.keys()[::nCats], *[ [''] * nSetsPlot for j in xrange(nCats-1) ])
global labels
labels = {
'{BES: STAR Preliminary}' if version == 'QM12Latest200' or \
version == 'QM14' or version == 'LatestPatrickJieYi'
else 'STAR Preliminary': [
0.4 if diffRel else 0.2,0.09 if not diffRel and noxerr else 0.75,False
],
'{200 GeV: PRL 113 022301' if version == 'QM12Latest200' \
or version == 'QM14' or version == 'LatestPatrickJieYi'
else '': [0.4 if diffRel else 0.2,0.04 if not diffRel and noxerr else 0.7,False],
}
yr = [.6,2.5e3] if diffRel else [0.05,1.5e5]
if noxerr:
for k,d in dataOrdered.iteritems():
energy = getEnergy4Key(re.compile('\d+').search(k).group())
d[:,(1,3,4)] *= float(shift[energy])
gpcalls = [
'object 1 rectangle back fc rgb "grey" from 0.75,%f to 0.825,%f' % \
(1.7 if diffRel else 0.5, yr[1]),
'object 2 rectangle back fc rgb "grey" from 0.96,%f to 1.0495,%f' % \
(1.7 if diffRel else 0.5, yr[1]),
'object 3 rectangle back fc rgb "#C6E2FF" from 0.4,%f to 0.75,%f' % \
(1.7 if diffRel else 0.5, yr[1]),
'boxwidth 0.01 absolute',
]
hline = 1. if diffRel else .5
lines = dict(
(('x=%g' % (hline*float(shift[energy]))), 'lc rgb "black" lw 4 lt 4')
for energy in shift
)
pseudo_point = np.array([[-1,1,0,0,0]])
make_plot(
data = dataOrdered.values() + [
pseudo_point, pseudo_point, pseudo_point, pseudo_point
],
properties = props + [
'with lines lt %d lw 4 lc rgb "black"' % (lt+1)
for lt in xrange(nCats)
],
titles = titles + [
'HMBT + QGP', 'BW/FO-{/Symbol \162}', '{/Symbol \162}/{/Symbol \167} VacSF+FB+FO',
'baseline', #'%g%s' % (hline, ' {/Symbol \264} 10^{-3}' if not diffRel else '')
],
name = os.path.join(outDir, 'diff%s%s%s%s' % (
'Rel' if diffRel else 'Abs', version,
'NoMed' if nomed else '', 'NoXErr' if noxerr else ''
)),
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
ylabel = 'Enhancement Ratio' if diffRel else 'Excess Yield / dM_{ee} ({/Symbol \264} 10^{-3} (GeV/c^2)^{-1})',
#labels = labels,
xr = [0.18,0.97], yr = yr, ylog = True,
key = ['at graph 0.96,1.17', 'maxrows 3', 'width -4', 'nobox', 'samplen 0.9'],
lines = lines if noxerr else {},
gpcalls = gpcalls,
lmargin = 0.17, bmargin = 0.1, tmargin = 0.86, rmargin = 0.98,
size = '9in,11in', arrow_offset = 0.9, #arrow_length = 0.4,
)
if nomed or noxerr or version == 'QM12': return 'done'
# integrated enhancement factor
if diffRel:
enhance = {}
data_enhance, medium_enhance, rhofo_enhance, vacrho_enhance = None, None, None, None
for energy in sorted(data, key=float):
for systLMR in [False, True]:
suffix = str(energy)
uEnhanceData = getMassRangesSums(
data[energy], onlyLMR = True,
systLMR = systLMR, suffix = suffix
)
uEnhanceCocktail = getMassRangesSums(
cocktail[energy], onlyLMR = True,
systLMR = systLMR, suffix = suffix
)
if energy in medium:
uEnhanceMed = getMassRangesSums(
medium[energy], onlyLMR = True,
systLMR = systLMR, suffix = suffix
)
if energy in rhofo:
uEnhanceRhoFO = getMassRangesSums(
rhofo[energy], onlyLMR = True,
systLMR = systLMR, suffix = suffix
)
if energy in vacrho:
uEnhanceVacRho = getMassRangesSums(
vacrho[energy], onlyLMR = True,
systLMR = systLMR, suffix = suffix
)
if not systLMR: # uEnhance's are ufloats
uEnhanceData /= uEnhanceCocktail
dp = [
float(energy), uEnhanceData.nominal_value, 0,
getErrorComponent(uEnhanceData, 'stat'),
getErrorComponent(uEnhanceData, 'syst')
]
if data_enhance is None: data_enhance = [ dp ]
else: data_enhance.append(dp)
if energy in medium:
uEnhanceMed /= uEnhanceCocktail
dpM = [ float(energy), uEnhanceMed.nominal_value+1., 0, 0, 0 ]
if medium_enhance is None: medium_enhance = [ dpM ]
else: medium_enhance.append(dpM)
if energy in rhofo:
uEnhanceRhoFO /= uEnhanceCocktail
dpM = [ float(energy), uEnhanceRhoFO.nominal_value+1., 0, 0, 0 ]
if rhofo_enhance is None: rhofo_enhance = [ dpM ]
else: rhofo_enhance.append(dpM)
if energy in vacrho:
uEnhanceVacRho /= uEnhanceCocktail
dpM = [ float(energy), uEnhanceVacRho.nominal_value+1., 0, 0, 0 ]
if vacrho_enhance is None: vacrho_enhance = [ dpM ]
else: vacrho_enhance.append(dpM)
else: # uEnhance's are dicts of ufloats
for k in uEnhanceData:
uEnhanceData[k] /= uEnhanceCocktail[k]
dp = [
float(energy), uEnhanceData[k].nominal_value, 0,
getErrorComponent(uEnhanceData[k], 'stat'),
getErrorComponent(uEnhanceData[k], 'syst')
]
rngstr = k.split('_')[-1]
data_key = 'data_' + rngstr
if data_key not in enhance: enhance[data_key] = [ dp ]
else: enhance[data_key].append(dp)
if k in uEnhanceMed:
uEnhanceMed[k] /= uEnhanceCocktail[k]
dpM = [ float(energy), uEnhanceMed[k].nominal_value ]
med_key = 'model_' + rngstr
if med_key not in enhance: enhance[med_key] = [ dpM ]
else: enhance[med_key].append(dpM)
if k in uEnhanceRhoFO:
uEnhanceRhoFO[k] /= uEnhanceCocktail[k]
dpM = [ float(energy), uEnhanceRhoFO[k].nominal_value+1. ]
rhofo_key = 'rhofo_' + rngstr
if rhofo_key not in enhance: enhance[rhofo_key] = [ dpM ]
else: enhance[rhofo_key].append(dpM)
if k in uEnhanceVacRho:
uEnhanceVacRho[k] /= uEnhanceCocktail[k]
dpM = [ float(energy), uEnhanceVacRho[k].nominal_value+1. ]
vacrho_key = 'vacrho_' + rngstr
if vacrho_key not in enhance: enhance[vacrho_key] = [ dpM ]
else: enhance[vacrho_key].append(dpM)
xfacs = os.path.join(outDir, 'xfacs%s.dat' % version)
if os.path.exists(xfacs): os.remove(xfacs)
fSystLMR = open(xfacs, 'ab')
for k in sorted(enhance.keys()):
np.savetxt(fSystLMR, enhance[k], fmt = '%g', header = k, comments = '\n\n')
fSystLMR.close()
yr_upp = 4 if version == 'QM12Latest200' or version == 'QM14' else 7
if version == 'LatestPatrickJieYi': yr_upp = 5.5
#labels.update({
# '{LMR: %.2f < M_{ee} < %.2f GeV/c^{2}}' % (eRanges[1], eRanges[2]): [0.4,0.15,False]
#})
make_plot(
data = [
pseudo_point, pseudo_point, pseudo_point,
np.array([[17.3,2.73,0,0.25,1.47]]),
np.array([[200,4.7,0,0.4,1.5]]),
np.array(enhance['data_0.15-0.75']),
np.array(enhance['data_0.4-0.75']),
np.array(medium_enhance),
np.array(rhofo_enhance), np.array(vacrho_enhance)
],
properties = [
'lt 1 lw 4 ps 2 lc rgb "white" pt 19',
'lt 1 lw 4 ps 2 lc rgb "white" pt 20',
'lt 1 lw 4 ps 2 lc rgb "white" pt 18',
'lt 1 lw 4 ps 2 lc %s pt 19' % default_colors[1],
'lt 1 lw 4 ps 2 lc %s pt 20' % default_colors[3],
'lt 1 lw 4 ps 2 lc %s pt 18' % default_colors[4],
'lt 1 lw 4 ps 2 lc %s pt 18' % default_colors[0],
'with lines lt 2 lw 4 lc %s' % default_colors[-1],
'with lines lt 3 lw 4 lc %s' % default_colors[-1],
'with lines lt 4 lw 4 lc %s' % default_colors[-1],
],
titles = [
'CERES Pb+Au', 'PHENIX Au+Au', 'STAR Au+Au',
'', '', '', '',
'HMBT + QGP', 'BW/FO-{/Symbol \162}', '{/Symbol \162}/{/Symbol \167} VacSF+FB',
],
name = os.path.join(outDir, 'enhance%s' % version),
xlabel = '{/Symbol \326}s_{NN} (GeV)',
ylabel = 'LMR Enhancement Factor',
xlog = True, key = [ 'at graph 0.9,0.98', 'nobox', 'maxrows 4' ],
size = '10in,8in', bmargin = 0.13, tmargin = 0.92, rmargin = 0.99,
yr = [1.,yr_upp], xr = [14,220], gpcalls = [
'format x "%g"',
'xtics (10, 20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
'boxwidth 0.025 absolute',
'label 50 "{/=18 0.2 < M_{ee} < 0.6 GeV/c^{2}}" at 15.5,3 tc %s rotate center' % default_colors[1],
'label 51 "{/=18 0.15 < M_{ee} < 0.75 GeV/c^{2}}" at 180,4.2 tc %s rotate center' % default_colors[3],
'label 52 "{/=18 0.4 < M_{ee} < 0.75 GeV/c^{2}}" at 75,3.1 tc %s rotate by -20' % default_colors[0],
'label 53 "{/=18 0.15 < M_{ee} < 0.75 GeV/c^{2}}" at 50,1.2 tc %s' % default_colors[4]
], #labels = labels
)
return 'done'
# integrated excess yield in mass ranges
excess = {}
for k, v in dataOrdered.iteritems():
suffix = ''
energy = getEnergy4Key(re.compile('\d+').search(k).group())
if fnmatch(k, '*Med.*'):
suffix = '_Med'
if version != 'LatestPatrickJieYi' and energy == '27': continue # TODO
if fnmatch(k, '*RhoFO.*'): suffix = '_RhoFO'
if fnmatch(k, '*VacRho.*'): suffix = '_VacRho'
exc = getMassRangesSums(np.array(v), onlyLMR = True)
if divdNdy: exc /= dNdyPi0[energy] * 1e-2
dp = [
float(energy), exc.nominal_value, 0,
getErrorComponent(exc, 'stat'), getErrorComponent(exc, 'syst')
]
if suffix == '_Med' and not diffRel and not divdNdy:
print dp
key = 'LMR' + suffix
if key not in excess: excess[key] = [ dp ]
else: excess[key].append(dp)
logging.debug(excess)
avdata = np.array(excess['LMR'])
avg = np.average(avdata[:,1], weights = avdata[:,3])
graph_data = [
np.array([
[ 7.7, avg, 0, 0, avdata[-1][-1]],
[ 19.6, avg, 0, 0, avdata[-1][-1]]
]),
np.array([
[ 19.6, avg, 0, 0, 0], [ 200., avg, 0, 0, 0]
]),
np.array([
[ 7.7, 2*avg, 0, 0, 0], [ 19.6, avg, 0, 0, 0],
]),
np.array(excess['LMR']),
]
props = [
'with filledcurves pt 0 lc %s lw 4 lt 2' % default_colors[8],
'with lines lc %s lw 4 lt 2' % default_colors[8],
'with lines lc %s lw 8 lt 2' % default_colors[1],
'lt 1 lw 4 ps 2 lc %s pt 18' % default_colors[0],
]
tits = [
'BES-I extrapolation', '', 'model expectation', 'STAR Au+Au',
]
if version != 'QM14':
graph_data += [
np.array(excess['LMR_Med']),
np.array(excess['LMR_VacRho']),
np.array(excess['LMR_RhoFO']),
]
props += [
'with lines lt 2 lw 4 lc %s' % default_colors[-1],
'with lines lt 3 lw 4 lc %s' % default_colors[-1],
'with lines lt 4 lw 4 lc %s' % default_colors[-1],
]
tits += [
'HMBT + QGP', '{/Symbol \162}/{/Symbol \167} VacSF+FB', 'BW/FO-{/Symbol \162}',
]
yr_upp = 4.5 if version == 'QM12Latest200' or version == 'QM14' else 7
if version == 'LatestPatrickJieYi': yr_upp = 2 if divdNdy else 2.
labels = {} if version != 'QM14' else labels
if divdNdy:
labels.update(dict((str(v), [float(k)*0.9,yr_upp*1.05,True]) for k,v in dNdyPi0.items()))
labels.update({ 'dN/dy|_{/Symbol \\160}': [100,yr_upp*1.05,True]})
gpcalls = [
'format x "%g"',
'xtics (7,10,20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
'boxwidth 0.025 absolute',
]
if version == 'QM14':
labels.update({
'{LMR: %.2f < M_{ee} < %.2f GeV/c^{2}}' % (eRanges[1], eRanges[2]): [0.4,0.15,False],
})
else:
gpcalls.append('label 52 "{/=18 0.4 < M_{ee} < 0.75 GeV/c^{2}}" at 60,0.4 tc %s' % default_colors[0])
make_plot(
data = graph_data, properties = props, titles = tits,
name = os.path.join(outDir, 'excess%s%s' % (version,'DivdNdy' if divdNdy else '')),
xlabel = '{/Symbol \326}s_{NN} (GeV)',
ylabel = 'LMR Excess Yield %s({/Symbol \264} 10^{-%d})' % (
'/ dN/dy|_{/Symbol \\160} ' if divdNdy else '', 5 if divdNdy else 3
),
xlog = True, xr = [7,220], size = '10in,8in',
key = ['at graph 1.05,0.98', 'width -3', 'nobox', 'maxrows 3'],
bmargin = 0.13, tmargin = 0.92, rmargin = 0.99,
yr = [0,yr_upp], gpcalls = gpcalls, labels = labels,
)
return 'done' | 0.023741 |
def create_jwt_token(secret, client_id):
"""
Create JWT token for GOV.UK Notify
Tokens have standard header:
{
"typ": "JWT",
"alg": "HS256"
}
Claims consist of:
iss: identifier for the client
iat: issued at in epoch seconds (UTC)
:param secret: Application signing secret
:param client_id: Identifier for the client
:return: JWT token for this request
"""
assert secret, "Missing secret key"
assert client_id, "Missing client id"
headers = {
"typ": __type__,
"alg": __algorithm__
}
claims = {
'iss': client_id,
'iat': epoch_seconds()
}
return jwt.encode(payload=claims, key=secret, headers=headers).decode() | 0.001357 |
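A hedged round-trip with made-up credentials: decoding with PyJWT verifies the issuer claim set above.

import jwt
token = create_jwt_token(secret='my-signing-secret', client_id='my-service')
claims = jwt.decode(token, key='my-signing-secret', algorithms=['HS256'])
assert claims['iss'] == 'my-service'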
def dyld_find(name, executable_path=None, env=None):
"""
Find a library or framework using dyld semantics
"""
name = _ensure_utf8(name)
executable_path = _ensure_utf8(executable_path)
for path in dyld_image_suffix_search(chain(
dyld_override_search(name, env),
dyld_executable_path_search(name, executable_path),
dyld_default_search(name, env),
), env):
if os.path.isfile(path):
return path
raise ValueError, "dylib %s could not be found" % (name,) | 0.003597 |
def from_text(text):
"""Convert text into an opcode.
@param text: the textual opcode
@type text: string
@raises UnknownOpcode: the opcode is unknown
@rtype: int
"""
if text.isdigit():
value = int(text)
if value >= 0 and value <= 15:
return value
value = _by_text.get(text.upper())
if value is None:
raise UnknownOpcode
return value | 0.002445 |
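Two quick checks of the behaviour above: numeric strings pass straight through, and mnemonics are looked up case-insensitively (assuming 'QUERY' is one of the names registered in _by_text).

assert from_text('5') == 5
assert from_text('query') == from_text('QUERY')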
def _restore_port_binding(self,
switch_ip, pvlan_ids,
port, native_vlan):
"""Restores a set of vlans for a given port."""
intf_type, nexus_port = nexus_help.split_interface_name(port)
# If native_vlan is configured, this is isolated since
# two configs (native + trunk) must be sent for this vlan only.
if native_vlan != 0:
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, native_vlan,
intf_type, nexus_port, True)
# If this is the only vlan
if len(pvlan_ids) == 1:
return
concat_vlans = ''
compressed_vlans = self._get_compressed_vlan_list(pvlan_ids)
for pvlan in compressed_vlans:
if concat_vlans == '':
concat_vlans = "%s" % pvlan
else:
concat_vlans += ",%s" % pvlan
# if string starts getting a bit long, send it.
if len(concat_vlans) >= const.CREATE_PORT_VLAN_LENGTH:
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, concat_vlans,
intf_type, nexus_port, False)
concat_vlans = ''
# Send remaining vlans if any
if len(concat_vlans):
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, concat_vlans,
intf_type, nexus_port, False) | 0.002712 |
def _conditions(self, full_path, environ):
"""Return a tuple of etag, last_modified by mtime from stat."""
mtime = stat(full_path).st_mtime
return str(mtime), rfc822.formatdate(mtime) | 0.009662 |
def download(link, outdir='.', chunk_size=4096):
    '''
    This is the main function: it downloads a given link
    and saves the file to outdir (default = current directory)
    '''
url = None
fh = None
eta = 'unknown '
bytes_so_far = 0
filename = filename_from_url(link) or "."
cj = cjar.CookieJar()
# get filename for temp file in current directory
(fd_tmp, tmpfile) = tempfile.mkstemp(
".tmp", prefix=filename + ".", dir=outdir)
os.close(fd_tmp)
os.unlink(tmpfile)
try:
opener = ulib.build_opener(ulib.HTTPCookieProcessor(cj))
url = opener.open(link)
fh = open(tmpfile, mode='wb')
headers = url.info()
try:
total_size = int(headers['Content-Length'])
except (ValueError, KeyError, TypeError):
total_size = 'unknown'
try:
md5_header = headers['Content-MD5']
except (ValueError, KeyError, TypeError):
md5_header = None
# Define which callback we're gonna use
if total_size != 'unknown':
if CONSOLE_WIDTH > 57:
reporthook = report_bar
else:
reporthook = report_onlysize
else:
reporthook = report_unknown
# Below are the registers to calculate network transfer rate
time_register = time()
speed = 0.0
speed_list = []
bytes_register = 0.0
eta = 'unknown '
# Loop that reads in chunks, calculates speed and does the callback to
# print the progress
while True:
chunk = url.read(chunk_size)
            # Update download speed every 0.5 seconds
if time() - time_register > 0.5:
speed = (bytes_so_far - bytes_register) / \
(time() - time_register)
speed_list.append(speed)
# Set register properly for future use
time_register = time()
bytes_register = bytes_so_far
                # Estimate of remaining download time
if total_size != 'unknown' and len(speed_list) == 3:
speed_mean = sum(speed_list) / 3
eta_sec = int((total_size - bytes_so_far) / speed_mean)
eta = str(datetime.timedelta(seconds=eta_sec))
speed_list = []
bytes_so_far += len(chunk)
if not chunk:
sys.stdout.write('\n')
break
fh.write(chunk)
reporthook(bytes_so_far, total_size, speed, eta)
except KeyboardInterrupt:
print('\n\nCtrl + C: Download aborted by user')
print('Partial downloaded file:\n{0}'.format(os.path.abspath(tmpfile)))
sys.exit(1)
finally:
if url:
url.close()
if fh:
fh.close()
filenamealt = filename_from_headers(headers)
if filenamealt:
filename = filenamealt
# add numeric '(x)' suffix if filename already exists
if os.path.exists(os.path.join(outdir, filename)):
filename = filename_fix_existing(filename, outdir)
filename = os.path.join(outdir, filename)
shutil.move(tmpfile, filename)
# Check if sizes matches
if total_size != 'unknown' and total_size != bytes_so_far:
print(
            '\n\nWARNING!! Downloaded file size mismatch... Probably corrupted...')
# Check md5 if it was in html header
if md5_header:
print('\nValidating MD5 checksum...')
if md5_header == md5sum(filename):
print('MD5 checksum passed!')
else:
            print('MD5 checksum did NOT pass!!!')
return filename | 0.000541 |
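A hypothetical invocation (the URL is illustrative, and the output directory must already exist because the temporary file is created inside it):

saved_path = download('https://example.com/archive.tar.gz', outdir='downloads')
print('saved to', saved_path)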
def restart(self, container, instances=None, map_name=None, **kwargs):
"""
Restarts instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will restart all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container restart.
:return: Return values of restarted containers.
:rtype: list[dockermap.map.runner.ActionOutput]
"""
return self.run_actions('restart', container, instances=instances, map_name=map_name, **kwargs) | 0.006116 |