text (stringlengths 78–104k) | score (float64, 0–0.18)
---|---
def get_siblings(self):
"""Get a list of sibling accounts associated with provided account."""
method = 'GET'
endpoint = '/rest/v1.1/users/{}/siblings'.format(
self.client.sauce_username)
return self.client.request(method, endpoint) | 0.007246 |
def rel_path(filename):
"""
    Return the path to the given filename relative to this module's directory
"""
return os.path.join(os.getcwd(), os.path.dirname(__file__), filename) | 0.006024 |
def build_diagonals(self):
"""
Builds the diagonals for the coefficient array
"""
##########################################################
# INCORPORATE BOUNDARY CONDITIONS INTO COEFFICIENT ARRAY #
##########################################################
# Roll to keep the proper coefficients at the proper places in the
# arrays: Python will naturally just do vertical shifts instead of
        # diagonal shifts, so this takes into account the horizontal component
# to ensure that boundary values are at the right place.
self.l2 = np.roll(self.l2, -2)
self.l1 = np.roll(self.l1, -1)
self.r1 = np.roll(self.r1, 1)
self.r2 = np.roll(self.r2, 2)
# Then assemble these rows: this is where the periodic boundary condition
# can matter.
if self.coeff_matrix is not None:
pass
elif self.BC_E == 'Periodic' and self.BC_W == 'Periodic':
# In this case, the boundary-condition-related stacking has already
# happened inside b.c.-handling function. This is because periodic
# boundary conditions require extra diagonals to exist on the edges of
# the solution array
pass
else:
self.diags = np.vstack((self.l2,self.l1,self.c0,self.r1,self.r2))
self.offsets = np.array([-2,-1,0,1,2])
# Everybody now (including periodic b.c. cases)
self.coeff_matrix = spdiags(self.diags, self.offsets, self.nx, self.nx, format='csr') | 0.015572 |
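A minimal sketch of the diagonal-stacking step above, assuming a small toy grid with constant coefficients (and ignoring the boundary-condition rolls): scipy.sparse.spdiags turns the stacked rows and the offsets array into a pentadiagonal coefficient matrix.

import numpy as np
from scipy.sparse import spdiags

nx = 5
l2 = np.full(nx, 1.0)    # second lower diagonal
l1 = np.full(nx, -4.0)   # first lower diagonal
c0 = np.full(nx, 6.0)    # main diagonal
r1 = np.full(nx, -4.0)   # first upper diagonal
r2 = np.full(nx, 1.0)    # second upper diagonal

diags = np.vstack((l2, l1, c0, r1, r2))
offsets = np.array([-2, -1, 0, 1, 2])
coeff_matrix = spdiags(diags, offsets, nx, nx, format='csr')
print(coeff_matrix.toarray())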
def login(self):
"""
        Gets and stores an OAuth token from RightScale.
"""
log.debug('Logging into RightScale...')
login_data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token,
}
response = self._request('post', self.oauth_path, data=login_data)
raw_token = response.json()
auth_token = "Bearer %s" % raw_token['access_token']
self.s.headers['Authorization'] = auth_token
# Generate an expiration time for our token of 60-seconds before the
# standard time returned by RightScale. This will be used in the
# self.client property to validate that our token is still usable on
# every API call.
log.debug('Auth Token expires in %s(s)' % raw_token['expires_in'])
self.auth_expires_at = time.time() + int(raw_token['expires_in']) - 60 | 0.002205 |
def _generate_next_token_helper(self, past_states, transitions):
""" generates next token based previous states """
key = tuple(past_states)
assert key in transitions, "%s" % str(key)
return utils.weighted_choice(transitions[key].items()) | 0.007407 |
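utils.weighted_choice is not shown in this snippet; a plausible minimal implementation, assuming it takes an iterable of (token, weight) pairs and samples a token with probability proportional to its weight, might look like this.

import random

def weighted_choice(pairs):
    # pairs: iterable of (item, weight); returns an item sampled
    # with probability proportional to its weight.
    pairs = list(pairs)
    total = sum(weight for _, weight in pairs)
    threshold = random.uniform(0, total)
    cumulative = 0.0
    for item, weight in pairs:
        cumulative += weight
        if cumulative >= threshold:
            return item
    return pairs[-1][0]  # guard against floating-point rounding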
def aStockQoutation(self,code):
        '''
        Subscribe to real-time quote data for a single stock and receive the pushed updates.
        :param code: stock code
        :return:
        '''
        # Set handler --> subscribe --> call the query interface
        # Time-share (intraday trend) data
self.quote_ctx.set_handler(RTDataTest())
self.quote_ctx.subscribe(code, SubType.RT_DATA)
ret_code_rt_data, ret_data_rt_data = self.quote_ctx.get_rt_data(code)
        # Tick-by-tick trades
self.quote_ctx.set_handler(TickerTest())
self.quote_ctx.subscribe(code, SubType.TICKER)
ret_code_rt_ticker, ret_data_rt_ticker = self.quote_ctx.get_rt_ticker(code)
        # Quote
self.quote_ctx.set_handler(StockQuoteTest())
self.quote_ctx.subscribe(code, SubType.QUOTE)
ret_code_stock_quote, ret_data_stock_quote = self.quote_ctx.get_stock_quote([code])
        # Real-time K-line (candlestick) data
self.quote_ctx.set_handler(CurKlineTest())
kTypes = [SubType.K_1M, SubType.K_5M, SubType.K_15M, SubType.K_30M, SubType.K_60M, SubType.K_DAY,
SubType.K_WEEK, SubType.K_MON]
auTypes = [AuType.NONE, AuType.QFQ, AuType.HFQ]
num = 10
ret_code_cur_kline = RET_OK
for kType in kTypes:
self.quote_ctx.subscribe(code, kType)
for auType in auTypes:
ret_code_cur_kline_temp, ret_data_cur_kline = self.quote_ctx.get_cur_kline(code, num, kType, auType)
if ret_code_cur_kline_temp is RET_ERROR:
ret_code_cur_kline = RET_ERROR
        # Order book
self.quote_ctx.set_handler(OrderBookTest())
self.quote_ctx.subscribe(code, SubType.ORDER_BOOK)
ret_code_order_book, ret_data_order_book = self.quote_ctx.get_order_book(code)
        # Broker queue
self.quote_ctx.set_handler(BrokerTest())
self.quote_ctx.subscribe(code, SubType.BROKER)
ret_code_broker_queue, bid_frame_table, ask_frame_table = self.quote_ctx.get_broker_queue(code)
return ret_code_rt_data+ret_code_rt_ticker+ret_code_stock_quote+ret_code_cur_kline+ret_code_order_book+ret_code_broker_queue | 0.005503 |
def strip_ccmp(self, idx):
"""strip(8 byte) wlan.ccmp.extiv
CCMP Extended Initialization Vector
:return: int
number of processed bytes
:return: ctypes.raw
ccmp vector
"""
ccmp_extiv = None
if len(self._packet[idx:]) >= 8:
raw_bytes = self._packet[idx:idx + 8]
ccmp_extiv, = struct.unpack_from('Q', raw_bytes, 0)
return 8, ccmp_extiv | 0.004505 |
def get_fields_class(self, class_name):
"""
Return all fields of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedField` objects
"""
l = []
for i in self.get_classes():
for j in i.get_fields():
if class_name == j.get_class_name():
l.append(j)
return l | 0.006881 |
def from_dict(cls, d):
"""Instantiate a SemI from a dictionary representation."""
read = lambda cls: (lambda pair: (pair[0], cls.from_dict(pair[1])))
return cls(
variables=map(read(Variable), d.get('variables', {}).items()),
properties=map(read(Property), d.get('properties', {}).items()),
roles=map(read(Role), d.get('roles', {}).items()),
predicates=map(read(Predicate), d.get('predicates', {}).items())
) | 0.00616 |
def encrypt(data, key):
'''encrypt the data with the key'''
data = __tobytes(data)
data_len = len(data)
data = ffi.from_buffer(data)
key = ffi.from_buffer(__tobytes(key))
out_len = ffi.new('size_t *')
result = lib.xxtea_encrypt(data, data_len, key, out_len)
ret = ffi.buffer(result, out_len[0])[:]
lib.free(result)
return ret | 0.00274 |
def translate_file(input_path, output_path):
'''
    Translates the input JS file to Python and saves it to the output path.
It appends some convenience code at the end so that it is easy to import JS objects.
For example we have a file 'example.js' with: var a = function(x) {return x}
translate_file('example.js', 'example.py')
    Now example.py can be easily imported and used:
>>> from example import example
>>> example.a(30)
30
'''
js = get_file_contents(input_path)
py_code = translate_js(js)
lib_name = os.path.basename(output_path).split('.')[0]
head = '__all__ = [%s]\n\n# Don\'t look below, you will not understand this Python code :) I don\'t.\n\n' % repr(
lib_name)
tail = '\n\n# Add lib to the module scope\n%s = var.to_python()' % lib_name
out = head + py_code + tail
write_file_contents(output_path, out) | 0.004469 |
def wait_for_link_text(self, link_text, timeout=settings.LARGE_TIMEOUT):
""" The shorter version of wait_for_link_text_visible() """
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_link_text_visible(link_text, timeout=timeout) | 0.005831 |
def _parse_btrfs_info(data):
'''
Parse BTRFS device info data.
'''
ret = {}
for line in [line for line in data.split("\n") if line][:-1]:
if line.startswith("Label:"):
line = re.sub(r"Label:\s+", "", line)
label, uuid_ = [tkn.strip() for tkn in line.split("uuid:")]
ret['label'] = label != 'none' and label or None
ret['uuid'] = uuid_
continue
if line.startswith("\tdevid"):
dev_data = re.split(r"\s+", line.strip())
dev_id = dev_data[-1]
ret[dev_id] = {
'device_id': dev_data[1],
'size': dev_data[3],
'used': dev_data[5],
}
return ret | 0.001357 |
def savemat(filename, data):
"""Save data to MAT-file:
savemat(filename, data)
The filename argument is either a string with the filename, or
a file like object.
The parameter ``data`` shall be a dict with the variables.
A ``ValueError`` exception is raised if data has invalid format, or if the
data structure cannot be mapped to a known MAT array type.
"""
if not isinstance(data, Mapping):
raise ValueError('Data should be a dict of variable arrays')
if isinstance(filename, basestring):
fd = open(filename, 'wb')
else:
fd = filename
write_file_header(fd)
# write variables
for name, array in data.items():
write_compressed_var_array(fd, array, name)
fd.close() | 0.001307 |
def detach(self):
"""If alive then mark as dead and return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None and self._registry.pop(self, None):
return (obj, info.func, info.args, info.kwargs or {}) | 0.006006 |
async def handle_frame(self, frame):
"""Handle incoming API frame, return True if this was the expected frame."""
if not isinstance(frame, FrameGetVersionConfirmation):
return False
self.version = frame.version
self.success = True
return True | 0.010204 |
def convert_lrelu(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert leaky relu layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting lrelu ...')
if names == 'short':
tf_name = 'lRELU' + random_string(3)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
leakyrelu = \
keras.layers.LeakyReLU(alpha=params['alpha'], name=tf_name)
layers[scope_name] = leakyrelu(layers[inputs[0]]) | 0.001274 |
def config_name_from_full_name(full_name):
"""Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format
"""
projects, _, configs, result = full_name.split("/")
if projects != "projects" or configs != "configs":
raise ValueError(
"Unexpected format of resource",
full_name,
'Expected "projects/{proj}/configs/{cfg}"',
)
return result | 0.00098 |
def _get_logger_file_handles(self):
"""
Find the file handles used by our logger's handlers.
"""
handles = []
for handler in self.logger.handlers:
# The following code works for logging's SysLogHandler,
# StreamHandler, SocketHandler, and their subclasses.
for attr in ['sock', 'socket', 'stream']:
try:
handle = getattr(handler, attr)
if handle:
handles.append(handle)
break
except AttributeError:
continue
return handles | 0.003115 |
def remove_phenotype(self, institute, case, user, link, phenotype_id,
is_group=False):
"""Remove an existing phenotype from a case
Args:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (dict): The url to be used in the event
phenotype_id (str): A phenotype id
Returns:
updated_case(dict)
"""
LOG.info("Removing HPO term from case {0}".format(case['display_name']))
if is_group:
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$pull': {
'phenotype_terms': {'phenotype_id': phenotype_id},
'phenotype_groups': {'phenotype_id': phenotype_id},
},
},
return_document=pymongo.ReturnDocument.AFTER
)
else:
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$pull': {
'phenotype_terms': {'phenotype_id': phenotype_id},
},
},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.info("Creating event for removing phenotype term {0}" \
" from case {1}".format(phenotype_id, case['display_name']))
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='remove_phenotype',
subject=case['display_name']
)
LOG.debug("Case updated")
return updated_case | 0.00385 |
def status(self):
"""Provides current status of processing episode.
Structure of status:
original_filename => formatted_filename, state, messages
:returns: mapping of current processing state
:rtype: dict
"""
return {
self.original: {
'formatted_filename': self.out_location,
'state': self.state,
'messages': '\n\t'.join(self.messages),
}
} | 0.004141 |
def search_next(self, obj):
"""
        Takes the dictionary returned by the 'search' or 'search_next' function and gets the next batch of results
Args:
obj: dictionary returned by the 'search' or 'search_next' function
Returns:
A dictionary with a data returned by the server
Raises:
HttpException with the error message from the server
"""
        if 'meta' in obj and 'next' in obj['meta'] and obj['meta']['next'] is not None:
uri = self.api_url % obj['meta']['next']
header, content = self._http_uri_request(uri)
resp = json.loads(content)
if not self._is_http_response_ok(header):
error = resp.get('error_message', 'Unknown Error')
raise HttpException(header.status, header.reason, error)
return resp
return {} | 0.007865 |
def sub_retab(match):
r"""Remove all tabs and convert them into spaces.
PARAMETERS:
match -- regex match; uses re_retab pattern: \1 is text before tab,
\2 is a consecutive string of tabs.
A simple substitution of 4 spaces would result in the following:
to\tlive # original
to live # simple substitution
Instead, we convert tabs like the following:
to\tlive # original
to live # the tab *looks* like two spaces, so we convert
# it to two spaces
"""
before = match.group(1)
tabs = len(match.group(2))
return before + (' ' * (TAB_SIZE * tabs - len(before) % TAB_SIZE)) | 0.00146 |
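The re_retab pattern and TAB_SIZE constant used by sub_retab are defined elsewhere in the module; a sketch of how they might look, with group 1 capturing the text before a run of tabs and group 2 the run itself, plus a usage example:

import re

TAB_SIZE = 4
re_retab = re.compile(r'([^\t\n]*)(\t+)')  # group 1: text before tabs, group 2: run of tabs

print(re_retab.sub(sub_retab, 'to\tlive'))  # 'to  live' -- padded to the next tab stop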
async def cancel_task(app: web.Application,
task: asyncio.Task,
*args, **kwargs
) -> Any:
"""
Convenience function for calling `TaskScheduler.cancel(task)`
This will use the default `TaskScheduler` to cancel the given task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def stop_after(app, task, duration):
await asyncio.sleep(duration)
await scheduler.cancel_task(app, task)
print('stopped!')
async def start(app):
# Start first task
task = await scheduler.create_task(app, current_time(interval=2))
# Start second task to stop the first
await scheduler.create_task(app, stop_after(app, task, duration=10))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
"""
return await get_scheduler(app).cancel(task, *args, **kwargs) | 0.001552 |
def list_namespaced_config_map(self, namespace, **kwargs):
"""
list or watch objects of kind ConfigMap
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_config_map(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ConfigMapList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_config_map_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_config_map_with_http_info(namespace, **kwargs)
return data | 0.002556 |
def add_child_resource_client(self, res_name, res_spec):
"""Add a resource client to the container and start the resource connection"""
res_spec = dict(res_spec)
res_spec['name'] = res_name
res = self.client_resource_factory(
res_spec, parent=self, logger=self._logger)
        self.children[resource.escape_name(res_name)] = res
self._children_dirty = True
res.set_ioloop(self.ioloop)
res.start()
return res | 0.00818 |
def get_2d_markers(
self, component_info=None, data=None, component_position=None, index=None
):
"""Get 2D markers.
:param index: Specify which camera to get 2D from, will be returned as
first entry in the returned array.
"""
return self._get_2d_markers(
data, component_info, component_position, index=index
) | 0.010076 |
def RestoreSnapshot(self,name=None):
"""Restores an existing Hypervisor level snapshot.
Supply snapshot name to restore
If no snapshot name is supplied will restore the first snapshot found
>>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT02').RestoreSnapshot().WaitUntilComplete()
0
"""
if not len(self.data['details']['snapshots']): raise(clc.CLCException("No snapshots exist"))
if name is None: name = self.GetSnapshots()[0]
name_links = [obj['links'] for obj in self.data['details']['snapshots'] if obj['name']==name][0]
return(clc.v2.Requests(clc.v2.API.Call('POST',
[obj['href'] for obj in name_links if obj['rel']=='restore'][0],
session=self.session),
alias=self.alias,
session=self.session)) | 0.038911 |
def get_meta_image_url(request, image):
"""
Resize an image for metadata tags, and return an absolute URL to it.
"""
rendition = image.get_rendition(filter='original')
return request.build_absolute_uri(rendition.url) | 0.004237 |
def filter(self, table, security_groups, filter_string):
"""Naive case-insensitive search."""
query = filter_string.lower()
return [security_group for security_group in security_groups
if query in security_group.name.lower()] | 0.007547 |
def get_wellseries(self, matrix):
"""
Returns the grid as a WellSeries of WellSeries
"""
res = OrderedDict()
for col, cells in matrix.items():
if col not in res:
res[col] = OrderedDict()
for row, cell in cells.items():
res[col][row] = self.children_by_name[
''.join(cell)
]
res[col] = WellSeries(res[col], name=col)
return WellSeries(res) | 0.004082 |
def clear(self):
"""Remove all items."""
self._fwdm.clear()
self._invm.clear()
self._sntl.nxt = self._sntl.prv = self._sntl | 0.012903 |
def find_python_files(dirname):
"""Yield all of the importable Python files in `dirname`, recursively.
To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one. The
assumption is that `dirname` was specified directly, so the user knows
best, but subdirectories are checked for a __init__.py to be sure we only
find the importable files.
"""
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
if i > 0 and '__init__.py' not in filenames:
# If a directory doesn't have __init__.py, then it isn't
# importable and neither are its files
del dirnames[:]
continue
for filename in filenames:
# We're only interested in files that look like reasonable Python
# files: Must end with .py or .pyw, and must not have certain funny
# characters that probably mean they are editor junk.
if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
yield os.path.join(dirpath, filename) | 0.000888 |
def download_member_project_data(cls, member_data, target_member_dir,
max_size=MAX_SIZE_DEFAULT,
id_filename=False):
"""
Download files to sync a local dir to match OH member project data.
:param member_data: This field is data related to member in a project.
:param target_member_dir: This field is the target directory where data
will be downloaded.
        :param max_size: This field is the maximum file size. Its default
value is 128m.
"""
logging.debug('Download member project data...')
sources_shared = member_data['sources_shared']
file_data = cls._get_member_file_data(member_data,
id_filename=id_filename)
for basename in file_data:
# This is using a trick to identify a project's own data in an API
# response, without knowing the project's identifier: if the data
# isn't a shared data source, it must be the project's own data.
if file_data[basename]['source'] in sources_shared:
continue
target_filepath = os.path.join(target_member_dir, basename)
download_file(download_url=file_data[basename]['download_url'],
target_filepath=target_filepath,
max_bytes=parse_size(max_size)) | 0.002761 |
def sigmoid_cross_entropy_with_logits(logits, targets):
"""Sigmoid cross-entropy loss.
Args:
logits: a mtf.Tensor
targets: a mtf.Tensor with the same shape as logits
Returns:
a mtf.Tensor whose shape is equal to logits.shape
Raises:
ValueError: if the shapes do not match.
"""
if logits.shape != targets.shape:
raise ValueError(
"logits shape must equal targets shape"
"logits=%s targets=%s" % (logits.to_string, targets.to_string))
x = logits
z = targets
return mtf.relu(x) - x * z + mtf.log(1 + mtf.exp(-mtf.abs(x))) | 0.010435 |
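The closed form relu(x) - x*z + log(1 + exp(-|x|)) is the usual numerically stable rewrite of -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x)); a quick NumPy check of the equivalence on plain arrays (outside mtf):

import numpy as np

def stable_xent(x, z):
    # max(x, 0) - x*z + log1p(exp(-|x|)) avoids overflow for large |x|
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

def naive_xent(x, z):
    p = 1.0 / (1.0 + np.exp(-x))
    return -z * np.log(p) - (1 - z) * np.log(1 - p)

x = np.array([-3.0, -0.5, 0.0, 2.0, 7.0])
z = np.array([0.0, 1.0, 1.0, 0.0, 1.0])
print(np.allclose(stable_xent(x, z), naive_xent(x, z)))  # True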
def index(
self,
symbol='000001',
market='sh',
category='9',
start='0',
offset='100'):
        '''
        Fetch index K-line (candlestick) data.
        K-line categories:
        - 0 5-minute K-line
        - 1 15-minute K-line
        - 2 30-minute K-line
        - 3 1-hour K-line
        - 4 daily K-line
        - 5 weekly K-line
        - 6 monthly K-line
        - 7 1-minute
        - 8 1-minute K-line
        - 9 daily K-line
        - 10 quarterly K-line
        - 11 yearly K-line
        :param symbol: stock/index code
        :param category: data category
        :param market: securities market
        :param start: start position
        :param offset: number of records to fetch per call
        :return: pd.DataFrame or None
        '''
market = 1 if market == 'sh' else 0
with self.client.connect(*self.bestip):
data = self.client.get_index_bars(
int(category), int(market), str(symbol), int(start), int(offset))
return self.client.to_df(data) | 0.003567 |
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None | 0.00489 |
def usePointsForInterpolation(self,cNrm,mNrm,interpolator):
'''
Make a basic solution object with a consumption function and marginal
value function (unconditional on the preference shock).
Parameters
----------
cNrm : np.array
Consumption points for interpolation.
mNrm : np.array
Corresponding market resource points for interpolation.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
# Make the preference-shock specific consumption functions
PrefShkCount = self.PrefShkVals.size
cFunc_list = []
for j in range(PrefShkCount):
MPCmin_j = self.MPCminNow*self.PrefShkVals[j]**(1.0/self.CRRA)
cFunc_this_shock = LowerEnvelope(LinearInterp(mNrm[j,:],cNrm[j,:],
intercept_limit=self.hNrmNow*MPCmin_j,
slope_limit=MPCmin_j),self.cFuncNowCnst)
cFunc_list.append(cFunc_this_shock)
# Combine the list of consumption functions into a single interpolation
cFuncNow = LinearInterpOnInterp1D(cFunc_list,self.PrefShkVals)
# Make the ex ante marginal value function (before the preference shock)
m_grid = self.aXtraGrid + self.mNrmMinNow
vP_vec = np.zeros_like(m_grid)
for j in range(PrefShkCount): # numeric integration over the preference shock
vP_vec += self.uP(cFunc_list[j](m_grid))*self.PrefShkPrbs[j]*self.PrefShkVals[j]
vPnvrs_vec = self.uPinv(vP_vec)
vPfuncNow = MargValueFunc(LinearInterp(m_grid,vPnvrs_vec),self.CRRA)
# Store the results in a solution object and return it
solution_now = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow)
return solution_now | 0.010808 |
def _pipeline_cell(args, cell_body):
"""Implements the pipeline subcommand in the %%bq magic.
Args:
args: the arguments following '%%bq pipeline'.
cell_body: Cell contents.
"""
name = args.get('name')
if name is None:
raise Exception('Pipeline name was not specified.')
import google.datalab.utils as utils
bq_pipeline_config = utils.commands.parse_config(
cell_body, utils.commands.notebook_environment())
try:
airflow_spec = \
google.datalab.contrib.bigquery.commands.get_airflow_spec_from_config(name,
bq_pipeline_config)
except AttributeError:
return "Perhaps you're missing: import google.datalab.contrib.bigquery.commands"
# If a gcs_dag_bucket is specified, we deploy to it so that the Airflow VM rsyncs it.
error_message = ''
gcs_dag_bucket = args.get('gcs_dag_bucket')
gcs_dag_file_path = args.get('gcs_dag_file_path')
if gcs_dag_bucket:
try:
airflow = google.datalab.contrib.pipeline.airflow.Airflow(gcs_dag_bucket, gcs_dag_file_path)
airflow.deploy(name, airflow_spec)
error_message += ("Airflow pipeline successfully deployed! View dashboard for more "
"details.\n")
except AttributeError:
return "Perhaps you're missing: import google.datalab.contrib.pipeline.airflow"
location = args.get('location')
environment = args.get('environment')
if location and environment:
try:
composer = google.datalab.contrib.pipeline.composer.Composer(location, environment)
composer.deploy(name, airflow_spec)
error_message += ("Composer pipeline successfully deployed! View dashboard for more "
"details.\n")
except AttributeError:
return "Perhaps you're missing: import google.datalab.contrib.pipeline.composer"
if args.get('debug'):
error_message += '\n\n' + airflow_spec
return error_message | 0.008806 |
def addFactory(self, identifier, factory):
"""Adds a factory.
After calling this method, remote clients will be able to
connect to it.
This will call ``factory.doStart``.
"""
factory.doStart()
self._factories[identifier] = factory | 0.00692 |
def closed(self):
""" True if ticket was closed in given time frame """
for who, what, old, new in self.history():
if what == "status" and new == "closed":
return True
return False | 0.008621 |
def plugin(name):
"""Executes the selected plugin
Plugins are expected to be found in the kitchen's 'plugins' directory
"""
env.host_string = lib.get_env_host_string()
plug = lib.import_plugin(name)
lib.print_header("Executing plugin '{0}' on "
"{1}".format(name, env.host_string))
node = lib.get_node(env.host_string)
if node == {'run_list': []}:
node['name'] = env.host_string
plug.execute(node)
print("Finished executing plugin") | 0.001992 |
def caom2(mpc_filename, search_date="2014 07 24.0"):
"""
    builds a TSV file in the format of SSOIS by querying for possible observations in CADC/CAOM2.
    This is a fallback program; it should only be needed when SSOIS is behind.
"""
columns = ('Image',
'Ext',
'X',
'Y',
'MJD',
'Filter',
'Exptime',
'Object_RA',
'Object_Dec',
'Image_target',
'Telescope/Instrument',
'MetaData',
'Datalink')
ephem_table = Table(names=columns,
dtypes=('S10', 'i4', 'f8', 'f8',
'f8', 'S10', 'f8', 'f8', 'f8', 'S20', 'S20', 'S20', 'S50'))
ephem_table.pprint()
o = orbfit.Orbfit(mpc.MPCReader(mpc_filename).mpc_observations)
o.predict(search_date)
fields = storage.cone_search(o.coordinate.ra.degrees, o.coordinate.dec.degrees, dra=0.3, ddec=0.3,
calibration_level=1)
mjdates = numpy.unique(fields['mjdate'])
collectionIDs = []
for mjdate in mjdates:
jd = 2400000.5 + mjdate
o.predict(jd)
for field in storage.cone_search(o.coordinate.ra.degrees, o.coordinate.dec.degrees,
dra=30./3600.0, ddec=30./3600.0,
mjdate=mjdate, calibration_level=1):
collectionIDs.append(field['collectionID'])
expnums = numpy.unique(numpy.array(collectionIDs))
for expnum in expnums:
header = storage.get_astheader(expnum, 22)
o.predict(header['MJDATE']+2400000.5)
print o.time.iso, o.coordinate.ra.degrees, o.coordinate.dec.degrees
for ccd in range(36):
header = storage.get_astheader(expnum, ccd)
w = wcs.WCS(header)
(x, y) = w.sky2xy(o.coordinate.ra.degrees, o.coordinate.dec.degrees)
print ccd, x, y
if 0 < x < header['NAXIS1'] and 0 < y < header['NAXIS2']:
ephem_table.add_row([expnum, ccd+1, x, y,
header['MJDATE'], header['FILTER'], header['EXPTIME'],
o.coordinate.ra.degrees, o.coordinate.dec.degrees,
header['OBJECT'],
'CFHT/MegaCam',
None,
"http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/data/pub/CFHT/{}p[{}]".format(expnum, ccd)])
break
ephem_table.pprint()
ephem_table.write('backdoor.tsv', format='ascii', delimiter='\t') | 0.004071 |
def main(logfile=False):
""" Solve River Pollution problem with NAUTILUS V1 and E-NAUTILUS Methods
"""
# Duplicate output to log file
class NAUTILUSOptionValidator(Validator):
def validate(self, document):
if document.text not in "ao":
raise ValidationError(
message="Please select a for apriori or o for optimization option",
cursor_position=0,
)
if logfile:
Tee(logfile)
first = True
current_iter = 0
while first or current_iter:
# SciPy breaks box constraints
nautilus_v1 = NAUTILUSv1(RiverPollution(), SciPyDE)
if not first:
nautilus_v1.current_iter = current_iter
first = False
nadir = nautilus_v1.problem.nadir
ideal = nautilus_v1.problem.ideal
solution = tui.iter_nautilus(nautilus_v1)
current_iter = nautilus_v1.current_iter
# TODO: Move to tui module
method_e = None
if current_iter > 0:
option = _prompt_wrapper(
"select a for apriori or o for optimization option: ",
default="o",
validator=NAUTILUSOptionValidator(),
)
if option.lower() == "a":
wi = _prompt_wrapper(
"Number of PO solutions (10 or 20): ",
default="20",
validator=tui.NumberValidator(),
)
weights = WEIGHTS[wi]
factory = IterationPointFactory(
SciPyDE(NautilusAchievementProblem(RiverPollution()))
)
points = misc.new_points(factory, solution, weights=weights)
method_e = ENAUTILUS(PreGeneratedProblem(points=points), PointSearch)
method_e.zh_prev = solution
else:
method_e = ENAUTILUS(RiverPollution(), SciPyDE)
# method_e.zh = solution
method_e.current_iter = nautilus_v1.current_iter
method_e.user_iters = nautilus_v1.user_iters
print(
"E-NAUTILUS\nselected iteration point: %s:"
% ",".join(map(str, solution))
)
while method_e and method_e.current_iter > 0:
if solution is None:
solution = method_e.problem.nadir
method_e.problem.nadir = nadir
method_e.problem.ideal = ideal
cmd = tui.iter_enautilus(
method_e, initial_iterpoint=solution, initial_bound=method_e.fh_lo
)
if cmd:
print(method_e.current_iter)
current_iter = method_e.current_iter
break
if tui.HAS_INPUT:
input("Press ENTER to exit") | 0.001401 |
def bitmask(*args):
"""! @brief Returns a mask with specified bit ranges set.
An integer mask is generated based on the bits and bit ranges specified by the
arguments. Any number of arguments can be provided. Each argument may be either
a 2-tuple of integers, a list of integers, or an individual integer. The result
is the combination of masks produced by the arguments.
- 2-tuple: The tuple is a bit range with the first element being the MSB and the
      second element the LSB. All bits from LSB up to and including MSB are set.
- list: Each bit position specified by the list elements is set.
- int: The specified bit position is set.
@return An integer mask value computed from the logical OR'ing of masks generated
by each argument.
Example:
@code
>>> hex(bitmask((23,17),1))
0xfe0002
    >>> hex(bitmask([4,0,2],(31,24)))
0xff000015
@endcode
"""
mask = 0
for a in args:
if type(a) is tuple:
for b in range(a[1], a[0]+1):
mask |= 1 << b
elif type(a) is list:
for b in a:
mask |= 1 << b
elif type(a) is int:
mask |= 1 << a
return mask | 0.008821 |
def describe(self, chunk_summary=False):
"""
Generate an in-depth description of this H2OFrame.
This will print to the console the dimensions of the frame; names/types/summary statistics for each column;
and finally first ten rows of the frame.
:param bool chunk_summary: Retrieve the chunk summary along with the distribution summary
"""
if self._has_content():
res = h2o.api("GET /3/Frames/%s" % self.frame_id, data={"row_count": 10})["frames"][0]
self._ex._cache._fill_data(res)
print("Rows:{}".format(self.nrow))
print("Cols:{}".format(self.ncol))
#The chunk & distribution summaries are not cached, so must be pulled if chunk_summary=True.
if chunk_summary:
res["chunk_summary"].show()
res["distribution_summary"].show()
print("\n")
self.summary() | 0.007479 |
def add_permissions(self, group_name, resource, permissions, url_prefix, auth, session, send_opts):
"""
Args:
group_name (string): Name of group.
resource (intern.resource.boss.BossResource): Identifies which data model object to operate on.
permissions (list): List of permissions to add to the given resource.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
"""
post_data = {"group": group_name,
"permissions": permissions,
}
post_data.update(resource.get_dict_route())
req = self.get_permission_request('POST', 'application/json',
url_prefix, auth, post_data=post_data)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code != 201:
msg = ('Failed adding permissions to group {}, got HTTP response: ({}) - {}'.format(group_name,
resp.status_code,
resp.text))
raise HTTPError(msg, request=req, response=resp) | 0.006614 |
def _encode_codepage(codepage, text):
"""
Args:
codepage (int)
text (text)
Returns:
`bytes`
Encode text using the given code page. Will not fail if a char
can't be encoded using that codepage.
"""
assert isinstance(text, text_type)
if not text:
return b""
size = (len(text.encode("utf-16-le", _surrogatepass)) //
ctypes.sizeof(winapi.WCHAR))
# get the required buffer size
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, None, 0, None, None)
if length == 0:
raise ctypes.WinError()
# decode to the buffer
buf = ctypes.create_string_buffer(length)
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, buf, length, None, None)
if length == 0:
raise ctypes.WinError()
return buf[:length] | 0.001167 |
def encode(data):
"""
Encodes data using PackBits encoding.
"""
if len(data) == 0:
return data
if len(data) == 1:
return b'\x00' + data
data = bytearray(data)
result = bytearray()
buf = bytearray()
pos = 0
repeat_count = 0
MAX_LENGTH = 127
# we can safely start with RAW as empty RAW sequences
# are handled by finish_raw()
state = 'RAW'
def finish_raw():
if len(buf) == 0:
return
result.append(len(buf)-1)
result.extend(buf)
buf[:] = bytearray()
def finish_rle():
result.append(256-(repeat_count - 1))
result.append(data[pos])
while pos < len(data)-1:
current_byte = data[pos]
if data[pos] == data[pos+1]:
if state == 'RAW':
# end of RAW data
finish_raw()
state = 'RLE'
repeat_count = 1
elif state == 'RLE':
if repeat_count == MAX_LENGTH:
# restart the encoding
finish_rle()
repeat_count = 0
                # move to next byte
                repeat_count += 1
else:
if state == 'RLE':
repeat_count += 1
finish_rle()
state = 'RAW'
repeat_count = 0
elif state == 'RAW':
if len(buf) == MAX_LENGTH:
# restart the encoding
finish_raw()
buf.append(current_byte)
pos += 1
if state == 'RAW':
buf.append(data[pos])
finish_raw()
else:
repeat_count += 1
finish_rle()
return bytes(result) | 0.000576 |
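For a round-trip sanity check of the encoder above, a minimal PackBits decoder sketch (standard scheme: a header byte n < 128 copies the next n + 1 bytes literally, n > 128 repeats the next byte 257 - n times, and 128 is a no-op):

def decode(data):
    """Decode PackBits-encoded data (counterpart sketch to encode above)."""
    data = bytearray(data)
    result = bytearray()
    pos = 0
    while pos < len(data):
        header = data[pos]
        pos += 1
        if header < 128:                      # literal run of header + 1 bytes
            result.extend(data[pos:pos + header + 1])
            pos += header + 1
        elif header > 128:                    # repeat next byte 257 - header times
            result.extend(data[pos:pos + 1] * (257 - header))
            pos += 1
        # header == 128 is a no-op
    return bytes(result)

original = b'\xaa\xaa\xaa\x80\x00\x2a\xaa\xaa'
assert decode(encode(original)) == original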
def parse(cls, fptr, offset, length):
"""Parse JPEG 2000 header box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
JP2HeaderBox
Instance of the current JP2 header box.
"""
box = cls(length=length, offset=offset)
# The JP2 header box is a superbox, so go ahead and parse its child
# boxes.
box.box = box.parse_superbox(fptr)
return box | 0.003257 |
def sortino_ratio(self, threshold=0.0, ddof=0, freq=None):
"""Return over a threshold per unit of downside deviation.
A performance appraisal ratio that replaces standard deviation
in the Sharpe ratio with downside deviation.
[Source: CFA Institute]
Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
While zero is the default, it is also customary to use
a "minimum acceptable return" (MAR) or a risk-free rate.
Note: this is assumed to be a *periodic*, not necessarily
annualized, return.
ddof : int, default 0
Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
A frequency string used to create an annualization factor.
If None, `self.freq` will be used. If that is also None,
a frequency will be inferred. If none can be inferred,
an exception is raised.
It may be any frequency string or anchored offset string
recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
'BQS-APR'.
Returns
-------
float
"""
stdev = self.semi_stdev(threshold=threshold, ddof=ddof, freq=freq)
return (self.anlzd_ret() - threshold) / stdev | 0.001486 |
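semi_stdev and anlzd_ret are defined elsewhere on the class; as a rough standalone illustration of the same idea (ignoring annualization and the ddof option), the ratio divides the excess return by a downside deviation computed from below-threshold periods only:

import numpy as np

def sortino(returns, threshold=0.0):
    returns = np.asarray(returns, dtype=float)
    downside = np.minimum(returns - threshold, 0.0)   # below-threshold shortfalls only
    downside_dev = np.sqrt(np.mean(downside ** 2))    # downside (semi) deviation
    return (returns.mean() - threshold) / downside_dev

print(sortino([0.02, -0.01, 0.03, -0.02, 0.01], threshold=0.0))  # 0.6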
def update_session(self, alias, headers=None, cookies=None):
"""Update Session Headers: update a HTTP Session Headers
``alias`` Robot Framework alias to identify the session
``headers`` Dictionary of headers merge into session
"""
session = self._cache.switch(alias)
session.headers = merge_setting(headers, session.headers)
session.cookies = merge_cookies(session.cookies, cookies) | 0.004545 |
def _evaluate_trigger_rule(
self,
ti,
successes,
skipped,
failed,
upstream_failed,
done,
flag_upstream_failed,
session):
"""
Yields a dependency status that indicate whether the given task instance's trigger
rule was met.
:param ti: the task instance to evaluate the trigger rule of
:type ti: airflow.models.TaskInstance
        :param successes: Number of successful upstream tasks
        :type successes: int
        :param skipped: Number of skipped upstream tasks
        :type skipped: int
        :param failed: Number of failed upstream tasks
        :type failed: int
        :param upstream_failed: Number of upstream_failed upstream tasks
        :type upstream_failed: int
        :param done: Number of completed upstream tasks
        :type done: int
:param flag_upstream_failed: This is a hack to generate
the upstream_failed state creation while checking to see
whether the task instance is runnable. It was the shortest
path to add the feature
:type flag_upstream_failed: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
TR = airflow.utils.trigger_rule.TriggerRule
task = ti.task
upstream = len(task.upstream_task_ids)
tr = task.trigger_rule
upstream_done = done >= upstream
upstream_tasks_state = {
"total": upstream, "successes": successes, "skipped": skipped,
"failed": failed, "upstream_failed": upstream_failed, "done": done
}
# TODO(aoen): Ideally each individual trigger rules would be its own class, but
# this isn't very feasible at the moment since the database queries need to be
# bundled together for efficiency.
# handling instant state assignment based on trigger rules
if flag_upstream_failed:
if tr == TR.ALL_SUCCESS:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ALL_FAILED:
if successes or skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_SUCCESS:
if upstream_done and not successes:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_FAILED:
if upstream_done and not (failed or upstream_failed):
ti.set_state(State.SKIPPED, session)
elif tr == TR.NONE_FAILED:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped == upstream:
ti.set_state(State.SKIPPED, session)
elif tr == TR.NONE_SKIPPED:
if skipped:
ti.set_state(State.SKIPPED, session)
if tr == TR.ONE_SUCCESS:
if successes <= 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task success, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ONE_FAILED:
if not failed and not upstream_failed:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task failure, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ALL_SUCCESS:
num_failures = upstream - successes
if num_failures > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have succeeded, but found {1} non-success(es). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_failures, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_FAILED:
num_successes = upstream - failed - upstream_failed
if num_successes > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have failed, but found {1} non-failure(s). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_successes, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_DONE:
if not upstream_done:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have completed, but found {1} task(s) that "
"weren't done. upstream_tasks_state={2}, "
"upstream_task_ids={3}"
.format(tr, upstream_done, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.NONE_FAILED:
num_failures = upstream - successes - skipped
if num_failures > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have succeeded or been skipped, but found {1} non-success(es). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_failures, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.NONE_SKIPPED:
if skipped > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to not have been skipped, but found {1} task(s) skipped. "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, skipped, upstream_tasks_state,
task.upstream_task_ids))
else:
yield self._failing_status(
reason="No strategy to evaluate trigger rule '{0}'.".format(tr)) | 0.001225 |
def get_end(pos, alt, category, snvend=None, svend=None, svlen=None):
"""Return the end coordinate for a variant
Args:
pos(int)
alt(str)
category(str)
snvend(str)
svend(int)
svlen(int)
Returns:
end(int)
"""
# If nothing is known we set end to be same as start
end = pos
# If variant is snv or indel we know that cyvcf2 can handle end pos
if category in ('snv', 'indel', 'cancer'):
end = snvend
# With SVs we have to be a bit more careful
elif category == 'sv':
# The END field from INFO usually works fine
end = svend
# For some cases like insertions the callers set end to same as pos
# In those cases we can hope that there is a svlen...
if svend == pos:
if svlen:
end = pos + svlen
# If variant is 'BND' they have ':' in alt field
# Information about other end is in the alt field
if ':' in alt:
match = BND_ALT_PATTERN.match(alt)
if match:
end = int(match.group(2))
return end | 0.000889 |
def env():
"""Verify NVME variables and construct exported variables"""
if cij.ssh.env():
cij.err("cij.nvme.env: invalid SSH environment")
return 1
nvme = cij.env_to_dict(PREFIX, REQUIRED)
nvme["DEV_PATH"] = os.path.join("/dev", nvme["DEV_NAME"])
# get version, chunks, luns and chs
try:
sysfs = os.path.join("/sys/class/block", nvme["DEV_NAME"], "lightnvm")
nvme["LNVM_VERSION"] = cat_file(os.path.join(sysfs, "version"))
if nvme["LNVM_VERSION"] == "2.0":
luns = "punits"
chs = "groups"
elif nvme["LNVM_VERSION"] == "1.2":
luns = "num_luns"
chs = "num_channels"
else:
raise RuntimeError("cij.nvme.env: invalid lnvm version: %s" % nvme["LNVM_VERSION"])
nvme["LNVM_NUM_CHUNKS"] = cat_file(os.path.join(sysfs, "chunks"))
nvme["LNVM_NUM_LUNS"] = cat_file(os.path.join(sysfs, luns))
nvme["LNVM_NUM_CHS"] = cat_file(os.path.join(sysfs, chs))
nvme["LNVM_TOTAL_LUNS"] = str(int(nvme["LNVM_NUM_LUNS"]) * int(nvme["LNVM_NUM_CHS"]))
nvme["LNVM_TOTAL_CHUNKS"] = str(int(nvme["LNVM_TOTAL_LUNS"]) * int(nvme["LNVM_NUM_CHUNKS"]))
# get spec version by identify namespace data struct
if nvme["LNVM_VERSION"] == "2.0":
cmd = ["nvme", "id-ctrl", nvme["DEV_PATH"], "--raw-binary"]
status, stdout, _ = cij.ssh.command(cmd, shell=True)
if status:
raise RuntimeError("cij.nvme.env: nvme id-ctrl fail")
buff = cij.bin.Buffer(types=IdentifyCDS, length=1)
buff.memcopy(stdout)
if buff[0].VS[1023] == 0x5a:
nvme["SPEC_VERSION"] = "Denali"
else:
nvme["SPEC_VERSION"] = "Spec20"
else:
nvme["SPEC_VERSION"] = "Spec12"
# get chunk meta information
nvme["LNVM_CHUNK_META_LENGTH"] = str(get_sizeof_descriptor_table(nvme["SPEC_VERSION"]))
nvme["LNVM_CHUNK_META_SIZE"] = str(int(nvme["LNVM_CHUNK_META_LENGTH"]) *
int(nvme["LNVM_TOTAL_CHUNKS"]))
except StandardError:
traceback.print_exc()
return 1
cij.env_export(PREFIX, EXPORTED, nvme)
return 0 | 0.002644 |
def speziale_debyetemp(v, v0, gamma0, q0, q1, theta0):
"""
calculate Debye temperature for the Speziale equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q0: logarithmic derivative of Gruneisen parameter
:param q1: logarithmic derivative of Gruneisen parameter
:param theta0: Debye temperature at 1 bar in K
:return: Debye temperature in K
"""
if isuncertainties([v, v0, gamma0, q0, q1, theta0]):
f_vu = np.vectorize(uct.wrap(integrate_gamma),
excluded=[1, 2, 3, 4, 5, 6])
integ = f_vu(v, v0, gamma0, q0, q1, theta0)
theta = unp.exp(unp.log(theta0) - integ)
else:
f_v = np.vectorize(integrate_gamma, excluded=[1, 2, 3, 4, 5, 6])
integ = f_v(v, v0, gamma0, q0, q1, theta0)
theta = np.exp(np.log(theta0) - integ)
return theta | 0.001064 |
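integrate_gamma is not shown here; assuming it evaluates the volume integral of the Grüneisen parameter between V0 and V, the expression above follows from the standard thermodynamic relation gamma = -d ln(theta) / d ln(V), i.e. (in LaTeX):

\theta(V) = \theta_0 \exp\!\left(-\int_{V_0}^{V} \frac{\gamma(v)}{v}\, dv\right)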
def set_selected_radio_button(self):
"""Set selected radio button to 'Do not report'."""
dont_use_button = self.default_input_button_group.button(
len(self._parameter.default_values) - 2)
dont_use_button.setChecked(True) | 0.007813 |
def geometry_linestring(lat, lon, elev):
"""
GeoJSON Linestring. Latitude and Longitude have 2 values each.
:param list lat: Latitude values
    :param list lon: Longitude values
    :param elev: Elevation value(s)
:return dict:
"""
logger_excel.info("enter geometry_linestring")
d = OrderedDict()
coordinates = []
temp = ["", ""]
# Point type, Matching pairs.
if lat[0] == lat[1] and lon[0] == lon[1]:
logger_excel.info("matching geo coordinate")
lat.pop()
lon.pop()
d = geometry_point(lat, lon, elev)
else:
# Creates coordinates list
logger_excel.info("unique geo coordinates")
for i in lon:
temp[0] = i
for j in lat:
temp[1] = j
coordinates.append(copy.copy(temp))
if elev:
for i in coordinates:
i.append(elev)
# Create geometry block
d['type'] = 'Linestring'
d['coordinates'] = coordinates
logger_excel.info("exit geometry_linestring")
return d | 0.000959 |
def remove_address(self, fqdn, address):
" Remove an address of a domain."
# Get a list of addresses.
for record in self.list_address(fqdn):
if record.address == address:
record.delete()
break | 0.007663 |
def find_cross_contamination(databases, pair, tmpdir='tmp', log='log.txt', threads=1):
"""
    Uses mash to find out whether or not a sample has more than one genus present, indicating cross-contamination.
:param databases: A databases folder, which must contain refseq.msh, a mash sketch that has one representative
per genus from refseq.
:param tmpdir: Temporary directory to store mash result files in.
    :param pair: Array with path to forward reads at index 0 and path to reverse reads at index 1
:param log: Logfile to write to.
    :param threads: Number of threads to run mash with.
:return: cross_contam: a bool that is True if more than one genus is found, and False otherwise.
:return: genera_present: A string. If only one genus is found, string is just genus. If more than one genus is found,
the string is a list of genera present, separated by colons (i.e. for Escherichia and Salmonella found, string would
be 'Escherichia:Salmonella'. If no genus found, return 'NA'
"""
genera_present = list()
out, err, cmd = mash.screen('{}/refseq.msh'.format(databases), pair[0],
pair[1], threads=threads, w='', i='0.95',
output_file=os.path.join(tmpdir, 'screen.tab'), returncmd=True)
write_to_logfile(log, out, err, cmd)
screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
for item in screen_output:
mash_genus = item.query_id.split('/')[-3]
if mash_genus == 'Shigella':
mash_genus = 'Escherichia'
if mash_genus not in genera_present:
genera_present.append(mash_genus)
if len(genera_present) == 1:
genera_present = genera_present[0]
elif len(genera_present) == 0:
genera_present = 'NA'
else:
tmpstr = ''
for mash_genus in genera_present:
tmpstr += mash_genus + ':'
genera_present = tmpstr[:-1]
return genera_present | 0.004536 |
def get_enabled():
'''
Return the enabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
ret = set()
for name in _iter_service_names():
if _service_is_upstart(name):
if _upstart_is_enabled(name):
ret.add(name)
else:
if _service_is_sysv(name):
if _sysv_is_enabled(name):
ret.add(name)
return sorted(ret) | 0.002146 |
async def probe_node_type_major(self, client):
"""Determine if import source node is a CN or MN and which major version API to
use."""
try:
node_pyxb = await self.get_node_doc(client)
except d1_common.types.exceptions.DataONEException as e:
raise django.core.management.base.CommandError(
"Could not find a functional CN or MN at the provided BaseURL. "
'base_url="{}" error="{}"'.format(
self.options["baseurl"], e.friendly_format()
)
)
is_cn = d1_common.type_conversions.pyxb_get_type_name(node_pyxb) == "NodeList"
if is_cn:
self.assert_is_known_node_id(
node_pyxb, django.conf.settings.NODE_IDENTIFIER
)
self._logger.info(
"Importing from CN: {}. filtered on MN: {}".format(
d1_common.xml.get_req_val(
self.find_node(node_pyxb, self.options["baseurl"]).identifier
),
django.conf.settings.NODE_IDENTIFIER,
)
)
return "cn", "v2"
else:
self._logger.info(
"Importing from MN: {}".format(
d1_common.xml.get_req_val(node_pyxb.identifier)
)
)
return "mn", self.find_node_api_version(node_pyxb) | 0.004193 |
def _print_options_help(self):
"""Print a help screen.
Assumes that self._help_request is an instance of OptionsHelp.
    Note: Only useful if called after options have been registered.
"""
show_all_help = self._help_request.all_scopes
if show_all_help:
help_scopes = list(self._options.known_scope_to_info.keys())
else:
# The scopes explicitly mentioned by the user on the cmd line.
help_scopes = set(self._options.scope_to_flags.keys()) - {GLOBAL_SCOPE}
scope_infos = list(ScopeInfoIterator(self._options.known_scope_to_info).iterate(help_scopes))
if scope_infos:
for scope_info in scope_infos:
help_str = self._format_help(scope_info)
if help_str:
print(help_str)
return
else:
print(pants_release())
print('\nUsage:')
print(' ./pants [option ...] [goal ...] [target...] Attempt the specified goals.')
print(' ./pants help Get help.')
print(' ./pants help [goal] Get help for a goal.')
print(' ./pants help-advanced [goal] Get help for a goal\'s advanced options.')
print(' ./pants help-all Get help for all goals.')
print(' ./pants goals List all installed goals.')
print('')
print(' [target] accepts two special forms:')
print(' dir: to include all targets in the specified directory.')
print(' dir:: to include all targets found recursively under the directory.')
print('\nFriendly docs:\n http://pantsbuild.org/')
print(self._format_help(ScopeInfo(GLOBAL_SCOPE, ScopeInfo.GLOBAL))) | 0.016336 |
def __scale_image(image, scale: float):
"""
Scales the image to a given scale.
:param image:
:param scale:
:return:
"""
height, width, _ = image.shape
width_scaled = int(np.ceil(width * scale))
height_scaled = int(np.ceil(height * scale))
im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA)
# Normalize the image's pixels
im_data_normalized = (im_data - 127.5) * 0.0078125
return im_data_normalized | 0.005525 |
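The 0.0078125 factor is exactly 1/128, so the normalization maps 8-bit pixel values into roughly [-1, 1], a common preprocessing convention in face-detection pipelines; a quick check on a plain array:

import numpy as np

pixels = np.array([0.0, 127.5, 255.0])
print((pixels - 127.5) * 0.0078125)   # [-0.99609375  0.          0.99609375]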
def get_provides(self, ignored=tuple(), private=False):
"""
The provided API, including the class itself, its fields, and its
methods.
"""
if private:
if self._provides_private is None:
self._provides_private = set(self._get_provides(True))
provides = self._provides_private
else:
if self._provides is None:
self._provides = set(self._get_provides(False))
provides = self._provides
return [prov for prov in provides if not fnmatches(prov, *ignored)] | 0.003407 |
def get_attr_desc(instance, attribute, action):
"""
Fetch the appropriate descriptor for the attribute.
:param instance: Model instance
:param attribute: Name of the attribute
:param action: AttributeAction
"""
descs = instance.__jsonapi_attribute_descriptors__.get(attribute, {})
if action == AttributeActions.GET:
check_permission(instance, attribute, Permissions.VIEW)
return descs.get(action, lambda x: getattr(x, attribute))
check_permission(instance, attribute, Permissions.EDIT)
return descs.get(action, lambda x, v: setattr(x, attribute, v)) | 0.00165 |
def getSignature(self, signatureKey, serialized):
"""
:type signatureKey: ECPrivateKey
:type serialized: bytearray
"""
try:
return Curve.calculateSignature(signatureKey, serialized)
except InvalidKeyException as e:
raise AssertionError(e) | 0.006452 |
def p_inline_fragment1(self, p):
"""
inline_fragment : SPREAD ON type_condition directives selection_set
"""
p[0] = InlineFragment(type_condition=p[3], selections=p[5],
directives=p[4]) | 0.008097 |
def _handle_final_metric_data(self, data):
"""Call tuner to process final results
"""
id_ = data['parameter_id']
value = data['value']
if id_ in _customized_parameter_ids:
self.tuner.receive_customized_trial_result(id_, _trial_params[id_], value)
else:
self.tuner.receive_trial_result(id_, _trial_params[id_], value) | 0.007732 |
def create_deployment(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
create a deployment with the specified name
"""
headers = token_manager.get_access_token_headers()
payload = {
'name': deployment_name,
'isAdmin': True
}
deployment_url = environment.get_deployment_url(app_url=app_url)
response = requests.post('%s/api/v1/deployments' % deployment_url,
data=json.dumps(payload),
headers=headers)
if response.status_code == 201:
return response.json()
else:
raise JutException('Error %s: %s' % (response.status_code, response.text)) | 0.002729 |
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
"DW", [filter_size, filter_size, in_filters, out_filters],
tf.float32,
initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding="SAME") | 0.003953 |
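The initializer above uses stddev sqrt(2/n) with n = filter_size * filter_size * out_filters, i.e. a He-style scheme counted over the kernel's fan-out. A numpy-only sketch (an illustration of the formula, not the TensorFlow graph code):
import numpy as np

filter_size, in_filters, out_filters = 3, 16, 32
n = filter_size * filter_size * out_filters   # same fan count as the snippet above
kernel = np.random.normal(0.0, np.sqrt(2.0 / n),
                          size=(filter_size, filter_size, in_filters, out_filters))
print(np.sqrt(2.0 / n))   # target stddev, ~0.083
print(kernel.std())       # empirical stddev, close to the target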
def version_from_xml_filename(filename):
"extract the numeric version from the xml filename"
try:
filename_parts = filename.split(os.sep)[-1].split('-')
except AttributeError:
return None
if len(filename_parts) == 3:
try:
return int(filename_parts[-1].lstrip('v').rstrip('.xml'))
except ValueError:
return None
else:
return None | 0.002427 |
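A few illustrative calls, assuming the function above (and the os module it relies on) is importable; the filenames are made up but follow the name-id-vN.xml pattern the split logic expects (the second call assumes a POSIX os.sep):
print(version_from_xml_filename('elife-02935-v2.xml'))       # 2
print(version_from_xml_filename('/tmp/elife-02935-v2.xml'))  # 2 (basename taken via os.sep)
print(version_from_xml_filename('elife-02935-vfinal.xml'))   # None (int() raises ValueError)
print(version_from_xml_filename('elife-02935.xml'))          # None (not three dash-separated parts)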
def get_wake_on_network():
'''
Displays whether 'wake on network' is on or off if supported
:return: A string value representing the "wake on network" settings
:rtype: string
CLI Example:
.. code-block:: bash
salt '*' power.get_wake_on_network
'''
ret = salt.utils.mac_utils.execute_return_result(
'systemsetup -getwakeonnetworkaccess')
return salt.utils.mac_utils.validate_enabled(
salt.utils.mac_utils.parse_return(ret)) == 'on' | 0.002024 |
def set_log_file(self, logfile):
"""
Set the log file full path including directory path basename and extension.
:Parameters:
        #. logfile (string): the full log file path including basename and
extension. If this is given, all of logFileBasename and logFileExtension
will be discarded. logfile is equivalent to logFileBasename.logFileExtension
"""
assert isinstance(logfile, basestring), "logfile must be a string"
basename, extension = os.path.splitext(logfile)
self.__set_log_file_basename(logFileBasename=basename)
self.set_log_file_extension(logFileExtension=extension) | 0.007396 |
def _parse_ip_addr_show(raw_result):
"""
Parse the 'ip addr list dev' command raw output.
:param str raw_result: os raw result string.
:rtype: dict
:return: The parsed result of the show interface command in a \
dictionary of the form:
::
{
'os_index' : '0',
'dev' : 'eth0',
            'flags_str': 'BROADCAST,MULTICAST,UP,LOWER_UP',
'mtu': 1500,
'state': 'down',
'link_type' 'ether',
'mac_address': '00:50:56:01:2e:f6',
'inet': '20.1.1.2',
'inet_mask': '24',
'inet6': 'fe80::42:acff:fe11:2',
            'inet6_mask': '64'
}
"""
# does link exist?
show_re = (
r'"(?P<dev>\S+)"\s+does not exist'
)
re_result = search(show_re, raw_result)
result = None
if not (re_result):
        # match top two lines for several 'always there' variables
show_re = (
            r'\s*(?P<os_index>\d+):\s+(?P<dev>\S+):\s+<(?P<flags_str>.*)?>.*?'
r'mtu\s+(?P<mtu>\d+).+?state\s+(?P<state>\w+).*'
r'\s*link/(?P<link_type>\w+)\s+(?P<mac_address>\S+)'
)
re_result = search(show_re, raw_result, DOTALL)
result = re_result.groupdict()
        # seek inet if it's there
show_re = (
r'((inet )\s*(?P<inet>[^/]+)/(?P<inet_mask>\d{1,2}))'
)
re_result = search(show_re, raw_result)
if (re_result):
result.update(re_result.groupdict())
        # seek inet6 if it's there
show_re = (
r'((?<=inet6 )(?P<inet6>[^/]+)/(?P<inet6_mask>\d{1,2}))'
)
re_result = search(show_re, raw_result)
if (re_result):
result.update(re_result.groupdict())
# cleanup dictionary before returning
for key, value in result.items():
if value is not None:
if value.isdigit():
result[key] = int(value)
return result | 0.000497 |
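A hedged usage sketch for the parser above, assuming the function and its imports (search and DOTALL from the re module) are in scope. The sample output is abbreviated but keeps every field the regular expressions look for:
from re import search, DOTALL  # the imports the parser above relies on

raw = (
    "2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT\n"
    "    link/ether 00:50:56:01:2e:f6 brd ff:ff:ff:ff:ff:ff\n"
    "    inet 20.1.1.2/24 brd 20.1.1.255 scope global eth0\n"
    "    inet6 fe80::42:acff:fe11:2/64 scope link\n"
)

result = _parse_ip_addr_show(raw)
print(result['dev'], result['mtu'], result['inet'], result['inet_mask'])
# eth0 1500 20.1.1.2 24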
def all_files(models=[]):
r'''
Return a list of full path of files matching 'models', sorted in human
numerical order (i.e., 0 1 2 ..., 10 11 12, ..., 100, ..., 1000).
Files are supposed to be named identically except one variable component
e.g. the list,
test.weights.e5.lstm1200.ldc93s1.pb
test.weights.e5.lstm1000.ldc93s1.pb
test.weights.e5.lstm800.ldc93s1.pb
gets sorted:
test.weights.e5.lstm800.ldc93s1.pb
test.weights.e5.lstm1000.ldc93s1.pb
test.weights.e5.lstm1200.ldc93s1.pb
'''
def nsort(a, b):
fa = os.path.basename(a).split('.')
fb = os.path.basename(b).split('.')
elements_to_remove = []
assert len(fa) == len(fb)
for i in range(0, len(fa)):
if fa[i] == fb[i]:
elements_to_remove.append(fa[i])
for e in elements_to_remove:
fa.remove(e)
fb.remove(e)
assert len(fa) == len(fb)
assert len(fa) == 1
fa = keep_only_digits(fa[0])
fb = keep_only_digits(fb[0])
if fa < fb:
return -1
if fa == fb:
return 0
if fa > fb:
return 1
    from functools import cmp_to_key  # sort(cmp=...) is Python 2 only; wrap the comparator
    base = [os.path.abspath(x) for x in maybe_inspect_zip(models)]
    base.sort(key=cmp_to_key(nsort))
    return base | 0.000759 |
def read(self, job_id):
"""
Reads the information for a specific Batch API request
:param job_id: The id of the job to be read from
:type job_id: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
"""
data = self.send(self.api_url + job_id, http_method="GET")
return data | 0.004505 |
def get_playback_callback(resampler, samplerate, params):
"""Return a sound playback callback.
Parameters
----------
resampler
The resampler from which samples are read.
samplerate : float
The sample rate.
params : dict
Parameters for FM generation.
"""
def callback(outdata, frames, time, _):
"""Playback callback.
Read samples from the resampler and modulate them onto a carrier
frequency.
"""
last_fmphase = getattr(callback, 'last_fmphase', 0)
df = params['fm_gain'] * resampler.read(frames)
df = np.pad(df, (0, frames - len(df)), mode='constant')
t = time.outputBufferDacTime + np.arange(frames) / samplerate
phase = 2 * np.pi * params['carrier_frequency'] * t
fmphase = last_fmphase + 2 * np.pi * np.cumsum(df) / samplerate
outdata[:, 0] = params['output_volume'] * np.cos(phase + fmphase)
callback.last_fmphase = fmphase[-1]
return callback | 0.000993 |
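The callback integrates the frequency deviation df with a cumulative sum (a discrete approximation of integrating the deviation over time) and adds it to the carrier phase; callback.last_fmphase carries the running phase across blocks so there are no discontinuities at block boundaries. A standalone numpy sketch of the same modulation step, without the audio-callback and resampler plumbing:
import numpy as np

samplerate = 48000.0
carrier_frequency = 12000.0
fm_gain = 2000.0
frames = 480                                      # one 10 ms block

t = np.arange(frames) / samplerate
df = fm_gain * np.sin(2 * np.pi * 440 * t)        # toy message signal (440 Hz tone)
phase = 2 * np.pi * carrier_frequency * t         # carrier phase
fmphase = 2 * np.pi * np.cumsum(df) / samplerate  # integrated frequency deviation
block = np.cos(phase + fmphase)                   # FM-modulated output block
print(block.shape, float(block.min()), float(block.max()))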
def assert_lock(fname):
"""
If file is locked then terminate program else lock file.
"""
if not set_lock(fname):
logger.error('File {} is already locked. Terminating.'.format(fname))
sys.exit() | 0.004425 |
def draw(self, mode='triangles', indices=None, check_error=True):
""" Draw the attribute arrays in the specified mode.
Parameters
----------
mode : str | GL_ENUM
'points', 'lines', 'line_strip', 'line_loop', 'triangles',
'triangle_strip', or 'triangle_fan'.
indices : array
Array of indices to draw.
check_error:
Check error after draw.
"""
# Invalidate buffer (data has already been sent)
self._buffer = None
# Check if mode is valid
mode = check_enum(mode)
if mode not in ['points', 'lines', 'line_strip', 'line_loop',
'triangles', 'triangle_strip', 'triangle_fan']:
raise ValueError('Invalid draw mode: %r' % mode)
# Check leftover variables, warn, discard them
# In GLIR we check whether all attributes are indeed set
for name in self._pending_variables:
logger.warn('Variable %r is given but not known.' % name)
self._pending_variables = {}
# Check attribute sizes
attributes = [vbo for vbo in self._user_variables.values()
if isinstance(vbo, DataBuffer)]
sizes = [a.size for a in attributes]
if len(attributes) < 1:
raise RuntimeError('Must have at least one attribute')
if not all(s == sizes[0] for s in sizes[1:]):
msg = '\n'.join(['%s: %s' % (str(a), a.size) for a in attributes])
raise RuntimeError('All attributes must have the same size, got:\n'
'%s' % msg)
# Get the glir queue that we need now
canvas = get_current_canvas()
assert canvas is not None
# Associate canvas
canvas.context.glir.associate(self.glir)
# Indexbuffer
if isinstance(indices, IndexBuffer):
canvas.context.glir.associate(indices.glir)
logger.debug("Program drawing %r with index buffer" % mode)
gltypes = {np.dtype(np.uint8): 'UNSIGNED_BYTE',
np.dtype(np.uint16): 'UNSIGNED_SHORT',
np.dtype(np.uint32): 'UNSIGNED_INT'}
selection = indices.id, gltypes[indices.dtype], indices.size
canvas.context.glir.command('DRAW', self._id, mode, selection)
elif indices is None:
selection = 0, attributes[0].size
logger.debug("Program drawing %r with %r" % (mode, selection))
canvas.context.glir.command('DRAW', self._id, mode, selection)
else:
raise TypeError("Invalid index: %r (must be IndexBuffer)" %
indices)
# Process GLIR commands
canvas.context.flush_commands() | 0.004218 |
def _server_whitelist(self):
'''
Returns list of servers that have not errored in the last five minutes.
If all servers have errored in the last five minutes, returns list with
one item, the server that errored least recently.
'''
whitelist = []
for server in self.servers:
if (server not in self.last_error
or self.last_error[server] < time.time() - self.PENALTY_BOX_TIME):
whitelist.append(server)
if not whitelist:
whitelist.append(sorted(
self.last_error.items(), key=lambda kv: kv[1])[0][0])
return whitelist | 0.004539 |
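A toy illustration (made-up server names and timestamps, not part of the class) of the two branches: servers whose last error is older than the penalty window stay whitelisted, and the fallback expression picks the least recent offender:
import time

PENALTY_BOX_TIME = 300                  # five-minute penalty window
last_error = {
    'srv-a': time.time() - 10,          # errored 10 s ago   -> penalty box
    'srv-b': time.time() - 900,         # errored 15 min ago -> whitelisted
    'srv-c': time.time() - 60,          # errored 1 min ago  -> penalty box
}

whitelist = [s for s, when in last_error.items()
             if when < time.time() - PENALTY_BOX_TIME]
fallback = sorted(last_error.items(), key=lambda kv: kv[1])[0][0]
print(whitelist)   # ['srv-b']
print(fallback)    # srv-b -- used only if the whitelist came out empty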
def prcntiles(x, percents):
    '''Equivalent to MATLAB prctile(x, p); uses linear interpolation.'''
    x = np.array(x).flatten()
    listx = np.sort(x)
    xpcts = []
    lenlistx = len(listx)
    refs = []
    for i in range(lenlistx):
        r = 100 * ((.5 + i) / lenlistx)  # refs[i] is the percentile of listx[i] within x
        refs.append(r)
    rpcts = []
    for p in percents:
        if p < refs[0]:
            rpcts.append(listx[0])
        elif p > refs[-1]:
            rpcts.append(listx[-1])
        else:
            for j in range(lenlistx):  # lenlistx == len(refs)
                if refs[j] <= p and refs[j + 1] >= p:
                    my = listx[j + 1] - listx[j]
                    mx = refs[j + 1] - refs[j]
                    m = my / mx  # slope of the line between the two bracketing points
                    rpcts.append((m * (p - refs[j])) + listx[j])
                    break
    xpcts.append(rpcts)
    return np.array(xpcts).transpose() | 0.025584 |
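A quick check, assuming prcntiles and its numpy import are available. Because the reference points sit at the midpoints (i + 0.5)/n rather than at 0 and 100, the result follows MATLAB's prctile convention and differs slightly from numpy.percentile's default interpolation:
import numpy as np

data = [2.0, 4.0, 6.0, 8.0]
print(prcntiles(data, [25, 50, 75]))      # [[3.] [5.] [7.]] -- a (3, 1) column, MATLAB-style 3, 5, 7
print(np.percentile(data, [25, 50, 75]))  # [3.5 5.  6.5] with numpy's default 'linear' rule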
def AppendUnique(self, delete_existing=0, **kw):
"""Append values to existing construction variables
in an Environment, if they're not already there.
If delete_existing is 1, removes existing values first, so
values move to end.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if SCons.Util.is_List(val):
val = _delete_duplicates(val, delete_existing)
if key not in self._dict or self._dict[key] in ('', None):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if key == 'CPPDEFINES':
tmp = []
for i in val:
if SCons.Util.is_List(i):
if len(i) >= 2:
tmp.append((i[0], i[1]))
else:
tmp.append((i[0],))
elif SCons.Util.is_Tuple(i):
tmp.append(i)
else:
tmp.append((i,))
val = tmp
# Construct a list of (key, value) tuples.
if SCons.Util.is_Dict(dk):
tmp = []
for (k, v) in dk.items():
if v is not None:
tmp.append((k, v))
else:
tmp.append((k,))
dk = tmp
elif SCons.Util.is_String(dk):
dk = [(dk,)]
else:
tmp = []
for i in dk:
if SCons.Util.is_List(i):
if len(i) >= 2:
tmp.append((i[0], i[1]))
else:
tmp.append((i[0],))
elif SCons.Util.is_Tuple(i):
tmp.append(i)
else:
tmp.append((i,))
dk = tmp
else:
if not SCons.Util.is_List(dk):
dk = [dk]
if delete_existing:
dk = [x for x in dk if x not in val]
else:
val = [x for x in val if x not in dk]
self._dict[key] = dk + val
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
if key == 'CPPDEFINES':
tmp = []
for i in dk:
if SCons.Util.is_List(i):
if len(i) >= 2:
tmp.append((i[0], i[1]))
else:
tmp.append((i[0],))
elif SCons.Util.is_Tuple(i):
tmp.append(i)
else:
tmp.append((i,))
dk = tmp
# Construct a list of (key, value) tuples.
if SCons.Util.is_Dict(val):
tmp = []
for (k, v) in val.items():
if v is not None:
tmp.append((k, v))
else:
tmp.append((k,))
val = tmp
elif SCons.Util.is_String(val):
val = [(val,)]
if delete_existing:
dk = list(filter(lambda x, val=val: x not in val, dk))
self._dict[key] = dk + val
else:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + val
else:
# By elimination, val is not a list. Since dk is a
# list, wrap val in a list first.
if delete_existing:
dk = list(filter(lambda x, val=val: x not in val, dk))
self._dict[key] = dk + [val]
else:
if not val in dk:
self._dict[key] = dk + [val]
else:
if key == 'CPPDEFINES':
if SCons.Util.is_String(dk):
dk = [dk]
elif SCons.Util.is_Dict(dk):
tmp = []
for (k, v) in dk.items():
if v is not None:
tmp.append((k, v))
else:
tmp.append((k,))
dk = tmp
if SCons.Util.is_String(val):
if val in dk:
val = []
else:
val = [val]
elif SCons.Util.is_Dict(val):
tmp = []
for i,j in val.items():
if j is not None:
tmp.append((i,j))
else:
tmp.append(i)
val = tmp
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + val
self.scanner_map_delete(kw) | 0.001318 |
def get_kbr_values(self, searchkey="", searchvalue="", searchtype='s'):
"""
Return dicts of 'key' and 'value' from a knowledge base.
        :param searchkey: search using this key
        :param searchvalue: search using this value
        :param searchtype: s=substring, e=exact, sw=startswith
        :return: a list of dictionaries [{'key'=>x, 'value'=>y},..]
"""
import warnings
warnings.warn("The function is deprecated. Please use the "
"`KnwKBRVAL.query_kb_mappings()` instead. "
"E.g. [(kval.m_value,) for kval in "
"KnwKBRVAL.query_kb_mappings(kb_id).all()]")
# prepare filters
if searchtype == 's':
searchkey = '%' + searchkey + '%'
if searchtype == 's' and searchvalue:
searchvalue = '%' + searchvalue + '%'
if searchtype == 'sw' and searchvalue: # startswith
searchvalue = searchvalue + '%'
if not searchvalue:
searchvalue = '%'
# execute query
return db.session.execute(
db.select([KnwKBRVAL.m_value],
db.and_(KnwKBRVAL.id_knwKB.like(self.id),
KnwKBRVAL.m_value.like(searchvalue),
KnwKBRVAL.m_key.like(searchkey)))) | 0.001444 |
def exists(path, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
'''
Check if path exists
path
path to check
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
        List of Zookeeper hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.exists /test/name profile=prod
'''
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
return bool(conn.exists(path)) | 0.004292 |
def bboxiter(tile_bounds, tiles_per_row_per_region=1):
"""
Iterate through a grid of regions defined by a TileBB.
Args:
tile_bounds (GridBB):
tiles_per_row_per_region: Combine multiple tiles in one region.
E.g. if set to two, four tiles will be combined in one region.
See `kml` module description for more details. Leaving the
default value '1' simply yields all tiles in the bounding box.
Note:
If the number of regions would not be an integer due to specification
of the `tiles_per_row_per_region` parameter, the boundaries will
be rounded to the next smaller or next larger integer respectively.
Example:
We have the following bounding box with size 2x2 and set
tiles_per_row_per_region = 2, delimited by the coordinates (x, y)::
(5,5)--- ---
| |
| |
| |
--- ---(9,7)
Although this could be represented in one single region with two
tiles per row, it will create four regions::
(2,2)--- --- (5/2 = 2.5 -> 2, 5/2 = 2.5 -> 2)
| | |
--- ---
| | |
--- ---(5,4) (9/2 = 4.5 -> 5, 7/2 = 3.5 -> 4)
Yields:
Tuple: all tuples (x, y) in the region delimited by the TileBB
"""
x_lower = math.floor(tile_bounds.min.x / tiles_per_row_per_region)
y_lower = math.floor(tile_bounds.min.y / tiles_per_row_per_region)
ncol = math.ceil(tile_bounds.max.x / tiles_per_row_per_region) - x_lower
nrow = math.ceil(tile_bounds.max.y / tiles_per_row_per_region) - y_lower
yield from griditer(x_lower, y_lower, ncol, nrow) | 0.000563 |
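The region corners come straight from the floor/ceil arithmetic in the last four lines. Redoing the docstring's example by hand (tile bounds (5, 5)-(9, 7), two tiles per row per region) reproduces the corner values quoted there:
import math

min_x, min_y, max_x, max_y = 5, 5, 9, 7
tiles_per_row_per_region = 2

x_lower = math.floor(min_x / tiles_per_row_per_region)        # 5 / 2 = 2.5 -> 2
y_lower = math.floor(min_y / tiles_per_row_per_region)        # 5 / 2 = 2.5 -> 2
ncol = math.ceil(max_x / tiles_per_row_per_region) - x_lower  # 9 / 2 = 4.5 -> 5, minus 2 = 3
nrow = math.ceil(max_y / tiles_per_row_per_region) - y_lower  # 7 / 2 = 3.5 -> 4, minus 2 = 2
print((x_lower, y_lower), (x_lower + ncol, y_lower + nrow))   # (2, 2) (5, 4)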
def min_fill_heuristic(G):
"""Computes an upper bound on the treewidth of graph G based on
the min-fill heuristic for the elimination ordering.
Parameters
----------
G : NetworkX graph
The graph on which to compute an upper bound for the treewidth.
Returns
-------
treewidth_upper_bound : int
An upper bound on the treewidth of the graph G.
order : list
An elimination order that induces the treewidth.
Examples
--------
This example computes an upper bound for the treewidth of the :math:`K_4`
complete graph.
>>> import dwave_networkx as dnx
>>> import networkx as nx
>>> K_4 = nx.complete_graph(4)
>>> tw, order = dnx.min_fill_heuristic(K_4)
References
----------
Based on the algorithm presented in [GD]_
"""
# we need only deal with the adjacency structure of G. We will also
# be manipulating it directly so let's go ahead and make a new one
adj = {v: set(G[v]) for v in G}
num_nodes = len(adj)
# preallocate the return values
order = [0] * num_nodes
upper_bound = 0
for i in range(num_nodes):
# get the node that adds the fewest number of edges when eliminated from the graph
v = min(adj, key=lambda x: _min_fill_needed_edges(adj, x))
# if the number of neighbours of v is higher than upper_bound, update
dv = len(adj[v])
if dv > upper_bound:
upper_bound = dv
# make v simplicial by making its neighborhood a clique then remove the
# node
_elim_adj(adj, v)
order[i] = v
return upper_bound, order | 0.001217 |
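The helper _min_fill_needed_edges is not part of this snippet; the sketch below is one plausible implementation (an assumption about the library's internals, shown only to make the selection rule concrete): it counts how many neighbor pairs of v are not yet adjacent, i.e. the fill-in created when v is eliminated.
def _min_fill_needed_edges(adj, v):
    # Count missing edges among the neighbors of v: the edges that would have to
    # be added to turn v's neighborhood into a clique when v is eliminated.
    missing = 0
    neighbors = list(adj[v])
    for i, u in enumerate(neighbors):
        for w in neighbors[i + 1:]:
            if w not in adj[u]:
                missing += 1
    return missing

# On the path a-b-c, eliminating b would add the edge (a, c); eliminating a adds nothing.
adj = {'a': {'b'}, 'b': {'a', 'c'}, 'c': {'b'}}
print(_min_fill_needed_edges(adj, 'b'))   # 1
print(_min_fill_needed_edges(adj, 'a'))   # 0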
def tone_chat(self,
utterances,
content_language=None,
accept_language=None,
**kwargs):
"""
Analyze customer engagement tone.
Use the customer engagement endpoint to analyze the tone of customer service and
customer support conversations. For each utterance of a conversation, the method
reports the most prevalent subset of the following seven tones: sad, frustrated,
satisfied, excited, polite, impolite, and sympathetic.
If you submit more than 50 utterances, the service returns a warning for the
overall content and analyzes only the first 50 utterances. If you submit a single
utterance that contains more than 500 characters, the service returns an error for
that utterance and does not analyze the utterance. The request fails if all
utterances have more than 500 characters. Per the JSON specification, the default
character encoding for JSON content is effectively always UTF-8.
**See also:** [Using the customer-engagement
endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone-chat.html#using-the-customer-engagement-endpoint).
:param list[Utterance] utterances: An array of `Utterance` objects that provides
the input content that the service is to analyze.
:param str content_language: The language of the input text for the request:
English or French. Regional variants are treated as their parent language; for
example, `en-US` is interpreted as `en`. The input content must match the
specified language. Do not submit content that contains both languages. You can
use different languages for **Content-Language** and **Accept-Language**.
* **`2017-09-21`:** Accepts `en` or `fr`.
* **`2016-05-19`:** Accepts only `en`.
:param str accept_language: The desired language of the response. For
two-character arguments, regional variants are treated as their parent language;
for example, `en-US` is interpreted as `en`. You can use different languages for
**Content-Language** and **Accept-Language**.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if utterances is None:
raise ValueError('utterances must be provided')
utterances = [self._convert_model(x, Utterance) for x in utterances]
headers = {
'Content-Language': content_language,
'Accept-Language': accept_language
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('tone_analyzer', 'V3', 'tone_chat')
headers.update(sdk_headers)
params = {'version': self.version}
data = {'utterances': utterances}
url = '/v3/tone_chat'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response | 0.007088 |
def ldirectory(inpath, outpath, args, scope):
"""Compile all *.less files in directory
Args:
inpath (str): Path to compile
outpath (str): Output directory
args (object): Argparse Object
scope (Scope): Scope object or None
"""
yacctab = 'yacctab' if args.debug else None
if not outpath:
sys.exit("Compile directory option needs -o ...")
else:
if not os.path.isdir(outpath):
if args.verbose:
print("Creating '%s'" % outpath, file=sys.stderr)
if not args.dry_run:
os.mkdir(outpath)
less = glob.glob(os.path.join(inpath, '*.less'))
f = formatter.Formatter(args)
for lf in less:
outf = os.path.splitext(os.path.basename(lf))
minx = '.min' if args.min_ending else ''
outf = "%s/%s%s.css" % (outpath, outf[0], minx)
if not args.force and os.path.exists(outf):
recompile = os.path.getmtime(outf) < os.path.getmtime(lf)
else:
recompile = True
if recompile:
print('%s -> %s' % (lf, outf))
p = parser.LessParser(
yacc_debug=(args.debug),
lex_optimize=True,
yacc_optimize=(not args.debug),
scope=scope,
tabfile=yacctab,
verbose=args.verbose)
p.parse(filename=lf, debuglevel=0)
css = f.format(p)
if not args.dry_run:
with open(outf, 'w') as outfile:
outfile.write(css)
elif args.verbose:
print('skipping %s, not modified' % lf, file=sys.stderr)
sys.stdout.flush()
if args.recurse:
[
ldirectory(
os.path.join(inpath, name), os.path.join(outpath, name), args,
scope) for name in os.listdir(inpath)
if os.path.isdir(os.path.join(inpath, name))
and not name.startswith('.') and not name == outpath
] | 0.0005 |
def merge_bibtex_with_aux(auxpath, mainpath, extradir, parse=get_bibtex_dict, allow_missing=False):
"""Merge multiple BibTeX files into a single homogeneously-formatted output,
using a LaTeX .aux file to know which records are worth paying attention
to.
The file identified by `mainpath` will be overwritten with the new .bib
contents. This function is intended to be used in a version-control
context.
Files matching the glob "*.bib" in `extradir` will be read in to
supplement the information in `mainpath`. Records already in the file in
`mainpath` always take precedence.
"""
auxpath = Path(auxpath)
mainpath = Path(mainpath)
extradir = Path(extradir)
with auxpath.open('rt') as aux:
citednames = sorted(cited_names_from_aux_file(aux))
main = mainpath.try_open(mode='rt')
if main is None:
maindict = {}
else:
maindict = parse(main)
main.close()
def gen_extra_dicts():
# If extradir does not exist, Path.glob() will return an empty list,
# which seems acceptable to me.
for item in sorted(extradir.glob('*.bib')):
with item.open('rt') as extra:
yield parse(extra)
merged = merge_bibtex_collections(citednames, maindict, gen_extra_dicts(),
allow_missing=allow_missing)
with mainpath.make_tempfile(want='handle', resolution='overwrite') as newbib:
write_bibtex_dict(newbib, six.viewvalues(merged)) | 0.002639 |
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S') | 0.004132 |
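The helper drops any fractional-second suffix and reformats the 14-digit timestamp string; an equivalent standalone one-liner with an illustrative input:
from datetime import datetime

raw = '20190301120000.123456'
print(datetime.strptime(raw.split('.')[0], '%Y%m%d%H%M%S').strftime('%Y-%m-%d %H:%M:%S'))
# 2019-03-01 12:00:00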
def change_custom_svc_var(self, service, varname, varvalue):
"""Change custom service variable
Format of the line that triggers function call::
CHANGE_CUSTOM_SVC_VAR;<host_name>;<service_description>;<varname>;<varvalue>
:param service: service to edit
:type service: alignak.objects.service.Service
        :param varname: variable name to change
        :type varname: str
        :param varvalue: variable new value
        :type varvalue: str
:return: None
"""
if varname.upper() in service.customs:
service.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value
service.customs[varname.upper()] = varvalue
self.send_an_element(service.get_update_status_brok()) | 0.003841 |
def serve(content):
"""Write content to a temp file and serve it in browser"""
temp_folder = tempfile.gettempdir()
temp_file_name = tempfile.gettempprefix() + str(uuid.uuid4()) + ".html"
# Generate a file path with a random name in temporary dir
temp_file_path = os.path.join(temp_folder, temp_file_name)
# save content to temp file
save(temp_file_path, content)
    # Open the temp file in a browser
webbrowser.open("file://{}".format(temp_file_path))
# Block the thread while content is served
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
# cleanup the temp file
os.remove(temp_file_path) | 0.001468 |
def read_cz_lsm_info(fd, byte_order, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
result = numpy.rec.fromfile(fd, CZ_LSM_INFO, 1,
byteorder=byte_order)[0]
{50350412: '1.3', 67127628: '2.0'}[result.magic_number] # validation
return result | 0.003086 |
def register_modele(self, modele: Modele):
""" Register a modele onto the lemmatizer
:param modele: Modele to register
"""
self.lemmatiseur._modeles[modele.gr()] = modele | 0.009852 |
def is_child_of_log(self, id_, log_id):
"""Tests if an ``Id`` is a direct child of a log.
arg: id (osid.id.Id): an ``Id``
arg: log_id (osid.id.Id): the ``Id`` of a log
return: (boolean) - ``true`` if this ``id`` is a child of
``log_id,`` ``false`` otherwise
raise: NotFound - ``log_id`` is not found
raise: NullArgument - ``id`` or ``log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_child_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=log_id)
return self._hierarchy_session.is_child(id_=log_id, child_id=id_) | 0.002982 |
def __load_dump(self, message):
"""
Calls the hook method to modify the loaded peer description before
giving it to the directory
:param message: The received Herald message
:return: The updated peer description
"""
dump = message.content
if self._hook is not None:
# Call the hook
try:
updated_dump = self._hook(message, dump)
if updated_dump is not None:
# Use the new description
dump = updated_dump
except (TypeError, ValueError) as ex:
                self._logger.warning("Invalid description hook: %s", ex)
return dump | 0.002865 |
def classes(self):
"""Iterate over the defined Classes."""
defclass = lib.EnvGetNextDefclass(self._env, ffi.NULL)
while defclass != ffi.NULL:
yield Class(self._env, defclass)
defclass = lib.EnvGetNextDefclass(self._env, defclass) | 0.007168 |
def derivative(self, x):
"""Return the derivative at ``x``.
The derivative of the right scalar operator multiplication
follows the chain rule:
``OperatorRightScalarMult(op, s).derivative(y) ==
OperatorLeftScalarMult(op.derivative(s * y), s)``
Parameters
----------
x : `domain` `element-like`
Evaluation point of the derivative.
Examples
--------
>>> space = odl.rn(3)
>>> operator = odl.IdentityOperator(space) - space.element([1, 1, 1])
>>> left_mul_op = OperatorRightScalarMult(operator, 3)
>>> derivative = left_mul_op.derivative([0, 0, 0])
>>> derivative([1, 1, 1])
rn(3).element([ 3., 3., 3.])
"""
return self.scalar * self.operator.derivative(self.scalar * x) | 0.002392 |
def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
        If attr is given, also calculate the sum of the weights.
        This is an M log(N) operation, where M is the number of min/max
        queries and N is the number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info) | 0.002141 |