text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def remove_element_attributes(elem_to_parse, *args):
"""
Removes the specified keys from the element's attributes, and
returns a dict containing the attributes that have been removed.
"""
element = get_element(elem_to_parse)
if element is None:
return element
if len(args):
attribs = element.attrib
return {key: attribs.pop(key) for key in args if key in attribs}
return {} | 0.002309 |
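A minimal usage sketch for remove_element_attributes, assuming get_element simply returns the xml.etree Element it is handed:
import xml.etree.ElementTree as ET
elem = ET.fromstring('<node a="1" b="2" c="3"/>')
removed = remove_element_attributes(elem, 'a', 'c')
# removed == {'a': '1', 'c': '3'} and elem.attrib is now {'b': '2'}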
def get_syllabus(self, site):
"""
Gets the syllabus for a course. The syllabus may or may not
contain HTML, depending on the site. TSquare does not enforce
whether or not pages are allowed to have HTML, so it is impossible
to tell.
"""
tools = self.get_tools(site)
syllabus_filter = [x.href for x in tools if x.name == 'syllabus']
if not syllabus_filter:
return ''
response = self._session.get(syllabus_filter[0])
response.raise_for_status()
iframes = self._html_iface.get_iframes(response.text)
iframe_url = ''
for frame in iframes:
if frame['title'] == 'Syllabus ':
iframe_url = frame['src']
        if iframe_url == '':
            print("WARNING: NO SYLLABUS IFRAME FOUND")
            return ''
response = self._session.get(iframe_url)
response.raise_for_status()
syllabus_html = self._html_iface.get_syllabus(response.text)
return syllabus_html | 0.001978 |
def validate(self):
"""Apply validation rules for loaded settings."""
if self.GROUP_ATTRIBUTES and self.GROUPNAME_FIELD not in self.GROUP_ATTRIBUTES.values():
raise ImproperlyConfigured("LDAP_SYNC_GROUP_ATTRIBUTES must contain '%s'" % self.GROUPNAME_FIELD)
if not self.model._meta.get_field(self.USERNAME_FIELD).unique:
raise ImproperlyConfigured("LDAP_SYNC_USERNAME_FIELD '%s' must be unique" % self.USERNAME_FIELD)
if self.USER_ATTRIBUTES and self.USERNAME_FIELD not in self.USER_ATTRIBUTES.values():
raise ImproperlyConfigured("LDAP_SYNC_USER_ATTRIBUTES must contain '%s'" % self.USERNAME_FIELD) | 0.010479 |
def weight_decay_and_noise(loss, hparams, learning_rate, var_list=None):
"""Apply weight decay and weight noise."""
if var_list is None:
var_list = tf.trainable_variables()
decay_vars = [v for v in var_list]
noise_vars = [v for v in var_list if "/body/" in v.name]
weight_decay_loss = weight_decay(hparams.weight_decay, decay_vars)
if hparams.weight_decay and common_layers.should_generate_summaries():
tf.summary.scalar("losses/weight_decay", weight_decay_loss)
weight_noise_ops = weight_noise(hparams.weight_noise, learning_rate,
noise_vars)
with tf.control_dependencies(weight_noise_ops):
loss = tf.identity(loss)
loss += weight_decay_loss
return loss | 0.015193 |
def running_instances(self, context, process_name):
"""Get a list of running instances.
Args:
context (`ResolvedContext`): Context the process is running in.
process_name (str): Name of the process.
Returns:
List of (`subprocess.Popen`, start-time) 2-tuples, where start_time
is the epoch time the process was added.
"""
handle = (id(context), process_name)
it = self.processes.get(handle, {}).itervalues()
entries = [x for x in it if x[0].poll() is None]
return entries | 0.003425 |
def update_service_endpoints(self, endpoints, project):
"""UpdateServiceEndpoints.
[Preview API] Update the service endpoints.
:param [ServiceEndpoint] endpoints: Names of the service endpoints to update.
:param str project: Project ID or project name
:rtype: [ServiceEndpoint]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(endpoints, '[ServiceEndpoint]')
response = self._send(http_method='PUT',
location_id='e85f1c62-adfc-4b74-b618-11a150fb195e',
version='5.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('[ServiceEndpoint]', self._unwrap_collection(response)) | 0.006508 |
def tokenize_akkadian_words(line):
"""
Operates on a single line of text, returns all words in the line as a
tuple in a list.
input: "1. isz-pur-ram a-na"
output: [("isz-pur-ram", "akkadian"), ("a-na", "akkadian")]
:param: line: text string
:return: list of tuples: (word, language)
"""
beginning_underscore = "_[^_]+(?!_)$"
# only match a string if it has a beginning underscore anywhere
ending_underscore = "^(?<!_)[^_]+_"
# only match a string if it has an ending underscore anywhere
two_underscores = "_[^_]+_"
# only match a string if it has two underscores
words = line.split()
# split the line on spaces ignoring the first split (which is the
# line number)
language = "akkadian"
output_words = []
for word in words:
if re.search(two_underscores, word):
# If the string has two underscores in it then the word is
# in Sumerian while the neighboring words are in Akkadian.
output_words.append((word, "sumerian"))
elif re.search(beginning_underscore, word):
# If the word has an initial underscore somewhere
            # but no other underscores, then we're starting a block
# of Sumerian.
language = "sumerian"
output_words.append((word, language))
elif re.search(ending_underscore, word):
# If the word has an ending underscore somewhere
            # but no other underscores, then we're ending a block
# of Sumerian.
output_words.append((word, language))
language = "akkadian"
else:
            # If there are no underscores, then we are continuing
# whatever language we're currently in.
output_words.append((word, language))
return output_words | 0.000547 |
def add_inputs(self, xs):
"""
returns the list of states obtained by adding the given inputs
to the current state, one by one.
"""
states = []
cur = self
for x in xs:
cur = cur.add_input(x)
states.append(cur)
return states | 0.006452 |
def _pad(self, data):
"""
Pad value with bytes so it's a multiple of 16
See: http://stackoverflow.com/questions/14179784/python-encrypting-with-pycrypto-aes
:param data:
:return data:
"""
length = 16 - (len(data) % 16)
data += chr(length)*length
return data | 0.009119 |
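For illustration, the PKCS#7-style padding above works like this on a short text value (string input, as in the linked Stack Overflow answer):
data = "hello"
length = 16 - (len(data) % 16)        # 11 padding characters needed
padded = data + chr(length) * length  # what _pad would return
assert len(padded) == 16 and padded.endswith(chr(11) * 11)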
def get_deploy_hosts_list(cwd, key=None, file="propel.yml"):
"""
Returns the remote hosts in propel
:param cwd:
:param key:
:param file:
:return: list
"""
config = propel_deploy_config(cwd=cwd, file=file)["hosts"]
return config[key] if key else [v for k, l in config.items() for v in l] | 0.003106 |
def receive_message(self, message):
'''Call with an unframed message received from the network.
Raises: ProtocolError if the message violates the protocol in
some way. However, if it happened in a response that can be
paired with a request, the ProtocolError is instead set in the
result attribute of the send_request() that caused the error.
'''
if self._protocol is JSONRPCAutoDetect:
self._protocol = JSONRPCAutoDetect.detect_protocol(message)
try:
item, request_id = self._protocol.message_to_item(message)
except ProtocolError as e:
if e.response_msg_id is not id:
return self._receive_response(e, e.response_msg_id)
raise
if isinstance(item, Request):
item.send_result = partial(self._send_result, request_id)
return [item]
if isinstance(item, Notification):
return [item]
if isinstance(item, Response):
return self._receive_response(item.result, request_id)
assert isinstance(item, list)
if all(isinstance(payload, dict) and ('result' in payload or 'error' in payload)
for payload in item):
return self._receive_response_batch(item)
else:
return self._receive_request_batch(item) | 0.002203 |
def _create_record(self, rtype, name, content):
"""
Create a resource record. If a record already exists with the same
content, do nothing.
"""
result = False
name = self._relative_name(name)
ttl = None
        # TODO: should assert that this is an int
if self.ttl:
ttl = self.ttl
with localzone.manage(self.filename, self.origin, autosave=True) as zone:
if zone.add_record(name, rtype, content, ttl=ttl): # pylint: disable=no-member
result = True
LOGGER.debug("create_record: %s", result)
return result | 0.006309 |
def save(self, filename):
"""Write this trigger to gracedb compatible xml format
Parameters
----------
filename: str
Name of file to write to disk.
"""
gz = filename.endswith('.gz')
ligolw_utils.write_filename(self.outdoc, filename, gz=gz) | 0.006494 |
def fetch(self):
"""
Fetch a FieldValueInstance
:returns: Fetched FieldValueInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return FieldValueInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
field_type_sid=self._solution['field_type_sid'],
sid=self._solution['sid'],
) | 0.004862 |
def folders(self):
"""Return list of folders in root directory"""
for directory in self.directory:
for path in os.listdir(directory):
full_path = os.path.join(directory, path)
if os.path.isdir(full_path):
if not path.startswith('.'):
self.filepaths.append(full_path)
return self._get_filepaths() | 0.004914 |
def _on_click(self, event):
"""
Function bound to event of selection in the Combobox, calls callback if callable
:param event: Tkinter event
"""
if callable(self.__callback):
self.__callback(self.selection) | 0.014981 |
def get_hyperparameter_configurations(self, num, r, config_generator):
"""generate num hyperparameter configurations from search space using Bayesian optimization
Parameters
----------
num: int
the number of hyperparameter configurations
Returns
-------
list
a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...]
"""
global _KEY
assert self.i == 0
hyperparameter_configs = dict()
for _ in range(num):
params_id = create_bracket_parameter_id(self.s, self.i)
params = config_generator.get_config(r)
params[_KEY] = r
hyperparameter_configs[params_id] = params
self._record_hyper_configs(hyperparameter_configs)
return [[key, value] for key, value in hyperparameter_configs.items()] | 0.004459 |
def is_tuple_type(tp):
"""Test if the type is a generic tuple type, including subclasses excluding
non-generic classes.
Examples::
is_tuple_type(int) == False
is_tuple_type(tuple) == False
is_tuple_type(Tuple) == True
is_tuple_type(Tuple[str, int]) == True
class MyClass(Tuple[str, int]):
...
is_tuple_type(MyClass) == True
For more general tests use issubclass(..., tuple), for more precise test
(excluding subclasses) use::
get_origin(tp) is tuple # Tuple prior to Python 3.7
"""
if NEW_TYPING:
return (tp is Tuple or isinstance(tp, _GenericAlias) and
tp.__origin__ is tuple or
isinstance(tp, type) and issubclass(tp, Generic) and
issubclass(tp, tuple))
return type(tp) is TupleMeta | 0.001185 |
def schedule_messages(messages, recipients=None, sender=None, priority=None):
"""Schedules a message or messages.
:param MessageBase|str|list messages: str or MessageBase heir or list - use str to create PlainTextMessage.
    :param list|None recipients: recipient addresses or Django User model heir instances
If `None` Dispatches should be created before send using `prepare_dispatches()`.
:param User|None sender: User model heir instance
:param int priority: number describing message priority. If set overrides priority provided with message type.
:return: list of tuples - (message_model, dispatches_models)
:rtype: list
"""
if not is_iterable(messages):
messages = (messages,)
results = []
for message in messages:
if isinstance(message, six.string_types):
message = PlainTextMessage(message)
resulting_priority = message.priority
if priority is not None:
resulting_priority = priority
results.append(message.schedule(sender=sender, recipients=recipients, priority=resulting_priority))
return results | 0.005319 |
def id_tuple_list(self, id_node):
"""Return a list of (name, index) tuples for this id node."""
if id_node.type != "id":
raise QasmError("internal error, id_tuple_list")
bit_list = []
try:
g_sym = self.current_symtab[id_node.name]
except KeyError:
g_sym = self.global_symtab[id_node.name]
if g_sym.type == "qreg" or g_sym.type == "creg":
# Return list of (name, idx) for reg ids
for idx in range(g_sym.index):
bit_list.append((id_node.name, idx))
else:
# Return (name, -1) for other ids
bit_list.append((id_node.name, -1))
return bit_list | 0.002849 |
def json2excel(items, keys, filename, page_size=60000):
""" max_page_size is 65000 because we output old excel .xls format
"""
wb = xlwt.Workbook()
rowindex = 0
sheetindex = 0
for item in items:
if rowindex % page_size == 0:
sheetname = "%02d" % sheetindex
ws = wb.add_sheet(sheetname)
rowindex = 0
sheetindex += 1
colindex = 0
for key in keys:
ws.write(rowindex, colindex, key)
colindex += 1
rowindex += 1
colindex = 0
for key in keys:
v = item.get(key, "")
if type(v) == list:
v = ','.join(v)
if type(v) == set:
v = ','.join(v)
ws.write(rowindex, colindex, v)
colindex += 1
rowindex += 1
logging.debug(filename)
wb.save(filename) | 0.001104 |
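A short usage sketch for json2excel (requires the xlwt package; the output path is just an example):
items = [{"name": "alice", "tags": ["a", "b"]}, {"name": "bob", "tags": ["c"]}]
json2excel(items, keys=["name", "tags"], filename="out.xls")
# writes sheet "00"; note that a header row of keys is emitted before every record row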
def balanced_accuracy(y_true, y_pred):
"""Default scoring function: balanced accuracy.
Balanced accuracy computes each class' accuracy on a per-class basis using a
one-vs-rest encoding, then computes an unweighted average of the class accuracies.
Parameters
----------
y_true: numpy.ndarray {n_samples}
True class labels
y_pred: numpy.ndarray {n_samples}
Predicted class labels by the estimator
Returns
-------
fitness: float
Returns a float value indicating the individual's balanced accuracy
0.5 is as good as chance, and 1.0 is perfect predictive accuracy
"""
all_classes = list(set(np.append(y_true, y_pred)))
all_class_accuracies = []
for this_class in all_classes:
this_class_sensitivity = 0.
this_class_specificity = 0.
if sum(y_true == this_class) != 0:
this_class_sensitivity = \
float(sum((y_pred == this_class) & (y_true == this_class))) /\
float(sum((y_true == this_class)))
this_class_specificity = \
float(sum((y_pred != this_class) & (y_true != this_class))) /\
float(sum((y_true != this_class)))
this_class_accuracy = (this_class_sensitivity + this_class_specificity) / 2.
all_class_accuracies.append(this_class_accuracy)
return np.mean(all_class_accuracies) | 0.002857 |
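A small worked example of the balanced-accuracy definition above (binary labels, assuming numpy is imported as np in this module):
import numpy as np
y_true = np.array([1, 1, 1, 0])
y_pred = np.array([1, 0, 1, 0])
# class 1: sensitivity 2/3, specificity 1/1 -> per-class accuracy 5/6
# class 0: sensitivity 1/1, specificity 2/3 -> per-class accuracy 5/6
print(balanced_accuracy(y_true, y_pred))     # ~0.8333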
def facts(client, channel, nick, message, *args):
"""
A plugin for helga to automatically remember important things. Unless specified
by the setting ``FACTS_REQUIRE_NICKNAME``, facts are automatically stored when
a user says: ``something is something else``. Otherwise, facts must be explicitly
added: ``helga something is something else``.
Response format is, by default, the full message that was sent, including an author
and timestamp. However, if a user specifies the string '<reply>' in their message,
then only the words that follow '<reply>' will be returned in the response.
For example::
<sduncan> foo is bar
<sduncan> foo?
<helga> foo is bar (sduncan on 12/02/2013)
Or::
<sduncan> foo is <reply> bar
<sduncan> foo?
<helga> bar (sduncan on 12/02/2013)
To remove a fact, you must ask specifically using the ``forget`` command::
<sduncan> helga forget foo
<helga> forgotten
    To replace a fact, you must use the ``replace`` command, and provide the
    term as well as the new definition, separated by '<with>'::
<sduncan> helga replace foo <with> new def
<helga> replaced
"""
if len(args) == 2:
return facts_command(client, channel, nick, message, *args)
# Anything else is a match
return facts_match(client, channel, nick, message, args[0]) | 0.004252 |
def encode_for_locale(s):
"""
Encode text items for system locale. If encoding fails, fall back to ASCII.
"""
try:
return s.encode(LOCALE_ENCODING, 'ignore')
except (AttributeError, UnicodeDecodeError):
return s.decode('ascii', 'ignore').encode(LOCALE_ENCODING) | 0.003289 |
def __create(self, account_id, name, short_description, amount, period,
**kwargs):
"""Call documentation: `/subscription_plan/create
<https://www.wepay.com/developer/reference/subscription_plan#create>`_,
plus extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'account_id': account_id,
'name': name,
'short_description': short_description,
'amount': amount,
'period': period
}
return self.make_call(self.__create, params, kwargs) | 0.004713 |
def sub_request(self, method, request, uri, headers):
"""Handle the supplied sub-service request on the specified routing URI.
:param method: string - HTTP Verb
:param request: request object describing the HTTP request
        :param uri: URI of the request
:param headers: case-insensitive headers dict
:returns: tuple - (int, dict, string) containing:
int - the http response status code
dict - the headers for the http response
string - http string response
"""
logger.debug('StackInABoxService ({0}:{1}): Sub-Request Received '
'{2} - {3}'
.format(self.__id, self.name, method, uri))
return self.request(method, request, uri, headers) | 0.003636 |
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
APFSFileEntry: file entry.
"""
path_spec = apfs_path_spec.APFSPathSpec(
location=self.LOCATION_ROOT, identifier=self.ROOT_DIRECTORY_IDENTIFIER,
parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) | 0.002994 |
def get_keys_from_ldap(self, username=None):
"""
Fetch keys from ldap.
Args:
username Username associated with keys to fetch (optional)
Returns:
Array of dictionaries in '{username: [public keys]}' format
"""
result_dict = {}
filter = ['(sshPublicKey=*)']
if username is not None:
filter.append('(uid={})'.format(username))
attributes = ['uid', 'sshPublicKey']
results = self.client.search(filter, attributes)
for result in results:
result_dict[result.uid.value] = result.sshPublicKey.values
return result_dict | 0.003044 |
async def artwork_save(self):
"""Download artwork and save it to artwork.png."""
artwork = await self.atv.metadata.artwork()
if artwork is not None:
with open('artwork.png', 'wb') as file:
file.write(artwork)
else:
print('No artwork is currently available.')
return 1
return 0 | 0.005435 |
def plot_grid(grid_arcsec, array, units, kpc_per_arcsec, pointsize, zoom_offset_arcsec):
"""Plot a grid of points over the array of data on the figure.
Parameters
-----------.
grid_arcsec : ndarray or data.array.grids.RegularGrid
A grid of (y,x) coordinates in arc-seconds which may be plotted over the array.
array : data.array.scaled_array.ScaledArray
The 2D array of data which is plotted.
units : str
The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').
kpc_per_arcsec : float or None
The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.
grid_pointsize : int
The size of the points plotted to show the grid.
"""
if grid_arcsec is not None:
if zoom_offset_arcsec is not None:
grid_arcsec -= zoom_offset_arcsec
grid_units = convert_grid_units(grid_arcsec=grid_arcsec, array=array, units=units,
kpc_per_arcsec=kpc_per_arcsec)
plt.scatter(y=np.asarray(grid_units[:, 0]), x=np.asarray(grid_units[:, 1]), s=pointsize, c='k') | 0.005963 |
def _set_pfiles(dry_run, **kwargs):
"""Set the PFILES env var
Parameters
----------
dry_run : bool
Don't actually run
Keyword arguments
-----------------
pfiles : str
Value to set PFILES
Returns
-------
pfiles_orig : str
Current value of PFILES envar
"""
pfiles_orig = os.environ['PFILES']
pfiles = kwargs.get('pfiles', None)
if pfiles:
if dry_run:
print("mkdir %s" % pfiles)
else:
try:
os.makedirs(pfiles)
except OSError:
pass
pfiles = "%s:%s" % (pfiles, pfiles_orig)
os.environ['PFILES'] = pfiles
return pfiles_orig | 0.002813 |
def _copyFontInfo(self, targetInfo, sourceInfo):
""" Copy the non-calculating fields from the source info.
"""
infoAttributes = [
"versionMajor",
"versionMinor",
"copyright",
"trademark",
"note",
"openTypeGaspRangeRecords",
"openTypeHeadCreated",
"openTypeHeadFlags",
"openTypeNameDesigner",
"openTypeNameDesignerURL",
"openTypeNameManufacturer",
"openTypeNameManufacturerURL",
"openTypeNameLicense",
"openTypeNameLicenseURL",
"openTypeNameVersion",
"openTypeNameUniqueID",
"openTypeNameDescription",
"#openTypeNamePreferredFamilyName",
"#openTypeNamePreferredSubfamilyName",
"#openTypeNameCompatibleFullName",
"openTypeNameSampleText",
"openTypeNameWWSFamilyName",
"openTypeNameWWSSubfamilyName",
"openTypeNameRecords",
"openTypeOS2Selection",
"openTypeOS2VendorID",
"openTypeOS2Panose",
"openTypeOS2FamilyClass",
"openTypeOS2UnicodeRanges",
"openTypeOS2CodePageRanges",
"openTypeOS2Type",
"postscriptIsFixedPitch",
"postscriptForceBold",
"postscriptDefaultCharacter",
"postscriptWindowsCharacterSet"
]
for infoAttribute in infoAttributes:
copy = False
if self.ufoVersion == 1 and infoAttribute in fontInfoAttributesVersion1:
copy = True
elif self.ufoVersion == 2 and infoAttribute in fontInfoAttributesVersion2:
copy = True
elif self.ufoVersion == 3 and infoAttribute in fontInfoAttributesVersion3:
copy = True
if copy:
value = getattr(sourceInfo, infoAttribute)
setattr(targetInfo, infoAttribute, value) | 0.002493 |
def single_send(self, param, must=[APIKEY, MOBILE, TEXT]):
        '''Send a single SMS message.
        Parameter: Type, Required, Description, Example
        apikey: String, required. Unique user identifier. Example: 9b11127a9701975c734b8aee81ee3526
        mobile: String, required.
        Recipient phone number; only a single number is supported per call. International numbers must
        include the country prefix and start with "+" (the "+" must be urlencoded, otherwise a format
        error occurs); numbers without a leading "+" are treated as Chinese domestic numbers.
        (For international SMS the mobile parameter is normalized to E.164 format, so the number passed
        in may differ from the one in later status reports. For E.164, see:
        https://en.wikipedia.org/wiki/E.164) Domestic number: 15205201314
        International number: urlencode("+93701234567");
        text: String, required. Message content. Example: [Yunpian] Your verification code is 1234
        extend: String, optional. Extension number. Disabled by default; contact customer support to apply. Example: 001
        uid: String, optional. The ID of this message in your own business system, e.g. an order number
        or the serial number of a send record. If set, the delivery status response will include this ID.
        Disabled by default; contact customer support to apply. Example: 10001
        callback_url: String, optional.
        Push address for this message's status report. After the message is sent, the delivery report is
        pushed to this address. Bulk configuration is available under "Console - System Settings - Data
        Push & Fetch"; if an address is already configured there and this parameter is also supplied in
        the request, the address in the request takes precedence.
        http://your_receive_url_address
        Args:
        param:
        Results:
        Result
        '''
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: {VERSION_V2:rsp}[self.version()])
return self.path('single_send.json').post(param, h, r) | 0.005055 |
def metadata(access_token, text): # (Legacy)
'''
Name: metadata_only
Parameters: access_token, text (string)
Return: dictionary
'''
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + str(access_token)
}
payload = {'text': text}
request = requests.post(metadata_url, json=payload, headers=headers)
if request.status_code == 201:
metadata = request.json()
return metadata
return {'status': request.status_code, "message": request.text} | 0.021033 |
def f_load_skeleton(self):
"""Loads the full skeleton from the storage service.
This needs to be done after a successful exploration in order to update the
trajectory tree with all results and derived parameters from the individual single runs.
This will only add empty results and derived parameters (i.e. the skeleton)
and load annotations.
"""
self.f_load(self.v_name, as_new=False, load_parameters=pypetconstants.LOAD_SKELETON,
load_derived_parameters=pypetconstants.LOAD_SKELETON,
load_results=pypetconstants.LOAD_SKELETON,
load_other_data=pypetconstants.LOAD_SKELETON,
with_run_information=False) | 0.008108 |
def is_answer_valid(self, ans):
"""Validate user's answer against available choices."""
return ans in [str(i+1) for i in range(len(self.choices))] | 0.012346 |
def verify_file(fp, password):
'Returns whether a scrypt encrypted file is valid.'
sf = ScryptFile(fp = fp, password = password)
for line in sf: pass
sf.close()
return sf.valid | 0.032258 |
def directory_name_with_course(self):
''' The assignment name in a format that is suitable for a directory name. '''
coursename = self.course.directory_name()
assignmentname = self.title.replace(" ", "_").replace("\\", "_").replace(",","").lower()
return coursename + os.sep + assignmentname | 0.015432 |
def import_key(kwargs=None, call=None):
'''
List the keys available
CLI Example:
.. code-block:: bash
salt-cloud -f import_key joyent keyname=mykey keyfile=/tmp/mykey.pub
'''
if call != 'function':
log.error(
'The import_key function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
if 'keyfile' not in kwargs:
log.error('The location of the SSH keyfile is required.')
return False
if not os.path.isfile(kwargs['keyfile']):
log.error('The specified keyfile (%s) does not exist.', kwargs['keyfile'])
return False
with salt.utils.files.fopen(kwargs['keyfile'], 'r') as fp_:
kwargs['key'] = salt.utils.stringutils.to_unicode(fp_.read())
send_data = {'name': kwargs['keyname'], 'key': kwargs['key']}
kwargs['data'] = salt.utils.json.dumps(send_data)
rcode, data = query(
command='my/keys',
method='POST',
data=kwargs['data'],
)
log.debug(pprint.pformat(data))
return {'keys': {data['name']: data['key']}} | 0.001641 |
def seqs_to_stream(seqs, ih):
"""Converts seqs into stream of FASTA records, depending on input handler.
Each FASTA record will be a list of lines.
"""
if ih == '_input_as_multiline_string':
recs = FastaFinder(seqs.split('\n'))
elif ih == '_input_as_string':
recs = FastaFinder(open(seqs))
elif ih == '_input_as_seqs':
recs = [['>'+str(i), s] for i, s in enumerate(seqs)]
elif ih == '_input_as_lines':
recs = FastaFinder(seqs)
else:
        raise TypeError("Unknown input handler %s" % ih)
return recs | 0.003509 |
def fill_document(doc):
"""Add a section, a subsection and some text to the document.
:param doc: the document
:type doc: :class:`pylatex.document.Document` instance
"""
with doc.create(Section('A section')):
doc.append('Some regular text and some ')
doc.append(italic('italic text. '))
with doc.create(Subsection('A subsection')):
doc.append('Also some crazy characters: $&#{}') | 0.002288 |
def peek(self) -> str:
"""Return the next character without advancing offset.
Raises:
EndOfInput: If past the end of `self.input`.
"""
try:
return self.input[self.offset]
except IndexError:
raise EndOfInput(self) | 0.00692 |
def load(self, graphic):
"""
Loads information for this item from the xml data.
:param graphic | <XWalkthroughItem>
"""
for prop in graphic.properties():
key = prop.name()
value = prop.value()
if key == 'caption':
value = projex.wikitext.render(value.strip())
self.setProperty(key, value)
for attr, attr_value in prop.attributes().items():
self.setProperty('{0}_{1}'.format(key, attr), attr_value)
self.prepare() | 0.009709 |
async def index_page(self, request):
"""
Return index page with initial state for admin
"""
context = {"initial_state": self.schema.to_json()}
return render_template(
self.template,
request,
context,
app_key=TEMPLATE_APP_KEY,
) | 0.006173 |
def earth_accel_df(IMU,ATT):
'''return earth frame acceleration vector from df log'''
r = rotation_df(ATT)
accel = Vector3(IMU.AccX, IMU.AccY, IMU.AccZ)
return r * accel | 0.010811 |
async def transaction(self, func, *watches, **kwargs):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
"""
shard_hint = kwargs.pop('shard_hint', None)
value_from_callable = kwargs.pop('value_from_callable', False)
watch_delay = kwargs.pop('watch_delay', None)
async with await self.pipeline(True, shard_hint) as pipe:
while True:
try:
if watches:
await pipe.watch(*watches)
func_value = await func(pipe)
exec_value = await pipe.execute()
return func_value if value_from_callable else exec_value
except WatchError:
if watch_delay is not None and watch_delay > 0:
await asyncio.sleep(
watch_delay,
loop=self.connection_pool.loop
)
continue | 0.001732 |
def make_limited_stream(stream, limit):
"""Makes a stream limited."""
if not isinstance(stream, LimitedStream):
if limit is None:
raise TypeError('stream not limited and no limit provided.')
stream = LimitedStream(stream, limit)
return stream | 0.003546 |
def adjust_size(self, dst_x, dst_y, mode=FIT):
"""
given a x and y of dest, determine the ratio and return
an (x,y,w,h) for a output image.
"""
# get image size
image = Image.open(self.path)
width, height = image.size
if mode == FIT:
return adjust_crop(dst_x, dst_y, width, height) | 0.005602 |
def found_check():
"""
Temporarily enables spiceypy default behavior which raises exceptions for
false found flags for certain spice functions. All spice
functions executed within the context manager will check the found
flag return parameter and the found flag will be removed from the return for
the given function.
For Example bodc2n in spiceypy is normally called like::
name = spice.bodc2n(399)
With the possibility that an exception is thrown in the even of a invalid ID::
name = spice.bodc2n(-999991) # throws a SpiceyError
With this function however, we can use it as a context manager to do this::
with spice.found_check():
found = spice.bodc2n(-999991) # will raise an exception!
Within the context any spice functions called that normally check the found
flags will pass through the check without raising an exception if they are false.
"""
current_catch_state = config.catch_false_founds
config.catch_false_founds = True
yield
config.catch_false_founds = current_catch_state | 0.00366 |
def file_hash(content):
"""Generate hash for file or string and avoid strings starting with "ad"
    to work around ad blockers being overly aggressive.
The current implementation is based on sha256.
:param str|FileIO content: The content to hash, either as string or as file-like object
"""
h = hashlib.sha256()
if isinstance(content, bytes_type):
h.update(content)
else:
data = True
while data:
data = content.read(1024 * 1024)
h.update(data)
h_digest = h.digest()
# base64url
# | char | substitute |
# | + | - |
# | / | _ |
#
result = base64.b64encode(h_digest, altchars=b'-_')
# ensure this is a str object in 3.x
result = result.decode('ascii')
result = result.rstrip('=')
if result[:2].lower() == 'ad':
        # work around ad blockers blocking everything starting with "ad"
        # by replacing the "d" with another character
if result[1] == 'd':
result = result[0] + '~' + result[2:]
else:
# upper case D
result = result[0] + '.' + result[2:]
return result | 0.001705 |
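A quick sketch of hashing a byte string with file_hash (bytes_type is assumed to be the module's alias for bytes):
digest = file_hash(b"hello world")
# 43 characters of URL-safe base64 (sha256, '=' padding stripped, '+'/'/' replaced
# by '-'/'_'), with the leading-"ad" workaround applied when needed
assert len(digest) == 43 and not digest.lower().startswith("ad")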
def remove_op_node(self, node):
"""Remove an operation node n.
Add edges from predecessors to successors.
"""
if isinstance(node, int):
warnings.warn('Calling remove_op_node() with a node id is deprecated,'
' use a DAGNode instead',
DeprecationWarning, 2)
node = self._id_to_node[node]
if node.type != 'op':
raise DAGCircuitError('The method remove_op_node only works on op node types. An "%s" '
'node type was wrongly provided.' % node.type)
pred_map, succ_map = self._make_pred_succ_maps(node)
# remove from graph and map
self._multi_graph.remove_node(node)
for w in pred_map.keys():
self._multi_graph.add_edge(pred_map[w], succ_map[w],
name="%s[%s]" % (w[0].name, w[1]), wire=w) | 0.006445 |
def do_glob_math(self, cont):
"""Performs #{}-interpolation. The result is always treated as a fixed
syntactic unit and will not be re-evaluated.
"""
# TODO that's a lie! this should be in the parser for most cases.
if not isinstance(cont, six.string_types):
warn(FutureWarning(
"do_glob_math was passed a non-string {0!r} "
"-- this will no longer be supported in pyScss 2.0"
.format(cont)
))
cont = six.text_type(cont)
if '#{' not in cont:
return cont
cont = _expr_glob_re.sub(self._pound_substitute, cont)
return cont | 0.002933 |
def _prepare_menu(self, node, flat=None):
"""
Prepare the menu hierarchy from the given device tree.
:param Device node: root node of device hierarchy
:returns: menu hierarchy as list
"""
if flat is None:
flat = self.flat
ItemGroup = MenuSection if flat else SubMenu
return [
ItemGroup(branch.label, self._collapse_device(branch, flat))
for branch in node.branches
if branch.methods or branch.branches
] | 0.003817 |
def _spec_to_globs(address_mapper, specs):
"""Given a Specs object, return a PathGlobs object for the build files that it matches."""
patterns = set()
for spec in specs:
patterns.update(spec.make_glob_patterns(address_mapper))
return PathGlobs(include=patterns, exclude=address_mapper.build_ignore_patterns) | 0.021944 |
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled). If any given Futures are duplicated, they will be returned
once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = set(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
with f._condition:
f._waiters.remove(waiter) | 0.001623 |
def process_json_line(self, data, name, idx):
"""
Processes single json line
:param data:
:param name:
:param idx:
:return:
"""
data = data.strip()
if len(data) == 0:
return
ret = []
try:
js = json.loads(data)
self.num_json += 1
ret.append(self.process_json_rec(js, name, idx, []))
except Exception as e:
logger.debug('Exception in processing JSON %s idx %s : %s' % (name, idx, e))
self.trace_logger.log(e)
return ret | 0.005042 |
def get_element_id(self, complete_name):
"""Get the TocElement element id-number of the element with the
supplied name."""
[group, name] = complete_name.split('.')
element = self.get_element(group, name)
if element:
return element.ident
else:
logger.warning('Unable to find variable [%s]', complete_name)
return None | 0.005 |
def make_multiple(self, specifications, options=None):
"""
Take a list of specifications and make scripts from them,
:param specifications: A list of specifications.
:return: A list of all absolute pathnames written to,
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames | 0.004762 |
def guess_dir_structure(dir):
"""
Return the directory structure of "dir".
Args:
dir(str): something like '/path/to/imagenet/val'
Returns:
either 'train' or 'original'
"""
subdir = os.listdir(dir)[0]
# find a subdir starting with 'n'
if subdir.startswith('n') and \
os.path.isdir(os.path.join(dir, subdir)):
dir_structure = 'train'
else:
dir_structure = 'original'
logger.info(
"[ILSVRC12] Assuming directory {} has '{}' structure.".format(
dir, dir_structure))
return dir_structure | 0.003012 |
def create(self, metric_id, value, timestamp=None):
"""Add a Metric Point to a Metric
:param int metric_id: Metric ID
:param int value: Value to plot on the metric graph
:param str timestamp: Unix timestamp of the point was measured
:return: Created metric point data (:class:`dict`)
.. seealso:: https://docs.cachethq.io/reference#post-metric-points
"""
data = ApiParams()
data['value'] = value
data['timestamp'] = timestamp
return self._post('metrics/%s/points' % metric_id, data=data)['data'] | 0.003419 |
def key_press(keys):
"""returns a handler that can be used with EventListener.listen()
and returns when a key in keys is pressed"""
return lambda e: e.key if e.type == pygame.KEYDOWN \
and e.key in keys else EventConsumerInfo.DONT_CARE | 0.00738 |
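For context, the returned handler can be probed directly with a synthetic event (assuming pygame is installed; a KEYDOWN on a watched key yields that key code):
import pygame
handler = key_press({pygame.K_RETURN})
event = pygame.event.Event(pygame.KEYDOWN, key=pygame.K_RETURN)
assert handler(event) == pygame.K_RETURN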
def unlock(self, request, *args, **kwargs):
""" Unlocks the considered topic and retirects the user to the success URL. """
self.object = self.get_object()
success_url = self.get_success_url()
self.object.status = Topic.TOPIC_UNLOCKED
self.object.save()
messages.success(self.request, self.success_message)
return HttpResponseRedirect(success_url) | 0.007444 |
def npv(ico, nci, r, n):
""" This capital budgeting function computes the net present
    value of a cash-flow-generating investment.
ico = Initial Capital Outlay
nci = net cash inflows per period
r = discounted rate
n = number of periods
Example: npv(100000, 15000, .03, 10)
"""
pv_nci = 0
for x in range(n):
pv_nci = pv_nci + (nci/((1 + r) ** (x + 1)))
return pv_nci - ico | 0.004706 |
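A worked check of the docstring example, assuming the usual definition NPV = sum(nci / (1 + r)**t) - ico:
# npv(100000, 15000, .03, 10)
# present value of inflows = 15000 * (1 - 1.03**-10) / 0.03 ~= 127,953
# net present value        ~= 127,953 - 100,000             ~= 27,953
print(round(npv(100000, 15000, 0.03, 10), 2))   # ~27953.04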
def explore(layer=None):
"""Function used to discover the Scapy layers and protocols.
It helps to see which packets exists in contrib or layer files.
params:
- layer: If specified, the function will explore the layer. If not,
the GUI mode will be activated, to browse the available layers
examples:
>>> explore() # Launches the GUI
>>> explore("dns") # Explore scapy.layers.dns
>>> explore("http2") # Explore scapy.contrib.http2
>>> explore(scapy.layers.bluetooth4LE)
Note: to search a packet by name, use ls("name") rather than explore.
"""
if layer is None: # GUI MODE
if not conf.interactive:
raise Scapy_Exception("explore() GUI-mode cannot be run in "
"interactive mode. Please provide a "
"'layer' parameter !")
# 0 - Imports
try:
import prompt_toolkit
except ImportError:
raise ImportError("prompt_toolkit is not installed ! "
"You may install IPython, which contains it, via"
" `pip install ipython`")
if not _version_checker(prompt_toolkit, (2, 0)):
raise ImportError("prompt_toolkit >= 2.0.0 is required !")
# Only available with prompt_toolkit > 2.0, not released on PyPi yet
from prompt_toolkit.shortcuts.dialogs import radiolist_dialog, \
button_dialog
from prompt_toolkit.formatted_text import HTML
# 1 - Ask for layer or contrib
action = button_dialog(
title="Scapy v%s" % conf.version,
text=HTML(
six.text_type(
                    '<style bg="white" fg="red">Choose the type of packets'
' you want to explore:</style>'
)
),
buttons=[
(six.text_type("Layers"), "layers"),
(six.text_type("Contribs"), "contribs"),
(six.text_type("Cancel"), "cancel")
])
# 2 - Retrieve list of Packets
if action == "layers":
# Get all loaded layers
_radio_values = conf.layers.layers()
# Restrict to layers-only (not contribs) + packet.py and asn1*.py
_radio_values = [x for x in _radio_values if ("layers" in x[0] or
"packet" in x[0] or
"asn1" in x[0])]
elif action == "contribs":
# Get all existing contribs
from scapy.main import list_contrib
_radio_values = list_contrib(ret=True)
_radio_values = [(x['name'], x['description'])
for x in _radio_values]
# Remove very specific modules
_radio_values = [x for x in _radio_values if not ("can" in x[0])]
else:
# Escape/Cancel was pressed
return
# Python 2 compat
if six.PY2:
_radio_values = [(six.text_type(x), six.text_type(y))
for x, y in _radio_values]
# 3 - Ask for the layer/contrib module to explore
result = radiolist_dialog(
values=_radio_values,
title="Scapy v%s" % conf.version,
text=HTML(
six.text_type(
'<style bg="white" fg="red">Please select a layer among'
' the following, to see all packets contained in'
' it:</style>'
)
))
if result is None:
return # User pressed "Cancel"
# 4 - (Contrib only): load contrib
if action == "contribs":
from scapy.main import load_contrib
load_contrib(result)
result = "scapy.contrib." + result
else: # NON-GUI MODE
# We handle layer as a short layer name, full layer name
# or the module itself
if isinstance(layer, types.ModuleType):
layer = layer.__name__
if isinstance(layer, str):
if layer.startswith("scapy.layers."):
result = layer
else:
if layer.startswith("scapy.contrib."):
layer = layer.replace("scapy.contrib.", "")
from scapy.main import load_contrib
load_contrib(layer)
result_layer, result_contrib = (("scapy.layers.%s" % layer),
("scapy.contrib.%s" % layer))
if result_layer in conf.layers.ldict:
result = result_layer
elif result_contrib in conf.layers.ldict:
result = result_contrib
else:
raise Scapy_Exception("Unknown scapy module '%s'" % layer)
else:
warning("Wrong usage ! Check out help(explore)")
return
# COMMON PART
# Get the list of all Packets contained in that module
try:
all_layers = conf.layers.ldict[result]
except KeyError:
raise Scapy_Exception("Unknown scapy module '%s'" % layer)
# Print
print(conf.color_theme.layer_name("Packets contained in %s:" % result))
rtlst = [(lay.__name__ or "", lay._name or "") for lay in all_layers]
print(pretty_list(rtlst, [("Class", "Name")], borders=True)) | 0.000184 |
def write(self, version):
# type: (str) -> None
""" Write the project version to .py file.
This will regex search in the file for a
``__version__ = VERSION_STRING`` and substitute the version string
for the new version.
"""
with open(self.version_file) as fp:
content = fp.read()
ver_statement = "__version__ = '{}'".format(version)
new_content = RE_PY_VERSION.sub(ver_statement, content)
fs.write_file(self.version_file, new_content) | 0.005682 |
def plotter_cls(self):
"""The plotter class"""
ret = self._plotter_cls
if ret is None:
self._logger.debug('importing %s', self.module)
mod = import_module(self.module)
plotter = self.plotter_name
if plotter not in vars(mod):
raise ImportError("Module %r does not have a %r plotter!" % (
mod, plotter))
ret = self._plotter_cls = getattr(mod, plotter)
_versions.update(get_versions(key=lambda s: s == self._plugin))
return ret | 0.003546 |
def search_normalize(self, results):
"""Append host id to search results to be able to initialize found
:class:`Interface` successfully
"""
for interface in results:
interface[u'host_id'] = self.host.id # pylint:disable=no-member
return super(Interface, self).search_normalize(results) | 0.005917 |
def invalidate_stored_oembeds(self, sender, instance, created, **kwargs):
"""
A hook for django-based oembed providers to delete any stored oembeds
"""
ctype = ContentType.objects.get_for_model(instance)
StoredOEmbed.objects.filter(
object_id=instance.pk,
content_type=ctype).delete() | 0.005747 |
def _laplace_fit(self,obj_type):
""" Performs a Laplace approximation to the posterior
Parameters
----------
obj_type : method
Whether a likelihood or a posterior
Returns
----------
None (plots posterior)
"""
# Get Mode and Inverse Hessian information
y = self.fit(method='PML',printer=False)
if y.ihessian is None:
raise Exception("No Hessian information - Laplace approximation cannot be performed")
else:
self.latent_variables.estimation_method = 'Laplace'
theta, Y, scores, states, states_var, X_names = self._categorize_model_output(self.latent_variables.get_z_values())
# Change this in future
try:
latent_variables_store = self.latent_variables.copy()
except:
latent_variables_store = self.latent_variables
return LaplaceResults(data_name=self.data_name,X_names=X_names,model_name=self.model_name,
model_type=self.model_type, latent_variables=latent_variables_store,data=Y,index=self.index,
multivariate_model=self.multivariate_model,objective_object=obj_type,
method='Laplace',ihessian=y.ihessian,signal=theta,scores=scores,
z_hide=self._z_hide,max_lag=self.max_lag,states=states,states_var=states_var) | 0.0199 |
def change_puk(ctx, puk, new_puk):
"""
Change the PUK code.
If the PIN is lost or blocked it can be reset using a PUK.
The PUK must be between 6 and 8 characters long, and supports any type of
alphanumeric characters.
"""
controller = ctx.obj['controller']
if not puk:
puk = _prompt_pin(ctx, prompt='Enter your current PUK')
if not new_puk:
new_puk = click.prompt(
'Enter your new PUK', default='', hide_input=True,
show_default=False, confirmation_prompt=True,
err=True)
if not _valid_pin_length(puk):
ctx.fail('Current PUK must be between 6 and 8 characters long.')
if not _valid_pin_length(new_puk):
ctx.fail('New PUK must be between 6 and 8 characters long.')
try:
controller.change_puk(puk, new_puk)
click.echo('New PUK set.')
except AuthenticationBlocked as e:
logger.debug('PUK is blocked.', exc_info=e)
ctx.fail('PUK is blocked.')
except WrongPuk as e:
logger.debug(
'Failed to change PUK, %d tries left', e.tries_left, exc_info=e)
ctx.fail('PUK change failed - %d tries left.' % e.tries_left) | 0.00084 |
def get(cls, tab_uuid, tab_attachment_tab_id, custom_headers=None):
"""
Get a specific attachment. The header of the response contains the
content-type of the attachment.
:type api_context: context.ApiContext
:type tab_uuid: str
:type tab_attachment_tab_id: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseTabAttachmentTab
"""
if custom_headers is None:
custom_headers = {}
api_client = client.ApiClient(cls._get_api_context())
endpoint_url = cls._ENDPOINT_URL_READ.format(tab_uuid,
tab_attachment_tab_id)
response_raw = api_client.get(endpoint_url, {}, custom_headers)
return BunqResponseTabAttachmentTab.cast_from_bunq_response(
cls._from_json(response_raw, cls._OBJECT_TYPE_GET)
) | 0.002215 |
def column_correlations(self, X):
"""Returns the column correlations with each principal component."""
utils.validation.check_is_fitted(self, 's_')
# Convert numpy array to pandas DataFrame
if isinstance(X, np.ndarray):
X = pd.DataFrame(X)
row_pc = self.row_coordinates(X)
return pd.DataFrame({
component: {
feature: row_pc[component].corr(X[feature])
for feature in X.columns
}
for component in row_pc.columns
}) | 0.003623 |
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddr sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNet):
return obj._get_networks_key()
elif isinstance(obj, _BaseIP):
return obj._get_address_key()
return NotImplemented | 0.001305 |
def get_assets(self):
'''
Return a flat list of absolute paths to all assets required by this
viewer
'''
return sum([
[self.prefix_asset(viewer, relpath) for relpath in viewer.assets]
for viewer in self.viewers
], []) | 0.00692 |
def get_cache_token(self, token):
""" Get token and data from Redis """
if self.conn is None:
raise CacheException('Redis is not connected')
token_data = self.conn.get(token)
token_data = json.loads(token_data) if token_data else None
return token_data | 0.006515 |
def search_bytes(self, bytes, minAddr = None, maxAddr = None):
"""
Search for the given byte pattern within the process memory.
@type bytes: str
@param bytes: Bytes to search for.
@type minAddr: int
@param minAddr: (Optional) Start the search at this memory address.
@type maxAddr: int
@param maxAddr: (Optional) Stop the search at this memory address.
@rtype: iterator of int
@return: An iterator of memory addresses where the pattern was found.
@raise WindowsError: An error occurred when querying or reading the
process memory.
"""
pattern = BytePattern(bytes)
matches = Search.search_process(self, pattern, minAddr, maxAddr)
for addr, size, data in matches:
yield addr | 0.007273 |
def generate(cls):
"""
Generates a random :class:`~nacl.signing.SigningKey` object.
:rtype: :class:`~nacl.signing.SigningKey`
"""
return cls(
random(nacl.bindings.crypto_sign_SEEDBYTES),
encoder=encoding.RawEncoder,
) | 0.006897 |
def do_GET(self):
"""Override inherited do_GET method.
Include logic for returning a http manifest when the URL ends with
"http_manifest.json".
"""
if self.path.endswith("http_manifest.json"):
try:
manifest = self.generate_http_manifest()
self.send_response(200)
self.end_headers()
self.wfile.write(manifest)
except dtoolcore.DtoolCoreTypeError:
self.send_response(400)
self.end_headers()
else:
super(DtoolHTTPRequestHandler, self).do_GET() | 0.003215 |
def save_user(self, uid, user_password, user_email='', user_channels=None, user_roles=None, user_views=None, disable_account=False):
'''
a method to add or update an authorized user to the bucket
:param uid: string with id to assign to user
:param user_password: string with password to assign to user
:param user_email: [optional] string with email of user for future lookup
:param user_channels: [optional] list of strings with channels to subscribe to user
:param user_roles: [optional] list of strings with roles to assign to user
:param user_views: [optional] list of query criteria to create as views for user
:param disable_account: boolean to disable access to records by user
:return: integer with status code of user account creation
'''
# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/user/put__db___user__name_
# https://developer.couchbase.com/documentation/mobile/1.5/guides/sync-gateway/authorizing-users/index.html
title = '%s.save_user' % self.__class__.__name__
# validate inputs
input_fields = {
'uid': uid,
'user_password': user_password,
'user_email': user_email,
'user_channels': user_channels,
'user_roles': user_roles
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct url
url = self.bucket_url + '/_user/%s' % uid
# create default settings
json_data = {
'admin_channels': [ uid ],
'admin_roles': [ uid ],
'name': uid,
'password': user_password,
'disabled': disable_account
}
# add optional additional channels and roles
if user_email:
json_data['email'] = user_email
if user_channels:
json_data['admin_channels'].extend(user_channels)
if user_roles:
json_data['admin_roles'].extend(user_roles)
# send request
response = requests.put(url, json=json_data)
# create indices
if response.status_code in (200, 201) and not self.public:
self.create_view(uid=uid)
if user_views:
for criteria in user_views:
self.create_view(query_criteria=criteria, uid=uid)
# report outcome
self.printer('User "%s" updated in bucket "%s"' % (uid, self.bucket_name))
return response.status_code | 0.006593 |
def hil_gps_send(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, vn, ve, vd, cog, satellites_visible, force_mavlink1=False):
'''
The global position, as returned by the Global Positioning System
(GPS). This is NOT the global
        position estimate of the system, but rather a RAW
sensor value. See message GLOBAL_POSITION for the
global position estimate. Coordinate frame is right-
handed, Z-axis up (GPS frame).
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
fix_type : 0-1: no fix, 2: 2D fix, 3: 3D fix. Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix. (uint8_t)
lat : Latitude (WGS84), in degrees * 1E7 (int32_t)
lon : Longitude (WGS84), in degrees * 1E7 (int32_t)
alt : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t)
eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
epv : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
vel : GPS ground speed (m/s * 100). If unknown, set to: 65535 (uint16_t)
vn : GPS velocity in cm/s in NORTH direction in earth-fixed NED frame (int16_t)
ve : GPS velocity in cm/s in EAST direction in earth-fixed NED frame (int16_t)
vd : GPS velocity in cm/s in DOWN direction in earth-fixed NED frame (int16_t)
cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t)
satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t)
'''
return self.send(self.hil_gps_encode(time_usec, fix_type, lat, lon, alt, eph, epv, vel, vn, ve, vd, cog, satellites_visible), force_mavlink1=force_mavlink1) | 0.0075 |
def set_shared_config(cls, config):
""" This allows to set a config that will be used when calling
``shared_blockchain_instance`` and allows to define the configuration
without requiring to actually create an instance
"""
assert isinstance(config, dict)
cls._sharedInstance.config.update(config)
# if one is already set, delete
if cls._sharedInstance.instance:
cls._sharedInstance.instance = None | 0.00625 |
def sismember(self, name, value):
"""
Is the provided value is in the ``Set``?
:param name: str the name of the redis key
:param value: str
:return: Future()
"""
with self.pipe as pipe:
return pipe.sismember(self.redis_key(name),
self.valueparse.encode(value)) | 0.00545 |
def verb_chain_starts(self):
"""The start positions of ``verb_chains`` elements."""
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.starts(VERB_CHAINS) | 0.009524 |
def exit(self, timeperiods, hosts, services):
"""Remove ref in scheduled downtime and raise downtime log entry (exit)
:param hosts: hosts objects to get item ref
:type hosts: alignak.objects.host.Hosts
:param services: services objects to get item ref
:type services: alignak.objects.service.Services
:return: [], always | None
:rtype: list
"""
if self.ref in hosts:
item = hosts[self.ref]
else:
item = services[self.ref]
broks = []
# If not is_in_effect means that ot was probably a flexible downtime which was
# not triggered. In this case, nothing special to do...
if self.is_in_effect is True:
# This was a fixed or a flexible+triggered downtime
self.is_in_effect = False
item.scheduled_downtime_depth -= 1
if item.scheduled_downtime_depth == 0:
item.raise_exit_downtime_log_entry()
notification_period = timeperiods[item.notification_period]
# Notification author data
# todo: note that alias and name are not implemented yet
author_data = {
'author': self.author, 'author_name': u'Not available',
'author_alias': u'Not available', 'author_comment': self.comment
}
item.create_notifications(u'DOWNTIMEEND', notification_period, hosts, services,
author_data=author_data)
item.in_scheduled_downtime = False
if self.ref in hosts:
broks.append(self.get_expire_brok(item.get_name()))
else:
broks.append(self.get_expire_brok(item.host_name, item.get_name()))
item.del_comment(self.comment_id)
self.can_be_deleted = True
# when a downtime ends and the concerned item was a problem
# a notification should be sent with the next critical check
# So we should set a flag here which informs the consume_result function
# to send a notification
item.in_scheduled_downtime_during_last_check = True
return broks | 0.003129 |
def _create_x_y(l, duration=1):
"""
Create 2 lists
    x: time (in units of one dot (dit))
    y: bits
    from a list of bits
>>> l = [1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1]
>>> x, y = _create_x_y(l)
>>> x
[-1, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28]
>>> y
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0]
"""
l = [0] + l + [0]
y = []
x = []
for i, bit in enumerate(l):
y.append(bit)
y.append(bit)
x.append((i - 1) * duration)
x.append(i * duration)
return x, y | 0.006757 |
def add_exports_for_repos(repos):
"""
This function will add needed entries to /etc/exports. It will not remove any
entries from the file. It will then restart the server if necessary
"""
current_exports = _get_current_exports()
needed_exports = _get_exports_for_repos(repos)
if not needed_exports.difference(current_exports):
if not _server_is_running():
_restart_server()
return
_write_exports_config(current_exports.union(needed_exports))
_restart_server() | 0.003795 |
def mostly(fn):
"""
95% chance of happening
"""
def wrapped(*args, **kwargs):
if in_percentage(95):
fn(*args, **kwargs)
return wrapped | 0.005747 |
def t_INITIAL_COMMENT(self, t):
r';'
t.lexer.push_state('asmcomment')
t.type = 'TOKEN'
t.value = ';'
return t | 0.013423 |
def migrate(self, host, port, keys, destination_db, timeout,
copy=False, replace=False, auth=None):
"""
Migrate 1 or more keys from the current Redis server to a different
server specified by the ``host``, ``port`` and ``destination_db``.
The ``timeout``, specified in milliseconds, indicates the maximum
time the connection between the two servers can be idle before the
command is interrupted.
If ``copy`` is True, the specified ``keys`` are NOT deleted from
the source server.
If ``replace`` is True, this operation will overwrite the keys
on the destination server if they exist.
If ``auth`` is specified, authenticate to the destination server with
the password provided.
"""
keys = list_or_args(keys, [])
if not keys:
raise DataError('MIGRATE requires at least one key')
pieces = []
if copy:
pieces.append(Token.get_token('COPY'))
if replace:
pieces.append(Token.get_token('REPLACE'))
if auth:
pieces.append(Token.get_token('AUTH'))
pieces.append(auth)
pieces.append(Token.get_token('KEYS'))
pieces.extend(keys)
return self.execute_command('MIGRATE', host, port, '', destination_db,
timeout, *pieces) | 0.002146 |
def SigmoidContrast(gain=10, cutoff=0.5, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Adjust contrast by scaling each pixel value to ``255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))``.
Values in the range ``gain=(5, 20)`` and ``cutoff=(0.25, 0.75)`` seem to be sensible.
dtype support::
See :func:`imgaug.augmenters.contrast.adjust_contrast_sigmoid`.
Parameters
----------
gain : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Multiplier for the sigmoid function's output.
Higher values lead to quicker changes from dark to light pixels.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
cutoff : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Cutoff that shifts the sigmoid function in horizontal direction.
Higher values mean that the switch from dark to light pixels happens later, i.e.
the pixels will remain darker.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
per_channel : bool or float, optional
Whether to use the same value for all channels (False) or to sample a new value for each
channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel`
will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Returns
-------
_ContrastFuncWrapper
Augmenter to perform sigmoid contrast adjustment.
"""
# TODO add inv parameter?
params1d = [
iap.handle_continuous_param(gain, "gain", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True),
iap.handle_continuous_param(cutoff, "cutoff", value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
]
func = adjust_contrast_sigmoid
return _ContrastFuncWrapper(
func, params1d, per_channel,
dtypes_allowed=["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64"],
dtypes_disallowed=["float96", "float128", "float256", "bool"],
name=name if name is not None else ia.caller_name(),
deterministic=deterministic,
random_state=random_state
) | 0.005263 |
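The per-pixel transform named in the docstring, ``255 * 1/(1 + exp(gain*(cutoff - I/255)))``, can be reproduced directly with NumPy. A minimal sketch of just the formula for uint8 images (not imgaug's actual ``adjust_contrast_sigmoid``, which also handles other dtypes):

import numpy as np

def sigmoid_contrast_uint8(image, gain=10, cutoff=0.5):
    # 255 * 1 / (1 + exp(gain * (cutoff - I/255))), applied per pixel.
    scaled = image.astype(np.float64) / 255.0
    out = 255.0 / (1.0 + np.exp(gain * (cutoff - scaled)))
    return np.clip(out, 0, 255).astype(np.uint8)

image = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
enhanced = sigmoid_contrast_uint8(image, gain=10, cutoff=0.5)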
def setMaximum(self, m, update=True):
"""Set the maximum allowed value (or None for no limit)"""
if m is not None:
m = D(asUnicode(m))
self.opts['bounds'][1] = m
if update:
self.setValue() | 0.008197 |
def add_source(self, url, **kwargs):
"""
Add a source URL from which data related to this object was scraped.
:param url: the location of the source
"""
self['sources'].append(dict(url=url, **kwargs)) | 0.008299 |
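A usage sketch; the object is assumed to behave like a mapping with a 'sources' list, as the method body implies, and the URL and keyword are placeholders:

bill = {'sources': []}
# The method only touches self['sources'], so a plain dict works for illustration.
add_source(bill, 'https://legislature.example.gov/bill/42', note='detail page')
print(bill['sources'])  # [{'url': 'https://legislature.example.gov/bill/42', 'note': 'detail page'}]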
def get_hashes(path, exclude=None):
'''
Get a dictionary of file paths and timestamps.
Paths matching `exclude` regex will be excluded.
'''
out = {}
for f in Path(path).rglob('*'):
if f.is_dir():
# We want to watch files, not directories.
continue
if exclude and re.match(exclude, f.as_posix()):
retox_log.debug("excluding '{}'".format(f.as_posix()))
continue
pytime = f.stat().st_mtime
out[f.as_posix()] = pytime
return out | 0.001876 |
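A typical caller compares successive snapshots to detect modified files. A minimal polling sketch, assuming `get_hashes` above is in scope (the function itself additionally needs `pathlib.Path`, `re` and the project's `retox_log` logger imported):

import time

def watch(path, exclude=None, interval=1.0):
    # Report files whose mtime changed, appeared or disappeared between polls.
    previous = get_hashes(path, exclude)
    while True:
        time.sleep(interval)
        current = get_hashes(path, exclude)
        changed = set(previous.items()) ^ set(current.items())
        if changed:
            print("changed:", sorted({name for name, _ in changed}))
        previous = current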
def expectation(self, observables, statistics, lag_multiple=1, observables_mean_free=False, statistics_mean_free=False):
r"""Compute future expectation of observable or covariance using the approximated Koopman operator.
Parameters
----------
observables : np.ndarray((input_dimension, n_observables))
Coefficients that express one or multiple observables in
the basis of the input features.
statistics : np.ndarray((input_dimension, n_statistics)), optional
Coefficients that express one or multiple statistics in
the basis of the input features.
This parameter can be None. In that case, this method
returns the future expectation value of the observable(s).
lag_multiple : int
If > 1, extrapolate to a multiple of the estimator's lag
time by assuming Markovianity of the approximated Koopman
operator.
observables_mean_free : bool, default=False
If true, coefficients in `observables` refer to the input
features with feature means removed.
If false, coefficients in `observables` refer to the
unmodified input features.
statistics_mean_free : bool, default=False
If true, coefficients in `statistics` refer to the input
features with feature means removed.
If false, coefficients in `statistics` refer to the
unmodified input features.
Notes
        -----
        A "future expectation" of an observable g is the average of g computed
over a time window that has the same total length as the input data
from which the Koopman operator was estimated but is shifted
by lag_multiple*tau time steps into the future (where tau is the lag
time).
It is computed with the equation:
.. math::
\mathbb{E}[g]_{\rho_{n}}=\mathbf{q}^{T}\mathbf{P}^{n-1}\mathbf{e}_{1}
where
.. math::
P_{ij}=\sigma_{i}\langle\psi_{i},\phi_{j}\rangle_{\rho_{1}}
and
.. math::
q_{i}=\langle g,\phi_{i}\rangle_{\rho_{1}}
and :math:`\mathbf{e}_{1}` is the first canonical unit vector.
A model prediction of time-lagged covariances between the
observable f and the statistic g at a lag-time of lag_multiple*tau
is computed with the equation:
.. math::
\mathrm{cov}[g,\,f;n\tau]=\mathbf{q}^{T}\mathbf{P}^{n-1}\boldsymbol{\Sigma}\mathbf{r}
where :math:`r_{i}=\langle\psi_{i},f\rangle_{\rho_{0}}` and
:math:`\boldsymbol{\Sigma}=\mathrm{diag(\boldsymbol{\sigma})}` .
"""
# TODO: implement the case lag_multiple=0
dim = self.dimension()
S = np.diag(np.concatenate(([1.0], self.singular_values[0:dim])))
V = self.V[:, 0:dim]
U = self.U[:, 0:dim]
m_0 = self.mean_0
m_t = self.mean_t
assert lag_multiple >= 1, 'lag_multiple = 0 not implemented'
if lag_multiple == 1:
P = S
else:
p = np.zeros((dim + 1, dim + 1))
p[0, 0] = 1.0
p[1:, 0] = U.T.dot(m_t - m_0)
p[1:, 1:] = U.T.dot(self.Ctt).dot(V)
P = np.linalg.matrix_power(S.dot(p), lag_multiple - 1).dot(S)
Q = np.zeros((observables.shape[1], dim + 1))
if not observables_mean_free:
Q[:, 0] = observables.T.dot(m_t)
Q[:, 1:] = observables.T.dot(self.Ctt).dot(V)
if statistics is not None:
# compute covariance
R = np.zeros((statistics.shape[1], dim + 1))
if not statistics_mean_free:
R[:, 0] = statistics.T.dot(m_0)
R[:, 1:] = statistics.T.dot(self.C00).dot(U)
if statistics is not None:
# compute lagged covariance
return Q.dot(P).dot(R.T)
# TODO: discuss whether we want to return this or the transpose
            # TODO: from MSMs one might expect the first index to refer to the statistics; here it is the other way round
else:
# compute future expectation
return Q.dot(P)[:, 0] | 0.001185 |
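The lag_multiple extrapolation above is just a matrix power of the propagator. A toy NumPy illustration of E[g] = q^T P^{n-1} e_1 with made-up matrices (purely illustrative, not tied to any real estimator):

import numpy as np

# Made-up singular values and propagator for a 3-dimensional augmented
# basis (constant function plus two singular functions).
S = np.diag([1.0, 0.9, 0.5])
p = np.array([[1.0, 0.0, 0.0],
              [0.1, 0.8, 0.1],
              [0.0, 0.2, 0.7]])
q = np.array([0.3, 1.2, -0.4])   # observable coefficients in the singular basis

lag_multiple = 3
P = np.linalg.matrix_power(S @ p, lag_multiple - 1) @ S
expectation = (q @ P)[0]         # q^T P^{n-1} e_1: take the first component
print(expectation)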
def change_password(self, newpassword):
""" Change the password that allows to decrypt the master key
"""
if not self.unlocked():
raise WalletLocked
self.password = newpassword
self._save_encrypted_masterpassword() | 0.007519 |
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
        """ Gets a prediction from a supplied image encoded as a b64 string, useful when uploading
        images to a server backed by this library.
        :param model_id: string, once you train a model you'll be given a model id to use.
        :param b64_encoded_string: string, a b64 encoded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r | 0.01548 |
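A usage sketch showing how a caller would base64-encode a local image before passing it in; the file path, model id and client variable are placeholders:

import base64

with open('cat.jpg', 'rb') as f:
    b64_string = base64.b64encode(f.read()).decode('ascii')

# `client` is assumed to be an instance of the class defining the method above.
response = client.get_b64_image_prediction('YOUR_MODEL_ID', b64_string)
print(response.status_code, response.json())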
def whois_history(self, query, **kwargs):
"""Pass in a domain name."""
return self._results('whois-history', '/v1/{0}/whois/history'.format(query), items_path=('history', ), **kwargs) | 0.015075 |
def _property_set(self, msg):
"""Set command received and acknowledged."""
prop = self._sent_property.get('prop')
if prop and hasattr(self, prop):
setattr(self, prop, self._sent_property.get('val'))
self._sent_property = {} | 0.007491 |
def menu_weekly(self, building_id):
"""Get an array of menu objects corresponding to the weekly menu for the
venue with building_id.
:param building_id:
A string representing the id of a building, e.g. "abc".
>>> commons_week = din.menu_weekly("593")
"""
din = DiningV2(self.bearer, self.token)
response = {'result_data': {'Document': {}}}
days = []
for i in range(7):
date = str(datetime.date.today() + datetime.timedelta(days=i))
v2_response = din.menu(building_id, date)
if building_id in VENUE_NAMES:
response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
else:
response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
formatted_date = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%-m/%d/%Y')
days.append({"tblDayPart": get_meals(v2_response, building_id), "menudate": formatted_date})
response["result_data"]["Document"]["tblMenu"] = days
return normalize_weekly(response) | 0.005963 |
def __updateJobResults(self):
    """
Check if this is the best model
If so:
1) Write it's checkpoint
2) Record this model as the best
3) Delete the previous best's output cache
Otherwise:
1) Delete our output cache
"""
isSaved = False
while True:
self._isBestModel, jobResults, jobResultsStr = \
self.__checkIfBestCompletedModel()
# -----------------------------------------------------------------------
# If the current model is the best:
# 1) Save the model's predictions
# 2) Checkpoint the model state
# 3) Update the results for the job
if self._isBestModel:
# Save the current model and its results
if not isSaved:
self.__flushPredictionCache()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__createModelCheckpoint()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
isSaved = True
# Now record the model as the best for the job
prevBest = jobResults.get('bestModel', None)
prevWasSaved = jobResults.get('saved', False)
# If the current model is the best, it shouldn't already be checkpointed
if prevBest == self._modelID:
assert not prevWasSaved
metrics = self._getMetrics()
jobResults['bestModel'] = self._modelID
jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
jobResults['metrics'] = metrics
jobResults['saved'] = True
isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=json.dumps(jobResults))
if isUpdated:
if prevWasSaved:
self.__deleteOutputCache(prevBest)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__deleteModelCheckpoint(prevBest)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self._logger.info("Model %d chosen as best model", self._modelID)
break
# -----------------------------------------------------------------------
# If the current model is not the best, delete its outputs
else:
# NOTE: we update model timestamp around these occasionally-lengthy
# operations to help prevent the model from becoming orphaned
self.__deleteOutputCache(self._modelID)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__deleteModelCheckpoint(self._modelID)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
break | 0.009424 |