text (string, lengths 78-104k) | score (float64, 0-0.18)
---|---
def smart_unicode(string, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a unicode object representing 'string'. Treats bytestrings using the
'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# if isinstance(s, Promise):
# # The input is the result of a gettext_lazy() call.
# return s
return force_unicode(string, encoding, strings_only, errors) | 0.004444 |
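A brief usage sketch (added here for illustration; it assumes `force_unicode` follows the usual Django semantics for this wrapper):

```python
# assumes the module above is imported; behaviour mirrors Django's smart_unicode
print(smart_unicode(b"caf\xc3\xa9"))          # 'café' (bytes decoded as UTF-8)
print(smart_unicode("déjà vu"))               # unicode input is returned unchanged
print(smart_unicode(42, strings_only=True))   # 42, non-string-like objects are left alone
```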
def set_proxy(self, proxy_account, account=None, **kwargs):
""" Set a specific proxy for account
:param bitshares.account.Account proxy_account: Account to be
proxied
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
"""
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account, blockchain_instance=self)
proxy = Account(proxy_account, blockchain_instance=self)
options = account["options"]
options["voting_account"] = proxy["id"]
op = operations.Account_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"account": account["id"],
"new_options": options,
"extensions": {},
"prefix": self.prefix,
}
)
return self.finalizeOp(op, account["name"], "active", **kwargs) | 0.001757 |
def password_input(*args, **kwargs):
'''
Get a password
'''
password_input = wtforms.PasswordField(*args, **kwargs)
password_input.input_type = 'password'
return password_input | 0.005 |
def matches(self, *args, **kwargs):
"""Test if a request matches a :ref:`message spec <message spec>`.
Returns ``True`` or ``False``.
"""
request = make_prototype_request(*args, **kwargs)
if self._prototype.opcode not in (None, request.opcode):
return False
if self._prototype.is_command not in (None, request.is_command):
return False
for name in dir(self._prototype):
if name.startswith('_') or name in request._non_matched_attrs:
# Ignore privates, and handle documents specially.
continue
prototype_value = getattr(self._prototype, name, None)
if inspect.ismethod(prototype_value):
continue
actual_value = getattr(request, name, None)
if prototype_value not in (None, actual_value):
return False
if len(self._prototype.docs) not in (0, len(request.docs)):
return False
return self._prototype._matches_docs(self._prototype.docs, request.docs) | 0.00278 |
def namespace_lower(self, namespace):
"""
Return a copy with only the keys from a given namespace, lower-cased.
The keys in the returned dict will be transformed to lower case after
filtering, so they can be easily passed as keyword arguments to other
functions. This is just syntactic sugar for calling
:meth:`~ConfigLoader.namespace` with
``key_transform=lambda key: key.lower()``.
Example::
>>> from configloader import ConfigLoader
>>> config = ConfigLoader(
... MY_APP_SETTING1='a',
... EXTERNAL_LIB_SETTING1='b',
... EXTERNAL_LIB_SETTING2='c',
... )
>>> config.namespace_lower('EXTERNAL_LIB')
ConfigLoader({'setting1': 'b', 'setting2': 'c'})
:arg namespace: Common prefix.
:return: New config dict.
:rtype: :class:`ConfigLoader`
"""
return self.namespace(namespace, key_transform=lambda key: key.lower()) | 0.001947 |
def _update_event(self, event_index, event_state, event_type, event_value,
proc_list, proc_desc, peak_time):
"""Update an event in the list"""
if event_state == "OK" or event_state == "CAREFUL":
# Reset the automatic process sort key
self.reset_process_sort()
# Set the end of the events
endtime = time.mktime(datetime.now().timetuple())
if endtime - self.events_list[event_index][0] > peak_time:
# If event is > peak_time seconds
self.events_list[event_index][1] = endtime
else:
# If event <= peak_time seconds, ignore
self.events_list.remove(self.events_list[event_index])
else:
# Update the item
self.set_process_sort(event_type)
# State
if event_state == "CRITICAL":
self.events_list[event_index][2] = event_state
# Min value
self.events_list[event_index][6] = min(self.events_list[event_index][6],
event_value)
# Max value
self.events_list[event_index][4] = max(self.events_list[event_index][4],
event_value)
# Average value
self.events_list[event_index][7] += event_value
self.events_list[event_index][8] += 1
self.events_list[event_index][5] = (self.events_list[event_index][7] /
self.events_list[event_index][8])
# TOP PROCESS LIST (only for CRITICAL ALERT)
if event_state == "CRITICAL":
events_sort_key = self.get_event_sort_key(event_type)
# Sort the current process list to retrieve the TOP 3 processes
self.events_list[event_index][9] = sort_stats(proc_list,
events_sort_key)[0:3]
self.events_list[event_index][11] = events_sort_key
# MONITORED PROCESSES DESC
self.events_list[event_index][10] = proc_desc
return True | 0.003623 |
def highlight(self, **kwargs):
"""
kwargs:
style: css
highlight_time: int; default: .3
"""
self.debug_log("Highlighting element")
style = kwargs.get('style')
highlight_time = kwargs.get('highlight_time', .3)
driver = self._element._parent
try:
original_style = self._element.get_attribute('style')
driver.execute_script(
"arguments[0].setAttribute('style', arguments[1]);",
self._element,
style
)
except Exception as e:
self.error_log("highlight exception: %s" % str(e))
sleep(highlight_time)
try:
driver.execute_script(
"arguments[0].setAttribute('style', arguments[1]);",
self._element,
original_style
)
except Exception as e:
self.error_log("highlight exception: %s" % str(e))
return True | 0.001963 |
def _move_group_helper(self, group):
"""A helper to move the chidren of a group."""
for i in group.children:
self.groups.remove(i)
i.level = group.level + 1
self.groups.insert(self.groups.index(group) + 1, i)
if i.children:
self._move_group_helper(i) | 0.006042 |
def profileit(path=None):
"""cProfile decorator to profile a function
:param path: output file path
:type path: str
:return: Function
"""
def inner(func):
@wraps(func)
def wrapper(*args, **kwargs):
prof = cProfile.Profile()
retval = prof.runcall(func, *args, **kwargs)
if path is not None:
prof.print_stats()
prof.dump_stats(os.path.expanduser(path))
else:
prof.print_stats()
return retval
return wrapper
return inner | 0.001692 |
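A self-contained sketch of what the decorator does under the hood (the workload function and output path are illustrative):

```python
import cProfile
import os
import pstats

def slow_sum(n):
    # stand-in for a function you would decorate with @profileit(path="~/slow_sum.prof")
    return sum(i * i for i in range(n))

prof = cProfile.Profile()
result = prof.runcall(slow_sum, 100_000)                 # run the callable under the profiler
prof.dump_stats(os.path.expanduser("~/slow_sum.prof"))   # same as the decorator's `path` branch
pstats.Stats(prof).sort_stats("cumulative").print_stats(5)
print(result)
```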
def unfinished_objects(self):
'''
Leaves only versions of those objects that have some version with
`_end == None` or with `_end > right cutoff`.
'''
mask = self._end_isnull
if self._rbound is not None:
mask = mask | (self._end > self._rbound)
oids = set(self[mask]._oid.tolist())
return self[self._oid.apply(lambda oid: oid in oids)] | 0.00489 |
def tag_and_stem(text):
"""
Returns a list of (stem, tag, token) triples:
- stem: the word's uninflected form
- tag: the word's part of speech
- token: the original word, so we can reconstruct it later
"""
tokens = tokenize(text)
tagged = nltk.pos_tag(tokens)
out = []
for token, tag in tagged:
stem = morphy_stem(token, tag)
out.append((stem, tag, token))
return out | 0.002336 |
def get_from_ident(self, ident):
"""
Take a string as returned by get_ident and return a job,
based on the class representation and the job's pk from the ident
"""
model_repr, job_pk = ident.split(':', 1)
klass = import_class(model_repr)
return klass.get(job_pk) | 0.006289 |
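A hypothetical usage sketch: the ident has the form `<class path>:<pk>`, and the collection object, class path, and primary key below are made up for illustration:

```python
# `queue`, the class path and the pk are hypothetical
job = queue.get_from_ident("myapp.jobs.EmailJob:42")
# equivalent to: import_class("myapp.jobs.EmailJob").get("42")
```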
def split(self, delimiter=None):
"""Same as string.split(), but retains literal/expandable structure.
Returns:
List of `EscapedString`.
"""
result = []
strings = self.strings[:]
current = None
while strings:
is_literal, value = strings[0]
parts = value.split(delimiter, 1)
if len(parts) > 1:
value1, value2 = parts
strings[0] = (is_literal, value2)
out = EscapedString(value1, is_literal)
push = True
else:
strings = strings[1:]
out = EscapedString(value, is_literal)
push = False
if current is None:
current = out
else:
current = current + out
if push:
result.append(current)
current = None
if current:
result.append(current)
return result | 0.002 |
def index(self, index, field, value):
"Search for records matching a value in an index service"
params = {
"q": value,
# search index has '_' instead of '-' in field names ..
"q.options": "{fields:['%s']}" % (field.replace('-', '_'))
}
response = self.get(self.config(index, 'search_url'), params=params)
results = [hit['fields'] for hit in response.json()['hits']['hit']]
for result in results:
for key in list(result):
result[key.replace('_', '-')] = result.pop(key)
return results | 0.003333 |
def start(self):
"""Indicate that we are performing work in a thread.
:returns: multiprocessing job object
"""
if self.run is True:
self.job = multiprocessing.Process(target=self.indicator)
self.job.start()
return self.job | 0.006849 |
def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000,
AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None,
print_sql=False, bucket_location=None, s3_bucket=None):
"""
Upload a dataframe to redshift via s3.
Parameters
----------
name: str
name for your shiny new table
df: DataFrame
data frame you want to save to the db
drop_if_exists: bool (False)
whether you'd like to drop the table if it already exists
chunk_size: int (10000)
Number of DataFrame chunks to upload and COPY from S3. Upload speed
is *much* faster if chunks = multiple-of-slices. Ex: DW1.XL nodes
have 2 slices per node, so if running 2 nodes you will want
chunk_size=4, 8, etc
AWS_ACCESS_KEY: str
your aws access key. if this is None, the function will try
and grab AWS_ACCESS_KEY from your environment variables
AWS_SECRET_KEY: str
your aws secret key. if this is None, the function will try
and grab AWS_SECRET_KEY from your environment variables
s3: S3
alternative to using keys, you can use an S3 object
print_sql: bool (False)
option for printing sql statement that will be executed
bucket_location: boto.s3.connection.Location
a specific AWS location in which to create the temporary transfer s3
bucket. This should match your redshift cluster's region.
Examples
--------
"""
if self.dbtype!="redshift":
raise Exception("Sorry, feature only available for redshift.")
try:
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.s3.connection import Location
# if boto is present, set the bucket_location to default.
# we can't do this in the function definition because we're
# lazily importing boto only if necessary here.
if bucket_location is None:
bucket_location = Location.DEFAULT
except ImportError:
raise Exception("Couldn't find boto library. Please ensure it is installed")
if s3 is not None:
AWS_ACCESS_KEY = s3.access_key
AWS_SECRET_KEY = s3.secret_key
if AWS_ACCESS_KEY is None:
AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY')
if AWS_SECRET_KEY is None:
AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY')
if AWS_ACCESS_KEY is None:
raise Exception("Must specify AWS_ACCESS_KEY as either function argument or as an environment variable `AWS_ACCESS_KEY`")
if AWS_SECRET_KEY is None:
raise Exception("Must specify AWS_SECRET_KEY as either function argument or as an environment variable `AWS_SECRET_KEY`")
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
#this way users with permission on specific buckets can use this feature
bucket_name = "dbpy-{0}".format(uuid.uuid4())
if s3_bucket:
bucket = conn.get_bucket(s3_bucket)
bucket_name = s3_bucket
else:
bucket = conn.create_bucket(bucket_name, location=bucket_location)
# we're going to chunk the file into pieces. according to amazon, this is
# much faster when it comes time to run the \COPY statement.
#
# see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html
sys.stderr.write("Transfering {0} to s3 in chunks".format(name))
len_df = len(df)
chunks = range(0, len_df, chunk_size)
def upload_chunk(i):
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
chunk = df[i:(i+chunk_size)]
k = Key(bucket)
k.key = 'data-%d-%d.csv.gz' % (i, i + chunk_size)
k.set_metadata('parent', 'db.py')
out = StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(chunk.to_csv(index=False, encoding='utf-8'))
k.set_contents_from_string(out.getvalue())
sys.stderr.write(".")
return i
threads = []
for i in chunks:
t = threading.Thread(target=upload_chunk, args=(i, ))
t.start()
threads.append(t)
# join all threads
for t in threads:
t.join()
sys.stderr.write("done\n")
if drop_if_exists:
sql = "DROP TABLE IF EXISTS {0};".format(name)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
# generate schema from pandas and then adapt for redshift
sql = pd.io.sql.get_schema(df, name)
# defaults to using SQLite format. need to convert it to Postgres
sql = sql.replace("[", "").replace("]", "")
# we'll create the table ONLY if it doesn't exist
sql = sql.replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS")
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
# perform the \COPY here. the s3 argument is a prefix, so it'll pick up
# all of the data*.gz files we've created
sys.stderr.write("Copying data from s3 to redshfit...")
sql = """
copy {name} from 's3://{bucket_name}/data'
credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}'
CSV IGNOREHEADER as 1 GZIP;
""".format(name=name, bucket_name=bucket_name,
AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
sys.stderr.write("done!\n")
# tear down the bucket
sys.stderr.write("Tearing down bucket...")
for key in bucket.list():
key.delete()
if not s3_bucket:
conn.delete_bucket(bucket_name)
sys.stderr.write("done!") | 0.002758 |
def _update_table(data):
"""Add new jobs to the priority table and update the build system if required.
data - it is a list of dictionaries that describe a job type
returns the number of new, failed and updated jobs
"""
jp_index, priority, expiration_date = _initialize_values()
total_jobs = len(data)
new_jobs, failed_changes, updated_jobs = 0, 0, 0
# Loop through sanitized jobs, add new jobs and update the build system if needed
for job in data:
key = _unique_key(job)
if key in jp_index:
# We already know about this job, we might need to update the build system
# We're seeing the job again with another build system (e.g. buildbot vs
# taskcluster). We need to change it to '*'
if jp_index[key]['build_system_type'] != '*' and jp_index[key]['build_system_type'] != job["build_system_type"]:
db_job = JobPriority.objects.get(pk=jp_index[key]['pk'])
db_job.buildsystem = '*'
db_job.save()
logger.info('Updated %s/%s from %s to %s',
db_job.testtype, db_job.buildtype,
job['build_system_type'], db_job.buildsystem)
updated_jobs += 1
else:
# We have a new job from runnablejobs to add to our master list
try:
jobpriority = JobPriority(
testtype=str(job["testtype"]),
buildtype=str(job["platform_option"]),
platform=str(job["platform"]),
priority=priority,
expiration_date=expiration_date,
buildsystem=job["build_system_type"]
)
jobpriority.save()
logger.info('New job was found (%s,%s,%s,%s)',
job['testtype'], job['platform_option'], job['platform'],
job["build_system_type"])
new_jobs += 1
except Exception as error:
logger.warning(str(error))
failed_changes += 1
logger.info('We have %s new jobs and %s updated jobs out of %s total jobs processed.',
new_jobs, updated_jobs, total_jobs)
if failed_changes != 0:
logger.warning('We have failed %s changes out of %s total jobs processed.',
failed_changes, total_jobs)
return new_jobs, failed_changes, updated_jobs | 0.003603 |
def _query(self, sql, *args):
""" Executes the specified `sql` query and returns the cursor """
if not self._con:
logger.debug(("Open MBTiles file '%s'") % self.filename)
self._con = sqlite3.connect(self.filename)
self._cur = self._con.cursor()
sql = ' '.join(sql.split())
logger.debug(("Execute query '%s' %s") % (sql, args))
try:
self._cur.execute(sql, *args)
except (sqlite3.OperationalError, sqlite3.DatabaseError) as e:
raise InvalidFormatError(("%s while reading %s") % (e, self.filename))
return self._cur | 0.024834 |
def delete(self, article_attachment):
"""
This function completely wipes an attachment from a Zendesk Helpdesk article.
:param article_attachment: :class:`ArticleAttachment` object or numeric article attachment id.
:return: status_code == 204 on success
"""
return HelpdeskAttachmentRequest(self).delete(self.endpoint.delete, article_attachment) | 0.012853 |
def register(class_=None, **kwargs):
"""Registers a dataset with segment specific hyperparameters.
When passing keyword arguments to `register`, they are checked to be valid
keyword arguments for the registered Dataset class constructor and are
saved in the registry. Registered keyword arguments can be retrieved with
the `list_datasets` function.
All arguments that result in creation of separate datasets should be
registered. Examples are datasets divided in different segments or
categories, or datasets containing multiple languages.
Once registered, an instance can be created by calling
:func:`~gluonnlp.data.create` with the class name.
Parameters
----------
**kwargs : list or tuple of allowed argument values
For each keyword argument, its value must be a list or tuple of the
allowed argument values.
Examples
--------
>>> @gluonnlp.data.register(segment=['train', 'test', 'dev'])
... class MyDataset(gluon.data.Dataset):
... def __init__(self, segment='train'):
... pass
>>> my_dataset = gluonnlp.data.create('MyDataset')
>>> print(type(my_dataset))
<class 'MyDataset'>
"""
def _real_register(class_):
# Assert that the passed kwargs are meaningful
for kwarg_name, values in kwargs.items():
try:
real_args = inspect.getfullargspec(class_).args
except AttributeError:
# pylint: disable=deprecated-method
real_args = inspect.getargspec(class_.__init__).args
if kwarg_name not in real_args:
raise RuntimeError(
('{} is not a valid argument for {}. '
'Only valid arguments can be registered.').format(
kwarg_name, class_.__name__))
if not isinstance(values, (list, tuple)):
raise RuntimeError(('{} should be a list of '
'valid arguments for {}. ').format(
values, kwarg_name))
# Save the kwargs associated with this class_
_REGSITRY_NAME_KWARGS[class_] = kwargs
register_ = registry.get_register_func(Dataset, 'dataset')
return register_(class_)
if class_ is not None:
# Decorator was called without arguments
return _real_register(class_)
return _real_register | 0.000816 |
def get_as_html(self) -> str:
"""
Returns the table object as an HTML string.
:return: HTML representation of the table.
"""
table_string = self._get_pretty_table().get_html_string()
title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title)
return f'<center><h1>{title}</h1></center>{table_string}' | 0.007916 |
def logoPNG(symbol, token='', version=''):
'''This is a helper function, but the google APIs url is standardized.
https://iexcloud.io/docs/api/#logo
8am UTC daily
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
image: result as png
'''
_raiseIfNotStr(symbol)
response = requests.get(logo(symbol, token, version)['url'])
return ImageP.open(BytesIO(response.content)) | 0.002012 |
def get_instance_health(name, region=None, key=None, keyid=None, profile=None, instances=None):
'''
Get a list of instances and their health state
CLI example:
.. code-block:: bash
salt myminion boto_elb.get_instance_health myelb
salt myminion boto_elb.get_instance_health myelb region=us-east-1 instances="[instance_id,instance_id]"
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
instance_states = conn.describe_instance_health(name, instances)
ret = []
for _instance in instance_states:
ret.append({'instance_id': _instance.instance_id,
'description': _instance.description,
'state': _instance.state,
'reason_code': _instance.reason_code
})
return ret
except boto.exception.BotoServerError as error:
log.debug(error)
return [] | 0.003096 |
def add_indicator(self, indicator_data):
"""Add an indicator to Batch Job.
.. code-block:: javascript
{
"type": "File",
"rating": 5.00,
"confidence": 50,
"summary": "53c3609411c83f363e051d455ade78a7
: 57a49b478310e4313c54c0fee46e4d70a73dd580
: db31cb2a748b7e0046d8c97a32a7eb4efde32a0593e5dbd58e07a3b4ae6bf3d7",
"associatedGroups": [
{
"groupXid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904"
}
],
"attribute": [{
"type": "Source",
"displayed": true,
"value": "Malware Analysis provided by external AMA."
}],
"fileOccurrence": [{
"fileName": "drop1.exe",
"date": "2017-03-03T18:00:00-06:00"
}],
"tag": [{
"name": "China"
}],
"xid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904:170139"
}
Args:
indicator_data (dict): The Full Indicator data including attributes, labels, tags,
and associations.
"""
if indicator_data.get('type') not in ['Address', 'EmailAddress', 'File', 'Host', 'URL']:
# for custom indicator types the valueX fields are required.
# using the summary we can build the values
index = 1
for value in self._indicator_values(indicator_data.get('summary')):
indicator_data['value{}'.format(index)] = value
index += 1
if indicator_data.get('type') == 'File':
# convert custom field name to the appropriate value for batch v2
size = indicator_data.pop('size', None)
if size is not None:
indicator_data['intValue1'] = size
if indicator_data.get('type') == 'Host':
# convert custom field name to the appropriate value for batch v2
dns_active = indicator_data.pop('dnsActive', None)
if dns_active is not None:
indicator_data['flag1'] = dns_active
whois_active = indicator_data.pop('whoisActive', None)
if whois_active is not None:
indicator_data['flag2'] = whois_active
return self._indicator(indicator_data) | 0.002006 |
def format_file_path(filepath):
"""Formats a path as absolute and with the correct platform separator."""
try:
is_windows_network_mount = WINDOWS_NETWORK_MOUNT_PATTERN.match(filepath)
filepath = os.path.realpath(os.path.abspath(filepath))
filepath = re.sub(BACKSLASH_REPLACE_PATTERN, '/', filepath)
is_windows_drive = WINDOWS_DRIVE_PATTERN.match(filepath)
if is_windows_drive:
filepath = filepath.capitalize()
if is_windows_network_mount:
# Add back a / to the front, since the previous modifications
# will have replaced any double slashes with single
filepath = '/' + filepath
except:
pass
return filepath | 0.004121 |
def check_signature(self, timestamp, nonce, signature):
"""
Check a signature against the given timestamp and nonce string.
:param timestamp: timestamp
:param nonce: the random string used to generate the signature
:param signature: the signature to check
:return: ``True`` if the signature is valid, ``False`` otherwise
"""
return check_signature(
self.config["TOKEN"], timestamp, nonce, signature
) | 0.005405 |
def DeleteUserDefinedFunction(self, udf_link, options=None):
"""Deletes a user defined function.
:param str udf_link:
The link to the user defined function.
:param dict options:
The request options for the request.
:return:
The deleted UDF.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(udf_link)
udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
return self.DeleteResource(path,
'udfs',
udf_id,
None,
options) | 0.002732 |
def slot_add_nio_binding(self, slot_number, port_number, nio):
"""
Adds a slot NIO binding.
:param slot_number: slot number
:param port_number: port number
:param nio: NIO instance to add to the slot/port
"""
try:
adapter = self._slots[slot_number]
except IndexError:
raise DynamipsError('Slot {slot_number} does not exist on router "{name}"'.format(name=self._name,
slot_number=slot_number))
if adapter is None:
raise DynamipsError("Adapter is missing in slot {slot_number}".format(slot_number=slot_number))
if not adapter.port_exists(port_number):
raise DynamipsError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter,
port_number=port_number))
try:
yield from self._hypervisor.send('vm slot_add_nio_binding "{name}" {slot_number} {port_number} {nio}'.format(name=self._name,
slot_number=slot_number,
port_number=port_number,
nio=nio))
except DynamipsError:
# in case of error try to remove and add the nio binding
yield from self._hypervisor.send('vm slot_remove_nio_binding "{name}" {slot_number} {port_number}'.format(name=self._name,
slot_number=slot_number,
port_number=port_number))
yield from self._hypervisor.send('vm slot_add_nio_binding "{name}" {slot_number} {port_number} {nio}'.format(name=self._name,
slot_number=slot_number,
port_number=port_number,
nio=nio))
log.info('Router "{name}" [{id}]: NIO {nio_name} bound to port {slot_number}/{port_number}'.format(name=self._name,
id=self._id,
nio_name=nio.name,
slot_number=slot_number,
port_number=port_number))
yield from self.slot_enable_nio(slot_number, port_number)
adapter.add_nio(port_number, nio) | 0.006765 |
def update_parameters(parameters, grads, learning_rate=1.2):
"""
Updates parameters using the gradient descent update rule given above
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients
Returns:
parameters -- python dictionary containing your updated parameters
"""
# Retrieve each parameter from the dictionary "parameters"
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Retrieve each gradient from the dictionary "grads"
dW1 = grads["dW1"]
db1 = grads["db1"]
dW2 = grads["dW2"]
db2 = grads["db2"]
# Update rule for each parameter
W1 -= learning_rate * dW1
b1 -= learning_rate * db1
W2 -= learning_rate * dW2
b2 -= learning_rate * db2
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters | 0.001018 |
def remove_history(self, i=None):
"""
Remove a history item from the bundle by index.
You can toggle whether history is recorded using
* :meth:`enable_history`
* :meth:`disable_history`
:parameter int i: integer for indexing (can be positive or
negative). If i is None or not provided, the entire list
of history items will be removed
:raises ValueError: if no history items have been recorded.
"""
if i is None:
self.remove_parameters_all(context='history')
else:
param = self.get_history(i=i)
self.remove_parameter(uniqueid=param.uniqueid) | 0.002886 |
def get_line_number(line_map, offset):
"""Find a line number, given a line map and a character offset."""
for lineno, line_offset in enumerate(line_map, start=1):
if line_offset > offset:
return lineno
return -1 | 0.004115 |
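A self-contained sketch; it assumes the caller's line map holds cumulative end-of-line character offsets (an assumption about the calling code) and repeats the function so the snippet runs on its own:

```python
def get_line_number(line_map, offset):
    for lineno, line_offset in enumerate(line_map, start=1):
        if line_offset > offset:
            return lineno
    return -1

text = "first\nsecond\nthird\n"
line_map, total = [], 0
for line in text.splitlines(keepends=True):
    total += len(line)
    line_map.append(total)                # [6, 13, 19]

print(get_line_number(line_map, 0))       # 1 -> inside "first"
print(get_line_number(line_map, 7))       # 2 -> inside "second"
print(get_line_number(line_map, 99))      # -1 -> past the end of the text
```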
def plot(self,
resolution_constant_regions=20,
resolution_smooth_regions=200):
"""
Return arrays x, y for plotting the piecewise constant function.
Just the minimum number of straight lines are returned if
``eps=0``, otherwise `resolution_constant_regions` plotting intervals
are inserted in the constant regions with `resolution_smooth_regions`
plotting intervals in the smoothed regions.
"""
if self.eps == 0:
x = []; y = []
for I, value in zip(self._indicator_functions, self._values):
x.append(I.L)
y.append(value)
x.append(I.R)
y.append(value)
return x, y
else:
n = float(resolution_smooth_regions)/self.eps
if len(self.data) == 1:
return [self.L, self.R], [self._values[0], self._values[0]]
else:
x = [np.linspace(self.data[0][0], self.data[1][0]-self.eps,
resolution_constant_regions+1)]
# Iterate over all internal discontinuities
for I in self._indicator_functions[1:]:
x.append(np.linspace(I.L-self.eps, I.L+self.eps,
resolution_smooth_regions+1))
x.append(np.linspace(I.L+self.eps, I.R-self.eps,
resolution_constant_regions+1))
# Last part
x.append(np.linspace(I.R-self.eps, I.R, 3))
x = np.concatenate(x)
y = self(x)
return x, y | 0.004192 |
def expand_gallery(generator, metadata):
""" Expand a gallery tag to include all of the files in a specific directory under IMAGE_PATH
:param generator: The pelican generator instance
:return: None
"""
if "gallery" not in metadata or metadata['gallery'] is None:
return # If no gallery specified, we do nothing
lines = [ ]
base_path = _image_path(generator)
in_path = path.join(base_path, metadata['gallery'])
template = generator.settings.get('GALLERY_TEMPLATE', DEFAULT_TEMPLATE)
thumbnail_name = generator.settings.get("GALLERY_THUMBNAIL", DEFAULT_GALLERY_THUMB)
thumbnail_prefix = generator.settings.get("")
resizer = _resizer(thumbnail_name, '?x?', base_path)
for dirpath, _, filenames in os.walk(in_path):
for filename in filenames:
if not filename.startswith('.'):
url = path.join(dirpath, filename).replace(base_path, "")[1:]
url = path.join('/static', generator.settings.get('IMAGE_PATH', DEFAULT_IMAGE_DIR), url).replace('\\', '/')
logger.debug("GALLERY: {0}".format(url))
thumbnail = resizer.get_thumbnail_name(filename)
thumbnail = path.join('/', generator.settings.get('THUMBNAIL_DIR', DEFAULT_THUMBNAIL_DIR), thumbnail).replace('\\', '/')
lines.append(template.format(
filename=filename,
url=url,
thumbnail=thumbnail,
))
metadata['gallery_content'] = "\n".join(lines) | 0.003927 |
def source_from_cache(path):
"""Given the path to a .pyc. file, return the path to its .py file.
The .pyc file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc file. If path does
not conform to PEP 3147/488 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
#if sys.implementation.cache_tag is None:
# raise NotImplementedError('sys.implementation.cache_tag is None')
#path = os.fspath(path)
head, pycache_filename = os.path.split(path)
head, pycache = os.path.split(head)
if pycache != _PYCACHE:
raise ValueError('{} not bottom-level directory in '
'{!r}'.format(_PYCACHE, path))
dot_count = pycache_filename.count('.')
if dot_count not in {2, 3}:
raise ValueError('expected only 2 or 3 dots in '
'{!r}'.format(pycache_filename))
elif dot_count == 3:
optimization = pycache_filename.rsplit('.', 2)[-2]
if not optimization.startswith(_OPT):
raise ValueError("optimization portion of filename does not start "
"with {!r}".format(_OPT))
opt_level = optimization[len(_OPT):]
if not opt_level.isalnum():
raise ValueError("optimization level {!r} is not an alphanumeric "
"value".format(optimization))
base_filename = pycache_filename.partition('.')[0]
return os.path.join(head, base_filename + SOURCE_SUFFIXES[0]) | 0.001896 |
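For comparison, the standard library exposes the same PEP 3147/488 parsing; a quick check with an illustrative POSIX-style path:

```python
import importlib.util

print(importlib.util.source_from_cache("proj/__pycache__/mod.cpython-311.pyc"))
# proj/mod.py
print(importlib.util.source_from_cache("proj/__pycache__/mod.cpython-311.opt-1.pyc"))
# proj/mod.py  (the optimization suffix is parsed the same way)
```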
def visit(spht, node):
"""Append opening tags to document body list.
:param sphinx.writers.html.SmartyPantsHTMLTranslator spht: Object to modify.
:param sphinxcontrib.imgur.nodes.ImgurJavaScriptNode node: This class' instance.
"""
html_attrs_bq = {'async': '', 'src': '//s.imgur.com/min/embed.js', 'charset': 'utf-8'}
spht.body.append(spht.starttag(node, 'script', '', **html_attrs_bq)) | 0.011494 |
def name(anon, obj, field, val):
"""
Generates a random full name (using first name and last name)
"""
return anon.faker.name(field=field) | 0.006494 |
def removeSpacePadding(str, blocksize=AES_blocksize):
'Remove padding with spaces'
pad_len = 0
for char in str[::-1]: # str[::-1] reverses string
if char == ' ':
pad_len += 1
else:
break
str = str[:len(str) - pad_len]
return str | 0.017361 |
def fw_policy_create(self, data, fw_name=None, cache=False):
"""Top level policy create routine. """
LOG.debug("FW Policy Debug")
self._fw_policy_create(fw_name, data, cache) | 0.010101 |
def _norm(self, string):
"""Extended normalization: normalize by list of norm-characers, split
by character "/"."""
nstring = norm(string)
if "/" in string:
s, t = string.split('/')
nstring = t
return self.normalize(nstring) | 0.006944 |
def get_vertex_surrounding_multicolor(graph, vertex):
"""
Loops over all edges that are incident to supplied vertex and accumulates all colors,
that are present in those edges
"""
result = Multicolor()
for edge in graph.get_edges_by_vertex(vertex):
result += edge.multicolor
return result | 0.006154 |
def _set_if_type(self, v, load=False):
"""
Setter method for if_type, mapped from YANG variable /mpls_state/dynamic_bypass/dynamic_bypass_interface/if_type (mpls-if-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_if_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_if_type() directly.
YANG Description: Interface type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loopback-interface': {'value': 7}, u'ethernet-interface': {'value': 2}, u'port-channel-interface': {'value': 5}, u'unknown-interface': {'value': 1}, u've-interface': {'value': 6}, u'fbr-channel-interface': {'value': 8}},), is_leaf=True, yang_name="if-type", rest_name="if-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='mpls-if-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """if_type must be of a type compatible with mpls-if-type""",
'defined-type': "brocade-mpls-operational:mpls-if-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loopback-interface': {'value': 7}, u'ethernet-interface': {'value': 2}, u'port-channel-interface': {'value': 5}, u'unknown-interface': {'value': 1}, u've-interface': {'value': 6}, u'fbr-channel-interface': {'value': 8}},), is_leaf=True, yang_name="if-type", rest_name="if-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='mpls-if-type', is_config=False)""",
})
self.__if_type = t
if hasattr(self, '_set'):
self._set() | 0.004419 |
def _validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name,
min_val=min_val)
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val | 0.001099 |
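A standalone sketch of the same check, with the pandas-internal `is_float`/`is_integer` helpers swapped for plain `isinstance` tests (that substitution is an assumption made for illustration):

```python
def validate_integer(name, val, min_val=0):
    msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name, min_val=min_val)
    if val is not None:
        if isinstance(val, float):
            if int(val) != val:       # 3.5 would lose information -> reject
                raise ValueError(msg)
            val = int(val)            # 3.0 is safely cast to 3
        elif not (isinstance(val, int) and val >= min_val):
            raise ValueError(msg)
    return val

print(validate_integer("nrows", 5.0))    # 5
print(validate_integer("nrows", None))   # None passes through untouched
try:
    validate_integer("nrows", 3.5)
except ValueError as exc:
    print(exc)                           # 'nrows' must be an integer >=0
```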
def get_stoplist(language):
"""Returns an built-in stop-list for the language as a set of words."""
file_path = os.path.join("stoplists", "%s.txt" % language)
try:
stopwords = pkgutil.get_data("justext", file_path)
except IOError:
raise ValueError(
"Stoplist for language '%s' is missing. "
"Please use function 'get_stoplists' for complete list of stoplists "
"and feel free to contribute by your own stoplist." % language
)
return frozenset(w.decode("utf8").lower() for w in stopwords.splitlines()) | 0.003442 |
def _get_attributes(self, path):
"""
:param path: filepath within fast5
:return: dictionary of attributes found at ``path``
:rtype dict
"""
path_grp = self.handle[path]
path_attr = path_grp.attrs
return dict(path_attr) | 0.007092 |
def make_even_size(x):
"""Pad x to be even-sized on axis 1 and 2, but only if necessary."""
x_shape = x.get_shape().as_list()
assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
shape = [dim if dim is not None else -1 for dim in x_shape]
new_shape = x_shape # To make sure constant shapes remain constant.
if x_shape[1] is not None:
new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
if x_shape[2] is not None:
new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
if shape[1] % 2 == 0 and shape[2] % 2 == 0:
return x
if shape[1] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
if shape[2] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x.set_shape(new_shape)
return x
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x | 0.014837 |
def _save_or_delete_workflow(self):
"""
Calls the real save method if we pass the beginning of the wf
"""
if not self.current.task_type.startswith('Start'):
if self.current.task_name.startswith('End') and not self.are_we_in_subprocess():
self.wf_state['finished'] = True
self.wf_state['finish_date'] = datetime.now().strftime(
settings.DATETIME_DEFAULT_FORMAT)
if self.current.workflow_name not in settings.EPHEMERAL_WORKFLOWS and not \
self.wf_state['in_external']:
wfi = WFCache(self.current).get_instance()
TaskInvitation.objects.filter(instance=wfi, role=self.current.role,
wf_name=wfi.wf.name).delete()
self.current.log.info("Delete WFCache: %s %s" % (self.current.workflow_name,
self.current.token))
self.save_workflow_to_cache(self.serialize_workflow()) | 0.008419 |
def capabilities(self):
"""Returns the list of system capabilities.
:return: A ``list`` of capabilities.
"""
response = self.get(PATH_CAPABILITIES)
return _load_atom(response, MATCH_ENTRY_CONTENT).capabilities | 0.008 |
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N | 0.002105 |
def get_index_list(shape, j):
"""
index_list = get_index_list(shape, j)
:Arguments:
shape: a tuple
j: an integer
Assumes index j is from a ravelled version of an array
with specified shape, returns the corresponding
non-ravelled index tuple as a list.
"""
r = range(len(shape))
index_list = list(r)
for i in r:
if i < len(shape):
prodshape = prod(shape[i + 1:])
else:
prodshape = 0
index_list[i] = int(floor(j / prodshape))
if index_list[i] > shape[i]:
raise IndexError('Requested index too large')
j %= prodshape
return index_list | 0.001497 |
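For reference, NumPy performs the same ravelled-to-multi-index conversion, which gives a quick sanity check (the shape and index are illustrative):

```python
import numpy as np

shape, j = (3, 4), 7
row, col = np.unravel_index(j, shape)
print(int(row), int(col))   # 1 3, matching get_index_list((3, 4), 7) -> [1, 3]
```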
def list_wallet_names(api_key, is_hd_wallet=False, coin_symbol='btc'):
''' Get all the wallets belonging to an API key '''
assert is_valid_coin_symbol(coin_symbol), coin_symbol
assert api_key
params = {'token': api_key}
kwargs = dict(wallets='hd' if is_hd_wallet else '')
url = make_url(coin_symbol, **kwargs)
r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
return get_valid_json(r) | 0.004474 |
def _get_ipv6_info(cls, ip_address: str) -> tuple:
'''Extract the flow info and control id.'''
results = socket.getaddrinfo(
ip_address, 0, proto=socket.IPPROTO_TCP,
flags=socket.AI_NUMERICHOST)
flow_info = results[0][4][2]
control_id = results[0][4][3]
return flow_info, control_id | 0.005747 |
def parse_gptl(file_path, var_list):
"""
Read a GPTL timing file and extract some data.
Args:
file_path: the path to the GPTL timing file
var_list: a list of strings to look for in the file
Returns:
A dict containing key-value pairs of the requested variables
and the times associated with them
"""
timing_result = dict()
if os.path.isfile(file_path):
with open(file_path, 'r') as f:
for var in var_list:
for line in f:
if var in line:
timing_result[var] = float(line.split()[4])/int(line.split()[2])
break
return timing_result | 0.00292 |
def ParseNumericOption(self, options, name, base=10, default_value=None):
"""Parses a numeric option.
If the option is not set the default value is returned.
Args:
options (argparse.Namespace): command line arguments.
name (str): name of the numeric option.
base (Optional[int]): base of the numeric value.
default_value (Optional[object]): default value.
Returns:
int: numeric value.
Raises:
BadConfigOption: if the options are invalid.
"""
numeric_value = getattr(options, name, None)
if not numeric_value:
return default_value
try:
return int(numeric_value, base)
except (TypeError, ValueError):
name = name.replace('_', ' ')
raise errors.BadConfigOption(
'Unsupported numeric value {0:s}: {1!s}.'.format(
name, numeric_value)) | 0.005814 |
def write_string(value, buff, byteorder='big'):
"""Write a string to a file-like object."""
data = value.encode('utf-8')
write_numeric(USHORT, len(data), buff, byteorder)
buff.write(data) | 0.004831 |
def resolve_label(self, label):
"""
Resolves a label for this module only. If the label refers to another
module, an exception is raised.
@type label: str
@param label: Label to resolve.
@rtype: int
@return: Memory address pointed to by the label.
@raise ValueError: The label is malformed or impossible to resolve.
@raise RuntimeError: Cannot resolve the module or function.
"""
# Split the label into it's components.
# Use the fuzzy mode whenever possible.
aProcess = self.get_process()
if aProcess is not None:
(module, procedure, offset) = aProcess.split_label(label)
else:
(module, procedure, offset) = _ModuleContainer.split_label(label)
# If a module name is given that doesn't match ours,
# raise an exception.
if module and not self.match_name(module):
raise RuntimeError("Label does not belong to this module")
# Resolve the procedure if given.
if procedure:
address = self.resolve(procedure)
if address is None:
# If it's a debug symbol, use the symbol.
address = self.resolve_symbol(procedure)
# If it's the keyword "start" use the entry point.
if address is None and procedure == "start":
address = self.get_entry_point()
# The procedure was not found.
if address is None:
if not module:
module = self.get_name()
msg = "Can't find procedure %s in module %s"
raise RuntimeError(msg % (procedure, module))
# If no procedure is given use the base address of the module.
else:
address = self.get_base()
# Add the offset if given and return the resolved address.
if offset:
address = address + offset
return address | 0.00099 |
def bulkWrite(self, endpoint, buffer, timeout = 100):
r"""Perform a bulk write request to the endpoint specified.
Arguments:
endpoint: endpoint number.
buffer: sequence data buffer to write.
This parameter can be any sequence type.
timeout: operation timeout in milliseconds. (default: 100)
Returns the number of bytes written.
"""
return self.dev.write(endpoint, buffer, timeout) | 0.007984 |
def scan(self, data, dlen=None):
'''
Scan a data block for matching signatures.
@data - A string of data to scan.
@dlen - If specified, signatures at offsets larger than dlen will be ignored.
Returns a list of SignatureResult objects.
'''
results = []
matched_offsets = set()
# Since data can potentially be quite a large string, make it available to other
# methods via a class attribute so that it doesn't need to be passed around to
# different methods over and over again.
self.data = data
# If dlen wasn't specified, search all of self.data
if dlen is None:
dlen = len(data)
for signature in self.signatures:
# Use regex to search the data block for potential signature
# matches (fast)
for match in signature.regex.finditer(data):
# Take the offset of the start of the signature into account
offset = match.start() - signature.offset
# Signatures are ordered based on the length of their magic bytes (largest first).
# If this offset has already been matched to a previous signature, ignore it unless
# self.show_invalid has been specified. Also ignore obviously invalid offsets (<0)
# as well as those outside the specified self.data range (dlen).
if (offset not in matched_offsets or self.show_invalid) and offset >= 0 and offset < dlen:
# if offset >= 0 and offset < dlen:
# Analyze the data at this offset using the current
# signature rule
tags = self._analyze(signature, offset)
# Generate a SignatureResult object and append it to the results list if the
# signature is valid, or if invalid results were requested.
if (not tags['invalid'] or self.show_invalid) and not self._filtered(tags['description']):
# Only display results with the 'once' tag once.
if tags['once']:
if signature.title in self.display_once:
continue
else:
self.display_once.add(signature.title)
# Append the result to the results list
results.append(SignatureResult(**tags))
# Add this offset to the matched_offsets set, so that it can be ignored by
# subsequent loops.
matched_offsets.add(offset)
# Sort results by offset
results.sort(key=lambda x: x.offset, reverse=False)
return results | 0.004977 |
def calculate_Y(sub_network,skip_pre=False):
"""Calculate bus admittance matrices for AC sub-networks."""
if not skip_pre:
calculate_dependent_values(sub_network.network)
if sub_network.network.sub_networks.at[sub_network.name,"carrier"] != "AC":
logger.warning("Non-AC networks not supported for Y!")
return
branches = sub_network.branches()
buses_o = sub_network.buses_o
network = sub_network.network
#following leans heavily on pypower.makeYbus
#Copyright Richard Lincoln, Ray Zimmerman, BSD-style licence
num_branches = len(branches)
num_buses = len(buses_o)
y_se = 1/(branches["r_pu"] + 1.j*branches["x_pu"])
y_sh = branches["g_pu"]+ 1.j*branches["b_pu"]
tau = branches["tap_ratio"].fillna(1.)
#catch some transformers falsely set with tau = 0 by pypower
tau[tau==0] = 1.
#define the HV tap ratios
tau_hv = pd.Series(1.,branches.index)
tau_hv[branches.tap_side==0] = tau[branches.tap_side==0]
#define the LV tap ratios
tau_lv = pd.Series(1.,branches.index)
tau_lv[branches.tap_side==1] = tau[branches.tap_side==1]
phase_shift = np.exp(1.j*branches["phase_shift"].fillna(0.)*np.pi/180.)
#build the admittance matrix elements for each branch
Y11 = (y_se + 0.5*y_sh)/tau_lv**2
Y10 = -y_se/tau_lv/tau_hv/phase_shift
Y01 = -y_se/tau_lv/tau_hv/np.conj(phase_shift)
Y00 = (y_se + 0.5*y_sh)/tau_hv**2
#bus shunt impedances
b_sh = network.shunt_impedances.b_pu.groupby(network.shunt_impedances.bus).sum().reindex(buses_o, fill_value = 0.)
g_sh = network.shunt_impedances.g_pu.groupby(network.shunt_impedances.bus).sum().reindex(buses_o, fill_value = 0.)
Y_sh = g_sh + 1.j*b_sh
#get bus indices
bus0 = buses_o.get_indexer(branches.bus0)
bus1 = buses_o.get_indexer(branches.bus1)
#connection matrices
C0 = csr_matrix((ones(num_branches), (np.arange(num_branches), bus0)), (num_branches, num_buses))
C1 = csr_matrix((ones(num_branches), (np.arange(num_branches), bus1)), (num_branches, num_buses))
#build Y{0,1} such that Y{0,1} * V is the vector complex branch currents
i = r_[np.arange(num_branches), np.arange(num_branches)]
sub_network.Y0 = csr_matrix((r_[Y00,Y01],(i,r_[bus0,bus1])), (num_branches,num_buses))
sub_network.Y1 = csr_matrix((r_[Y10,Y11],(i,r_[bus0,bus1])), (num_branches,num_buses))
#now build bus admittance matrix
sub_network.Y = C0.T * sub_network.Y0 + C1.T * sub_network.Y1 + \
csr_matrix((Y_sh, (np.arange(num_buses), np.arange(num_buses)))) | 0.017041 |
def apply(self, q, bindings, ordering, distinct=None):
""" Sort on a set of field specifications of the type (ref, direction)
in order of the submitted list. """
info = []
for (ref, direction) in self.parse(ordering):
info.append((ref, direction))
table, column = self.cube.model[ref].bind(self.cube)
if distinct is not None and distinct != ref:
column = asc(ref) if direction == 'asc' else desc(ref)
else:
column = column.label(column.name)
column = column.asc() if direction == 'asc' else column.desc()
bindings.append(Binding(table, ref))
if self.cube.is_postgresql:
column = column.nullslast()
q = q.order_by(column)
if not len(self.results):
for column in q.columns:
column = column.asc()
if self.cube.is_postgresql:
column = column.nullslast()
q = q.order_by(column)
return info, q, bindings | 0.001855 |
def get_suite_token(self, suite_id, suite_secret, suite_ticket):
"""
Get the third-party application credential (suite token).
https://work.weixin.qq.com/api/doc#90001/90143/9060
:param suite_id: application id starting with ww or wx (corresponds to the old suite id starting with tj)
:param suite_secret: application secret
:param suite_ticket: the ticket pushed by the WeChat Work backend
:return: the returned JSON data packet
"""
return self._post(
'service/get_suite_token',
data={
'suite_id': suite_id,
'suite_secret': suite_secret,
'suite_ticket': suite_ticket
}
) | 0.003478 |
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = getargspec_no_self(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
args.sort()
return args | 0.002162 |
def tidy(args):
"""
%prog tidy fastafile
Trim terminal Ns, normalize gap sizes and remove small components.
"""
p = OptionParser(tidy.__doc__)
p.add_option("--gapsize", dest="gapsize", default=0, type="int",
help="Set all gaps to the same size [default: %default]")
p.add_option("--minlen", dest="minlen", default=100, type="int",
help="Minimum component size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
gapsize = opts.gapsize
minlen = opts.minlen
tidyfastafile = fastafile.rsplit(".", 1)[0] + ".tidy.fasta"
fw = must_open(tidyfastafile, "w")
removed = normalized = 0
fasta = Fasta(fastafile, lazy=True)
for name, rec in fasta.iteritems_ordered():
rec.seq = rec.seq.upper()
if minlen:
removed += remove_small_components(rec, minlen)
trim_terminal_Ns(rec)
if gapsize:
normalized += normalize_gaps(rec, gapsize)
if len(rec) == 0:
logging.debug("Drop seq {0}".format(rec.id))
continue
SeqIO.write([rec], fw, "fasta")
# Print statistics
if removed:
logging.debug("Total discarded bases: {0}".format(removed))
if normalized:
logging.debug("Gaps normalized: {0}".format(normalized))
logging.debug("Tidy FASTA written to `{0}`.".format(tidyfastafile))
fw.close()
return tidyfastafile | 0.002003 |
def get_entity_propnames(entity):
""" Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = entity if isinstance(entity, InstanceState) else inspect(entity)
return set(
ins.mapper.column_attrs.keys() + # Columns
ins.mapper.relationships.keys() # Relationships
) | 0.002232 |
def repo_def_matches_reality(juicer_def, pulp_def):
"""Compare a juicer repo def with a given pulp definition. Compute and
return the update necessary to make `pulp_def` match `juicer_def`.
`juicer_def` - A JuicerRepo() object representing a juicer repository
`pulp_def` - A PulpRepo() object representing a pulp repository
"""
return juicer.common.Repo.RepoDiff(juicer_repo=juicer_def, pulp_repo=pulp_def) | 0.00464 |
def get_functions_overridden_by(self, function):
'''
Return the list of functions overridden by the function
Args:
(core.Function)
Returns:
list(core.Function)
'''
candidates = [c.functions_not_inherited for c in self.inheritance]
candidates = [candidate for sublist in candidates for candidate in sublist]
return [f for f in candidates if f.full_name == function.full_name] | 0.006438 |
def RemoveClientKeyword(self, client_id, keyword, cursor=None):
"""Removes the association of a particular client to a keyword."""
cursor.execute(
"DELETE FROM client_keywords "
"WHERE client_id = %s AND keyword_hash = %s",
[db_utils.ClientIDToInt(client_id),
mysql_utils.Hash(keyword)]) | 0.003049 |
def searchNsByHref(self, doc, href):
"""Search a Ns aliasing a given URI. Recurse on the parents
until it finds the defined namespace or return None
otherwise. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlSearchNsByHref(doc__o, self._o, href)
if ret is None:raise treeError('xmlSearchNsByHref() failed')
__tmp = xmlNs(_obj=ret)
return __tmp | 0.013393 |
def get_line_style(line):
"""Get the style dictionary for matplotlib line objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['color'] = color_to_hex(line.get_color())
style['linewidth'] = line.get_linewidth()
style['dasharray'] = get_dasharray(line)
style['zorder'] = line.get_zorder()
return style | 0.002506 |
def loads(s, separator=DEFAULT, index_separator=DEFAULT, cls=dict, list_cls=list):
'''Loads an object from a string.
:param s: An object to parse
:type s: bytes or str
:param separator: The separator between key and value. Defaults to u'|' or b'|', depending on the types.
:param index_separator: The separator between key and index. Defaults to u'_' or b'_', depending on the types.
:param cls: A callable that returns a Mapping that is filled with pairs. The most common alternate option would be OrderedDict.
:param list_cls: A callable that takes an iterable and returns a sequence.
'''
if isinstance(s, six.text_type):
io = StringIO(s)
else:
io = BytesIO(s)
return load(
fp=io,
separator=separator,
index_separator=index_separator,
cls=cls,
list_cls=list_cls,
) | 0.005682 |
def block_start(self, previous_block):
"""Returns an ordered list of batches to inject at the beginning of the
block. Can also return None if no batches should be injected.
Args:
previous_block (Block): The previous block.
Returns:
A list of batches to inject.
"""
previous_header_bytes = previous_block.header
previous_header = BlockHeader()
previous_header.ParseFromString(previous_header_bytes)
block_info = BlockInfo(
block_num=previous_header.block_num,
previous_block_id=previous_header.previous_block_id,
signer_public_key=previous_header.signer_public_key,
header_signature=previous_block.header_signature,
timestamp=int(time.time()))
return [self.create_batch(block_info)] | 0.002353 |
def match_bitwidth(*args, **opt):
""" Matches the bitwidth of all of the input arguments with zero or sign extend
:param args: WireVectors of which to match bitwidths
:param opt: Optional keyword argument 'signed=True' (defaults to False)
:return: tuple of args in order with extended bits
Example of matching the bitwidths of two WireVectors `a` and `b`
with zero extension: ::
a, b = match_bitwidth(a, b)
Example of matching the bitwidths of three WireVectors `a`, `b`, and `c`
with sign extension: ::
a, b, c = match_bitwidth(a, b, c, signed=True)
"""
# TODO: when we drop 2.7 support, this code should be cleaned up with explicit
# kwarg support for "signed" rather than the less than helpful "**opt"
if len(opt) == 0:
signed = False
else:
if len(opt) > 1 or 'signed' not in opt:
raise PyrtlError('error, only supported kwarg to match_bitwidth is "signed"')
signed = bool(opt['signed'])
max_len = max(len(wv) for wv in args)
if signed:
return (wv.sign_extended(max_len) for wv in args)
else:
return (wv.zero_extended(max_len) for wv in args) | 0.004212 |
def pssm_array2pwm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
"""Convert pssm array to pwm array
"""
b = background_probs2array(background_probs)
b = b.reshape([1, 4, 1])
return (np.exp(arr) * b).astype(arr.dtype) | 0.004098 |
def rendaku_merge_pairs(lpair, rpair):
"""Merge lpair < rpair while applying semi-irregular rendaku rules"""
ltext, lnum = lpair
rtext, rnum = rpair
if lnum > rnum:
raise ValueError
if rpair == ("ひゃく", 100):
if lpair == ("さん", 3):
rtext = "びゃく"
elif lpair == ("ろく", 6):
ltext = "ろっ"
rtext = "ぴゃく"
elif lpair == ("はち", 8):
ltext = "はっ"
rtext = "ぴゃく"
elif rpair == ("せん", 1000):
if lpair == ("さん", 3):
rtext = "ぜん"
elif lpair == ("はち", 8):
ltext = "はっ"
elif rpair == ("ちょう", 10**12):
if lpair == ("いち", 1):
ltext = "いっ"
elif lpair == ("はち", 8):
ltext = "はっ"
elif lpair == ("じゅう", 10):
ltext = "じゅっ"
elif rpair == ("けい", 10**16):
if lpair == ("いち", 1):
ltext = "いっ"
elif lpair == ("ろく", 6):
ltext = "ろっ"
elif lpair == ("はち", 8):
ltext = "はっ"
elif lpair == ("じゅう", 10):
ltext = "じゅっ"
elif lpair == ("ひゃく", 100):
ltext = "ひゃっ"
return ("%s%s" % (ltext, rtext), lnum * rnum) | 0.000835 |
def get_cluster_plan(self):
"""Fetch cluster plan from zookeeper."""
_log.info('Fetching current cluster-topology from Zookeeper...')
cluster_layout = self.get_topics(fetch_partition_state=False)
# Re-format cluster-layout
partitions = [
{
'topic': topic_id,
'partition': int(p_id),
'replicas': partitions_data['replicas']
}
for topic_id, topic_info in six.iteritems(cluster_layout)
for p_id, partitions_data in six.iteritems(topic_info['partitions'])
]
return {
'version': 1,
'partitions': partitions
} | 0.00436 |
def cublasCtpsv(handle, uplo, trans, diag, n, AP, x, incx):
"""
Solve complex triangular-packed system with one right-hand side.
"""
status = _libcublas.cublasCtpsv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
n, int(AP), int(x), incx)
cublasCheckStatus(status) | 0.0125 |
def get(self, tag, default=None):
"""Get a metadata value.
Each metadata value is referenced by a ``tag`` -- a short
string such as ``'xlen'`` or ``'audit'``. In the sidecar file
these tag names are prepended with ``'Xmp.pyctools.'``, which
corresponds to a custom namespace in the XML file.
:param str tag: The tag name.
:returns: The metadata value associated with ``tag``.
:rtype: :py:class:`str`
"""
full_tag = 'Xmp.pyctools.' + tag
if full_tag in self.data:
return self.data[full_tag]
return default | 0.003241 |
def get(self, url, params):
"""
Issues a GET request against the API, properly formatting the params
:param url: a string, the url you are requesting
        :param params: a dict, the key-value pairs of all the parameters needed
            in the request
:returns: a dict parsed of the JSON response
"""
url = self.host + url
if params:
url = url + "?" + urllib.parse.urlencode(params)
try:
resp = requests.get(url, allow_redirects=False, headers=self.headers, auth=self.oauth)
except TooManyRedirects as e:
resp = e.response
return self.json_parse(resp) | 0.004412 |
def newNsProp(self, node, name, value):
"""Create a new property tagged with a namespace and carried
by a node. """
if node is None: node__o = None
else: node__o = node._o
ret = libxml2mod.xmlNewNsProp(node__o, self._o, name, value)
if ret is None:raise treeError('xmlNewNsProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp | 0.015228 |
def get_accepted_features(features, proposed_feature):
"""Deselect candidate features from list of all features
Args:
features (List[Feature]): collection of all features in the ballet
project: both accepted features and candidate ones that have not
been accepted
proposed_feature (Feature): candidate feature that has not been
accepted
Returns:
List[Feature]: list of features with the proposed feature not in it.
Raises:
ballet.exc.BalletError: Could not deselect exactly the proposed
feature.
"""
def eq(feature):
"""Features are equal if they have the same source
At least in this implementation...
"""
return feature.source == proposed_feature.source
# deselect features that match the proposed feature
result = lfilter(complement(eq), features)
if len(features) - len(result) == 1:
return result
elif len(result) == len(features):
raise BalletError(
'Did not find match for proposed feature within \'contrib\'')
else:
raise BalletError(
'Unexpected condition (n_features={}, n_result={})'
.format(len(features), len(result))) | 0.000798 |
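A usage sketch; the Feature stand-in below is hypothetical and only needs a `source` attribute to satisfy the equality check, and it assumes lfilter, complement, and BalletError are importable as in the module above:

from collections import namedtuple

Feature = namedtuple('Feature', 'source')  # hypothetical stand-in for ballet's Feature
all_features = [Feature('contrib/user_a/f1.py'), Feature('contrib/user_b/f2.py')]
proposed = Feature('contrib/user_b/f2.py')
accepted = get_accepted_features(all_features, proposed)
assert [f.source for f in accepted] == ['contrib/user_a/f1.py']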
def igetattr(self, attrname, context=None):
"""Infer the possible values of the given attribute on the slice.
:param attrname: The name of the attribute to infer.
:type attrname: str
:returns: The inferred possible values.
:rtype: iterable(NodeNG)
"""
if attrname == "start":
yield self._wrap_attribute(self.lower)
elif attrname == "stop":
yield self._wrap_attribute(self.upper)
elif attrname == "step":
yield self._wrap_attribute(self.step)
else:
yield from self.getattr(attrname, context=context) | 0.003185 |
def MRA(biomf, sampleIDs=None, transform=None):
"""
Calculate the mean relative abundance percentage.
:type biomf: A BIOM file.
:param biomf: OTU table format.
:type sampleIDs: list
:param sampleIDs: A list of sample id's from BIOM format OTU table.
    :param transform: Mathematical function used to transform the relative abundance
        values before averaging. By default, no transform is applied (None).
:rtype: dict
:return: A dictionary keyed on OTUID's and their mean relative abundance for a given
number of sampleIDs.
"""
ra = relative_abundance(biomf, sampleIDs)
if transform is not None:
ra = {sample: {otuID: transform(abd) for otuID, abd in ra[sample].items()}
for sample in ra.keys()}
otuIDs = biomf.ids(axis="observation")
return mean_otu_pct_abundance(ra, otuIDs) | 0.004566 |
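A hedged usage sketch, assuming the biom package is installed and that relative_abundance and mean_otu_pct_abundance are importable from the same module as MRA; the OTU and sample ids are illustrative:

import math
import numpy as np
from biom.table import Table

data = np.array([[10, 0, 5], [2, 8, 3]])  # rows are OTUs, columns are samples
biomf = Table(data, ['OTU1', 'OTU2'], ['S1', 'S2', 'S3'])
print(MRA(biomf))
print(MRA(biomf, transform=lambda x: math.log10(x) if x > 0 else 0))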
def fix_filename(filename, suffix=''):
"""
e.g.
fix_filename('icon.png', '_40x40')
return
icon_40x40.png
"""
if suffix:
f, ext = os.path.splitext(filename)
return f+suffix+ext
else:
return filename | 0.010601 |
def flatFieldFromCloseDistance(imgs, bg_imgs=None):
'''
Average multiple images of a homogeneous device
imaged directly in front the camera lens.
if [bg_imgs] are not given, background level is extracted
from 1% of the cumulative intensity distribution
of the averaged [imgs]
This measurement method is referred as 'Method A' in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
---
'''
img = imgAverage(imgs)
bg = getBackground2(bg_imgs, img)
img -= bg
img = toGray(img)
mx = median_filter(img[::10, ::10], 3).max()
img /= mx
return img | 0.001404 |
def _parse_key_from_substring(substring) -> str:
'''
Returns the key in the expected string "N:12.3", where "N" is the
key, and "12.3" is a floating point value
'''
try:
return substring.split(':')[0]
except (ValueError, IndexError, TypeError, AttributeError):
log.exception('Unexpected argument to _parse_key_from_substring:')
raise ParseError(
'Unexpected argument to _parse_key_from_substring: {}'.format(
substring)) | 0.002016 |
def request(self, path, data=None, headers=None, method=None):
"""Performs a HTTP request to the Go server
Args:
path (str): The full path on the Go server to request.
This includes any query string attributes.
data (str, dict, bool, optional): If any data is present this
request will become a POST request.
headers (dict, optional): Headers to set for this particular
request
Raises:
HTTPError: when the HTTP request fails.
Returns:
file like object: The response from a
:func:`urllib2.urlopen` call
"""
if isinstance(data, str):
data = data.encode('utf-8')
response = urlopen(self._request(path, data=data, headers=headers, method=method))
self._set_session_cookie(response)
return response | 0.003413 |
def get_prep_value(self, value):
"""
        We need to accommodate queries where a single email address,
        or a list of email addresses, is supplied as an argument. For example:
- Email.objects.filter(to='[email protected]')
- Email.objects.filter(to=['[email protected]', '[email protected]'])
"""
if isinstance(value, six.string_types):
return value
else:
return ', '.join(map(lambda s: s.strip(), value)) | 0.004274 |
def _proxy(self):
"""
        Generate an instance context for the instance; the context is capable of
        performing various actions. All instance actions are proxied to the context
:returns: CountryContext for this CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
"""
if self._context is None:
self._context = CountryContext(self._version, iso_code=self._solution['iso_code'], )
return self._context | 0.009881 |
def table(self, table, columns, types, constraints='', pk='', new_table=False):
"""
        Rearrange, add or delete columns from the database **table** using the desired ordered list of **columns** and the corresponding data **types**.
        Parameters
        ----------
        table: str
            The name of the table to modify
columns: list
A sequence of the columns in the order in which they are to appear in the SQL table
types: sequence
A sequence of the types corresponding to each column in the columns list above.
constraints: sequence (optional)
A sequence of the constraints for each column, e.g. '', 'UNIQUE', 'NOT NULL', etc.
pk: string or list
Name(s) of the primary key(s) if other than ID
new_table: bool
Create a new table
"""
goodtogo = True
# Make sure there is an integer primary key, unique, not null 'id' column
# and the appropriate number of elements in each sequence
if columns[0] != 'id':
print("Column 1 must be called 'id'")
goodtogo = False
if constraints:
if 'UNIQUE' not in constraints[0].upper() and 'NOT NULL' not in constraints[0].upper():
print("'id' column constraints must be 'UNIQUE NOT NULL'")
goodtogo = False
else:
constraints = ['UNIQUE NOT NULL'] + ([''] * (len(columns) - 1))
# Set UNIQUE NOT NULL constraints for the primary keys, except ID which is already has them
if pk:
            if not isinstance(pk, list):
                pk = [pk]  # Wrap a single primary key name; list('name') would split it into characters
            for elem in pk:
                if elem == 'id':
                    continue
                else:
                    ind = list(columns).index(elem)
                    constraints[ind] = 'UNIQUE NOT NULL'
else:
pk = ['id']
if not len(columns) == len(types) == len(constraints):
print("Must provide equal length *columns ({}), *types ({}), and *constraints ({}) sequences." \
.format(len(columns), len(types), len(constraints)))
goodtogo = False
if goodtogo:
t = self.query("SELECT name FROM sqlite_master", unpack=True, fmt='table')
tables = t['name'].tolist()
# If the table exists, modify the columns
if table in tables and not new_table:
# Rename the old table and create a new one
self.list("DROP TABLE IF EXISTS TempOldTable")
self.list("ALTER TABLE {0} RENAME TO TempOldTable".format(table))
create_txt = "CREATE TABLE {0} (\n\t{1}".format(table, ', \n\t'.join(
['{} {} {}'.format(c, t, r) for c, t, r in zip(columns, types, constraints)]))
create_txt += ', \n\tPRIMARY KEY({})\n)'.format(', '.join([elem for elem in pk]))
# print(create_txt.replace(',', ',\n'))
self.list(create_txt)
# Populate the new table and drop the old one
old_columns = [c for c in self.query("PRAGMA table_info(TempOldTable)", unpack=True)[1] if c in columns]
self.list("INSERT INTO {0} ({1}) SELECT {1} FROM TempOldTable".format(table, ','.join(old_columns)))
# Check for and add any foreign key constraints
t = self.query('PRAGMA foreign_key_list(TempOldTable)', fmt='table')
if not isinstance(t, type(None)):
self.list("DROP TABLE TempOldTable")
self.add_foreign_key(table, t['table'].tolist(), t['from'].tolist(), t['to'].tolist())
else:
self.list("DROP TABLE TempOldTable")
# If the table does not exist and new_table is True, create it
elif table not in tables and new_table:
create_txt = "CREATE TABLE {0} (\n\t{1}".format(table, ', \n\t'.join(
['{} {} {}'.format(c, t, r) for c, t, r in zip(columns, types, constraints)]))
create_txt += ', \n\tPRIMARY KEY({})\n)'.format(', '.join([elem for elem in pk]))
# print(create_txt.replace(',', ',\n'))
print(create_txt)
self.list(create_txt)
# Otherwise the table to be modified doesn't exist or the new table to add already exists, so do nothing
else:
if new_table:
print('Table {} already exists. Set *new_table=False to modify.'.format(table.upper()))
else:
print('Table {} does not exist. Could not modify. Set *new_table=True to add a new table.'.format(
table.upper()))
else:
            print('The {} table has not been {}. Please make sure your table columns, '
                  'types, and constraints are formatted properly.'
                  .format(table.upper(), 'created' if new_table else 'modified')) | 0.005914 |
def get_command_line_key_for_unknown_config_file_setting(self, key):
"""Compute a commandline arg key to be used for a config file setting
that doesn't correspond to any defined configargparse arg (and so
doesn't have a user-specified commandline arg key).
Args:
key: The config file key that was being set.
"""
key_without_prefix_chars = key.strip(self.prefix_chars)
command_line_key = self.prefix_chars[0]*2 + key_without_prefix_chars
return command_line_key | 0.003717 |
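For illustration only (not part of the original class), with the default prefix character '-' the transformation behaves like this:

prefix_chars = '-'
key = 'db-host'  # hypothetical unknown config file setting
command_line_key = prefix_chars[0] * 2 + key.strip(prefix_chars)
assert command_line_key == '--db-host'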
def get_password_authentication_key(self, username, password, server_b_value, salt):
"""
        Calculates the final HKDF based on the computed S value, the computed U value, and the key.
:param {String} username Username.
:param {String} password Password.
:param {Long integer} server_b_value Server B value.
:param {Long integer} salt Generated salt.
:return {Buffer} Computed HKDF value.
"""
u_value = calculate_u(self.large_a_value, server_b_value)
if u_value == 0:
raise ValueError('U cannot be zero.')
username_password = '%s%s:%s' % (self.pool_id.split('_')[1], username, password)
username_password_hash = hash_sha256(username_password.encode('utf-8'))
x_value = hex_to_long(hex_hash(pad_hex(salt) + username_password_hash))
g_mod_pow_xn = pow(self.g, x_value, self.big_n)
int_value2 = server_b_value - self.k * g_mod_pow_xn
s_value = pow(int_value2, self.small_a_value + u_value * x_value, self.big_n)
hkdf = compute_hkdf(bytearray.fromhex(pad_hex(s_value)),
bytearray.fromhex(pad_hex(long_to_hex(u_value))))
return hkdf | 0.004992 |
def get_formatted_stack_frame(
project: 'projects.Project',
error_stack: bool = True
) -> list:
"""
Returns a list of the stack frames formatted for user display that has
been enriched by the project-specific data.
:param project:
The currently open project used to enrich the stack data.
:param error_stack:
Whether or not to return the error stack. When True the stack of the
last exception will be returned. If no such exception exists, an empty
list will be returned instead. When False the current execution stack
trace will be returned.
"""
return [
format_stack_frame(f, project)
for f in get_stack_frames(error_stack=error_stack)
] | 0.004 |
def parse(self, extent, desc_tag):
# type: (int, UDFTag) -> None
'''
Parse the passed in data into a UDF Terminating Descriptor.
Parameters:
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Terminating Descriptor already initialized')
self.desc_tag = desc_tag
self.orig_extent_loc = extent
self._initialized = True | 0.006612 |
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
# Print header
print(" " + empty_cell, end=" ")
for label in labels:
print("%{0}s".format(columnwidth) % label, end=" ")
print()
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=" ")
for j in range(len(labels)):
cell = "%{0}.1f".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=" ")
print() | 0.002105 |
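A quick usage sketch with a made-up 2x2 confusion matrix:

import numpy as np

cm = np.array([[13.0, 2.0],
               [1.0, 9.0]])
print_cm(cm, labels=['cat', 'dog'])
# Optionally suppress the diagonal or zero-valued cells:
print_cm(cm, labels=['cat', 'dog'], hide_diagonal=True, hide_zeroes=True)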
def __update_clusters(self, medoids):
"""!
        @brief Forms clusters in line with the specified medoids by calculating the distance from each point to the medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for i in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])
                if (dist < dist_optim) or (index == 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
        # If a cluster does not capture any objects it should be removed
self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > 0] | 0.010338 |
def calculate_integral(self, T1, T2, method):
r'''Method to calculate the integral of a property with respect to
temperature, using a specified method. Uses SciPy's `quad` function
to perform the integral, with no options.
        This method can be overridden by subclasses that prefer to provide
        analytical integrals for some or all methods, as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units*K`]
'''
        return float(quad(self.calculate, T1, T2, args=(method,))[0]) | 0.004137 |
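As a sketch of why an analytical override can pay off, a hypothetical stand-in class with the same calculate(T, method) signature (not the real property class) can be checked against the closed form:

from scipy.integrate import quad

class LinearHeatCapacity:  # hypothetical stand-in, illustrative units
    def calculate(self, T, method):
        return 10.0 + 0.05 * T

    def calculate_integral(self, T1, T2, method):
        return float(quad(self.calculate, T1, T2, args=(method,))[0])

prop = LinearHeatCapacity()
numeric = prop.calculate_integral(300.0, 400.0, 'LINEAR')
analytic = 10.0 * 100.0 + 0.05 / 2.0 * (400.0**2 - 300.0**2)
assert abs(numeric - analytic) < 1e-6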
def _fingerprint_dirs(self, dirpaths, topdown=True, onerror=None, followlinks=False):
"""Returns a fingerprint of the given file directories and all their sub contents.
This assumes that the file directories are of reasonable size
to cause memory or performance issues.
"""
# Note that we don't sort the dirpaths, as their order may have meaning.
filepaths = []
for dirpath in dirpaths:
dirs = os.walk(dirpath, topdown=topdown, onerror=onerror,
followlinks=followlinks)
sorted_dirs = sorted(dirs, key=lambda d: d[0])
filepaths.extend([os.path.join(dirpath, filename)
for dirpath, dirnames, filenames in sorted_dirs
for filename in sorted(filenames)])
return self._fingerprint_files(filepaths) | 0.009975 |
def parse(pem_str):
# type: (bytes) -> List[AbstractPEMObject]
"""
Extract PEM objects from *pem_str*.
:param pem_str: String to parse.
:type pem_str: bytes
:return: list of :ref:`pem-objects`
"""
return [
_PEM_TO_CLASS[match.group(1)](match.group(0))
for match in _PEM_RE.finditer(pem_str)
] | 0.002899 |
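Typical usage goes through the package's public helpers (a sketch, assuming the pem package is installed and the file name is a placeholder):

import pem

objs = pem.parse_file('chain.pem')  # or pem.parse(raw_bytes) as defined above
for obj in objs:
    print(type(obj).__name__)  # e.g. Certificate, RSAPrivateKey, ...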
def distance(self, other):
"""Distance to another point on the sphere"""
return math.acos(self._pos3d.dot(other.vector)) | 0.014706 |
def file_open(self, fn):
"""Yields the opening text of a file section in multipart HTTP.
Parameters
----------
fn : str
Filename for the file being opened and added to the HTTP body
"""
yield b'--'
yield self.boundary.encode()
yield CRLF
headers = content_disposition(fn)
headers.update(content_type(fn))
for c in self._write_headers(headers):
yield c | 0.00431 |
def list_passwords(kwargs=None, call=None):
'''
    List all passwords on the account
.. versionadded:: 2015.8.0
'''
response = _query('support', 'password/list')
ret = {}
for item in response['list']:
if 'server' in item:
server = item['server']['name']
if server not in ret:
ret[server] = []
ret[server].append(item)
return ret | 0.002387 |