def aggregation_result_extractor(impact_report, component_metadata):
"""Extracting aggregation result of breakdown from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
"""Initializations."""
extra_args = component_metadata.extra_args
# Find out aggregation report type
analysis_layer = impact_report.analysis
provenance = impact_report.impact_function.provenance
exposure_keywords = provenance['exposure_keywords']
exposure_summary_table = impact_report.exposure_summary_table
if exposure_summary_table:
exposure_summary_table_fields = exposure_summary_table.keywords[
'inasafe_fields']
aggregation_summary = impact_report.aggregation_summary
aggregation_summary_fields = aggregation_summary.keywords[
'inasafe_fields']
use_rounding = impact_report.impact_function.use_rounding
use_aggregation = bool(
impact_report.impact_function.provenance['aggregation_layer'])
if not use_aggregation:
return context
"""Filtering report sections."""
# Only process for applicable exposure types
# Get exposure type definition
exposure_type = definition(exposure_keywords['exposure'])
# Only round the number when it is population exposure and we use rounding
is_population = exposure_type is exposure_population
# For now aggregation report only applicable for breakable exposure types:
itemizable_exposures_all = [
exposure for exposure in exposure_all
if exposure.get('classifications')]
if exposure_type not in itemizable_exposures_all:
return context
"""Generating type name for columns."""
type_fields = read_dynamic_inasafe_field(
aggregation_summary_fields, affected_exposure_count_field)
# do not include total, to preserve ordering and proper reference
type_fields.remove('total')
# we need to sort the column
# get the classes lists
# retrieve classes definitions
exposure_classes_lists = retrieve_exposure_classes_lists(exposure_keywords)
# sort columns based on class order
# create function to sort
def sort_classes(_type_field):
"""Sort method to retrieve exposure class key index."""
# class key is the type field name
# find index in class list
for i, _exposure_class in enumerate(exposure_classes_lists):
if _type_field == _exposure_class['key']:
index = i
break
else:
index = -1
return index
# sort
type_fields = sorted(type_fields, key=sort_classes)
# generate type_header_labels for column header
type_header_labels = []
for type_name in type_fields:
type_label = tr(type_name.capitalize())
type_header_labels.append(type_label)
"""Generating values for rows."""
# generate rows of values for values of each column
rows = []
aggregation_name_index = aggregation_summary.fields().lookupField(
aggregation_name_field['field_name'])
total_field_index = aggregation_summary.fields().lookupField(
total_affected_field['field_name'])
type_field_index = []
for type_name in type_fields:
field_name = affected_exposure_count_field['field_name'] % type_name
type_index = aggregation_summary.fields().lookupField(field_name)
type_field_index.append(type_index)
for feat in aggregation_summary.getFeatures():
total_affected_value = format_number(
feat[total_field_index],
use_rounding=use_rounding,
is_population=is_population)
if total_affected_value == '0':
# skip aggregation type if the total affected is zero
continue
item = {
# Name is the header for each row
'name': feat[aggregation_name_index],
# Total is the total for each row
'total': total_affected_value
}
# Type values is the values for each column in each row
type_values = []
for idx in type_field_index:
affected_value = format_number(
feat[idx],
use_rounding=use_rounding)
type_values.append(affected_value)
item['type_values'] = type_values
rows.append(item)
"""Generate total for footers."""
# calculate total values for each type. Taken from exposure summary table
type_total_values = []
# Get affected field index
affected_field_index = exposure_summary_table.fields().lookupField(
total_affected_field['field_name'])
# Get breakdown field
breakdown_field = None
# I'm not sure what the difference is
# It is possible to have exposure_type_field or exposure_class_field
# at the moment
breakdown_fields = [
exposure_type_field,
exposure_class_field
]
for field in breakdown_fields:
if field['key'] in exposure_summary_table_fields:
breakdown_field = field
break
breakdown_field_name = breakdown_field['field_name']
breakdown_field_index = exposure_summary_table.fields().lookupField(
breakdown_field_name)
# Fetch total affected for each breakdown name
value_dict = {}
for feat in exposure_summary_table.getFeatures():
# exposure summary table is in csv format, so the field returned is
# always in text format
affected_value = int(float(feat[affected_field_index]))
affected_value = format_number(
affected_value,
use_rounding=use_rounding,
is_population=is_population)
value_dict[feat[breakdown_field_index]] = affected_value
if value_dict:
for type_name in type_fields:
affected_value_string_formatted = value_dict[type_name]
if affected_value_string_formatted == '0':
# if total affected for breakdown type is zero
# current column index
column_index = len(type_total_values)
# cut column header
type_header_labels = (
type_header_labels[:column_index]
+ type_header_labels[column_index + 1:])
# cut all row values for the column
for item in rows:
type_values = item['type_values']
item['type_values'] = (
type_values[:column_index]
+ type_values[column_index + 1:])
continue
type_total_values.append(affected_value_string_formatted)
"""Get the super total affected."""
# total for affected (super total)
analysis_feature = next(analysis_layer.getFeatures())
field_index = analysis_layer.fields().lookupField(
total_affected_field['field_name'])
total_all = format_number(
analysis_feature[field_index],
use_rounding=use_rounding)
"""Generate and format the context."""
aggregation_area_default_header = resolve_from_dictionary(
extra_args, 'aggregation_area_default_header')
header_label = (
aggregation_summary.title() or aggregation_area_default_header)
table_header_format = resolve_from_dictionary(
extra_args, 'table_header_format')
# check unit
units = exposure_type['units']
if units:
unit = units[0]
abbreviation = unit['abbreviation']
if abbreviation:
unit_string = '({abbreviation})'.format(abbreviation=abbreviation)
else:
unit_string = ''
else:
unit_string = ''
table_header = table_header_format.format(
title=provenance['map_legend_title'],
unit=unit_string)
table_header = ' '.join(table_header.split())
section_header = resolve_from_dictionary(extra_args, 'header')
notes = resolve_from_dictionary(extra_args, 'notes')
total_header = resolve_from_dictionary(extra_args, 'total_header')
total_in_aggregation_header = resolve_from_dictionary(
extra_args, 'total_in_aggregation_header')
context['component_key'] = component_metadata.key
context['header'] = section_header
context['notes'] = notes
context['aggregation_result'] = {
'table_header': table_header,
'header_label': header_label,
'type_header_labels': type_header_labels,
'total_label': total_header,
'total_in_aggregation_area_label': total_in_aggregation_header,
'rows': rows,
'type_total_values': type_total_values,
'total_all': total_all,
}
return context
def stageContent(self, configFiles, dateTimeFormat=None):
"""Parses a JSON configuration file to stage content.
Args:
configFiles (list): A list of JSON files on disk containing
configuration data for staging content.
dateTimeFormat (str): A valid date formatting directive, as understood
by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, in which
case ``'%Y-%m-%d %H:%M'`` is used.
"""
results = None
groups = None
items = None
group = None
content = None
contentInfo = None
startTime = None
orgTools = None
if dateTimeFormat is None:
dateTimeFormat = '%Y-%m-%d %H:%M'
scriptStartTime = datetime.datetime.now()
try:
print ("********************Stage Content Started********************")
print ("Script started at %s" % scriptStartTime.strftime(dateTimeFormat))
if self.securityhandler.valid == False:
print ("Login required")
else:
orgTools = orgtools.orgtools(securityinfo=self)
if orgTools is None:
print ("Error creating org tools")
else:
for configFile in configFiles:
config = common.init_config_json(config_file=configFile)
if config is not None:
if 'ContentItems' in config:
startTime = datetime.datetime.now()
print ("Processing config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat)))
contentInfo = config['ContentItems']
for cont in contentInfo:
content = cont['Content']
group = cont['ShareToGroup']
print ("Sharing content to: %s" % group)
if os.path.isfile(content):
with open(content, 'rb') as csvfile:
items = []
groups = []
for row in csv.DictReader(csvfile,dialect='excel'):
if cont['Type'] == "Group":
groups.append(row['id'])
elif cont['Type'] == "Items":
items.append(row['id'])
results = orgTools.shareItemsToGroup(shareToGroupName=group,items=items,groups=groups)
print ("Config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime)))
else:
print ("Config file missing ContentItems section")
else:
print ("Config %s not found" % configFile)
except(TypeError,ValueError,AttributeError) as e:
print (e)
except (common.ArcRestHelperError) as e:
print ("error in function: %s" % e[0]['function'])
print ("error on line: %s" % e[0]['line'])
print ("error in file name: %s" % e[0]['filename'])
print ("with error message: %s" % e[0]['synerror'])
if 'arcpyError' in e[0]:
print ("with arcpy message: %s" % e[0]['arcpyError'])
except Exception as e:
if (reportToolsInstalled):
if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)):
print ("error in function: %s" % e[0]['function'])
print ("error on line: %s" % e[0]['line'])
print ("error in file name: %s" % e[0]['filename'])
print ("with error message: %s" % e[0]['synerror'])
if 'arcpyError' in e[0]:
print ("with arcpy message: %s" % e[0]['arcpyError'])
else:
line, filename, synerror = trace()
print ("error on line: %s" % line)
print ("error in file name: %s" % filename)
print ("with error message: %s" % synerror)
else:
line, filename, synerror = trace()
print ("error on line: %s" % line)
print ("error in file name: %s" % filename)
print ("with error message: %s" % synerror)
finally:
print ("Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime))
print ("###############Stage Content Completed#################")
print ("")
#if orgTools is not None:
#orgTools.dispose()
results = None
groups = None
items = None
group = None
content = None
contentInfo = None
startTime = None
orgTools = None
del results
del groups
del items
del group
del content
del contentInfo
del startTime
del orgTools
gc.collect()
def _initURL(self,
org_url,
referer_url):
""" sets proper URLs for AGOL """
if org_url is not None and org_url != '':
if not org_url.startswith('http://') and not org_url.startswith('https://'):
org_url = 'https://' + org_url
self._org_url = org_url
if self._org_url.lower().find('/sharing/rest') > -1:
self._url = self._org_url
else:
self._url = self._org_url + "/sharing/rest"
if self._url.startswith('http://'):
self._surl = self._url.replace('http://', 'https://')
else:
self._surl = self._url
parsed_url = urlparse(self._org_url)
self._parsed_org_url = urlunparse((parsed_url[0],parsed_url[1],"","","",""))#added 7/15/2015
if referer_url is None:
self._referer_url = parsed_url.netloc
def plot_2d(self, X, labels=None, s=20, marker='o',
dimensions=(0, 1), ax=None, colors=None,
fignum=None, cmap=None, # @UndefinedVariable
** kwargs):
"""
Plot dimensions `dimensions` with given labels against each other in
PC space. Labels can be any sequence of labels of dimensions X.shape[0].
Labels can be drawn with a subsequent call to legend()
"""
if cmap is None:
cmap = matplotlib.cm.jet
if ax is None:
fig = pylab.figure(fignum)
ax = fig.add_subplot(111)
if labels is None:
labels = numpy.zeros(X.shape[0])
ulabels = []
for lab in labels:
if lab not in ulabels:
ulabels.append(lab)
nlabels = len(ulabels)
if colors is None:
colors = iter([cmap(float(i) / nlabels) for i in range(nlabels)])
else:
colors = iter(colors)
X_ = self.project(X, self.Q)[:,dimensions]
kwargs.update(dict(s=s))
plots = list()
for i, l in enumerate(ulabels):
kwargs.update(dict(color=next(colors), marker=marker[i % len(marker)]))
plots.append(ax.scatter(*X_[labels == l, :].T, label=str(l), **kwargs))
ax.set_xlabel(r"PC$_1$")
ax.set_ylabel(r"PC$_2$")
try:
pylab.tight_layout()
except:
pass
return plots
def create_attachment(self, upload_stream, file_name, repository_id, pull_request_id, project=None, **kwargs):
"""CreateAttachment.
[Preview API] Attach a new file to a pull request.
:param object upload_stream: Stream to upload
:param str file_name: The name of the file.
:param str repository_id: The repository ID of the pull request’s target branch.
:param int pull_request_id: ID of the pull request.
:param str project: Project ID or project name
:rtype: :class:`<Attachment> <azure.devops.v5_1.git.models.Attachment>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if file_name is not None:
route_values['fileName'] = self._serialize.url('file_name', file_name, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
if pull_request_id is not None:
route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='POST',
location_id='965d9361-878b-413b-a494-45d5b5fd8ab7',
version='5.1-preview.1',
route_values=route_values,
content=content,
media_type='application/octet-stream')
return self._deserialize('Attachment', response)
def start_transmit(self, fd, data=None):
"""
Cause :meth:`poll` to yield `data` when `fd` is writeable.
"""
self._wfds[fd] = (data or fd, self._generation)
self._update(fd)
def encode(self, string):
"""Encode a_string as per the canonicalisation encoding rules.
See the AWS dev reference page 186 (2009-11-30 version).
@return: a_string encoded.
"""
if isinstance(string, unicode):
string = string.encode("utf-8")
return quote(string, safe="~")
def calc_offset(self, syllables_spaces: List[str]) -> Dict[int, int]:
"""
Calculate a dictionary of accent positions from a list of syllables with spaces.
:param syllables_spaces: list of syllables, including spaces, for one line
:return: a dict mapping syllable index to the accent's character offset in the flattened line
"""
line = string_utils.flatten(syllables_spaces)
mydict = {} # type: Dict[int, int]
# #defaultdict(int) #type: Dict[int, int]
for idx, syl in enumerate(syllables_spaces):
target_syllable = syllables_spaces[idx]
skip_qu = string_utils.starts_with_qu(target_syllable)
matches = list(self.syllable_matcher.finditer(target_syllable))
for position, possible in enumerate(matches):
if skip_qu:
skip_qu = False
continue
(start, end) = possible.span()
if target_syllable[start:end] in \
self.constants.VOWELS + self.constants.ACCENTED_VOWELS:
part = line[:len("".join(syllables_spaces[:idx]))]
offset = len(part) + start
if line[offset] not in self.constants.VOWELS + self.constants.ACCENTED_VOWELS:
LOG.error("Problem at line {} offset {}".format(line, offset))
mydict[idx] = offset
return mydict
def _get_LDAP_connection():
"""
Return an LDAP connection
"""
server = ldap3.Server('ldap://' + get_optional_env('EPFL_LDAP_SERVER_FOR_SEARCH'))
connection = ldap3.Connection(server)
connection.open()
return connection, get_optional_env('EPFL_LDAP_BASE_DN_FOR_SEARCH')
def perm(A, p):
"""
Symmetric permutation of a symmetric sparse matrix.
:param A: :py:class:`spmatrix`
:param p: :py:class:`matrix` or :class:`list` of length `A.size[0]`
"""
assert isinstance(A,spmatrix), "argument must be a sparse matrix"
assert A.size[0] == A.size[1], "A must be a square matrix"
assert A.size[0] == len(p), "length of p must be equal to the order of A"
return A[p,p]
def unset_key(dotenv_path, key_to_unset, quote_mode="always"):
"""
Removes a given key from the given .env
If the .env path given doesn't exist, fails
If the given key doesn't exist in the .env, fails
"""
if not os.path.exists(dotenv_path):
warnings.warn("can't delete from %s - it doesn't exist." % dotenv_path)
return None, key_to_unset
removed = False
with rewrite(dotenv_path) as (source, dest):
for mapping in parse_stream(source):
if mapping.key == key_to_unset:
removed = True
else:
dest.write(mapping.original)
if not removed:
warnings.warn("key %s not removed from %s - key doesn't exist." % (key_to_unset, dotenv_path))
return None, key_to_unset
return removed, key_to_unset
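A minimal usage sketch (the path and key name below are hypothetical, and the dotenv helpers used above are assumed to be importable):
removed, key = unset_key('/tmp/example.env', 'SECRET_TOKEN')
print(removed, key)  # True if the key existed and was removed; None otherwise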
def to_bytes(data):
"""Takes an input str or bytes object and returns an equivalent bytes object.
:param data: Input data
:type data: str or bytes
:returns: Data normalized to bytes
:rtype: bytes
"""
if isinstance(data, six.string_types) and not isinstance(data, bytes):
return codecs.encode(data, aws_encryption_sdk.internal.defaults.ENCODING)
return data
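A tiny usage sketch, assuming the module's imports (six, aws_encryption_sdk) are in place and the SDK's default encoding is UTF-8:
print(to_bytes('hello'))   # b'hello' (str encoded to bytes)
print(to_bytes(b'hello'))  # b'hello' (already bytes, returned unchanged)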
def tag_instance(instance_id, **tags):
"""Tag a single ec2 instance."""
logger.debug("Got request to add tags %s to instance %s."
% (str(tags), instance_id))
ec2 = boto3.resource('ec2')
instance = ec2.Instance(instance_id)
# Remove None's from `tags`
filtered_tags = {k: v for k, v in tags.items() if v and k}
# Check for existing tags
if instance.tags is not None:
existing_tags = {tag.get('Key'): tag.get('Value')
for tag in instance.tags}
logger.debug("Ignoring existing tags; %s" % str(existing_tags))
for tag_key in existing_tags.keys():
filtered_tags.pop(tag_key, None)
# If we have new tags to add, add them.
tag_list = [{'Key': k, 'Value': v} for k, v in filtered_tags.items()]
if len(tag_list):
logger.info('Adding project tags "%s" to instance %s'
% (filtered_tags, instance_id))
instance.create_tags(Tags=tag_list)
else:
logger.info('No new tags from: %s' % str(tags))
return
def run_examples(examples):
"""Run read() on a number of examples, supress output, generate summary.
Parameters
----------
examples : list of tuples of three str elements
Tuples contain the path and cfg argument to the read function,
as well as the cfg argument to the merge function (*TODO*)
e.g. [(path, read_cfg, merge_cfg), (...)]
Returns
-------
None
Prints all results to stdout.
"""
import inspect
PATH_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe()))), '../local/test_data')
print('running examples ...')
t0 = datetime.now()
results = [0, 0, 0]
for ex in sorted(examples):
t1 = datetime.now()
src, cfg, _ = examples[ex]
src = os.path.abspath(os.path.join(PATH_TEST_DATA, src))
if not os.path.isfile(src) and not os.path.isdir(src):
print('{} not a file'.format(src))
break
if type(cfg) == str:
cfg = os.path.abspath(os.path.join(PATH_TEST_DATA, cfg))
if not os.path.isfile(cfg):
print('{} not a file'.format(cfg))
break
try:
data = read(src, cfg=cfg, silent=True)
n_keys = len(data.keys())
if n_keys > 0:
print('data {:<15} [SUCCESS] {:.1f}s, {} dfs, {}'.format(
ex, (datetime.now()-t1).total_seconds(), n_keys, mem(data)))
results[0] += 1
else:
print('data {:<15} [NO IMPORT] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[1] += 1
except:
print('data {:<15} [EXCEPTION] {:.1f}s'.format(ex, (datetime.now()-t1).total_seconds()))
results[2] += 1
print()
print('ran {} tests in {:.1f} seconds'.format(len(examples),
(datetime.now()-t0).total_seconds()))
print('{} success / {} no import / {} exception'.format(
str(results[0]), str(results[1]), str(results[2])))
def duration(self):
""" Returns task's current duration in minutes.
"""
if not self._loaded:
return 0
delta = datetime.datetime.now() - self._start_time
total_secs = (delta.microseconds +
(delta.seconds + delta.days * 24 * 3600) *
10 ** 6) / 10 ** 6
return max(0, int(round(total_secs / 60.0)))
def make_series_url(key):
"""For internal use. Given a series key, generate a valid URL to the series
endpoint for that key.
:param string key: the series key
:rtype: string"""
url = urlparse.urljoin(endpoint.SERIES_ENDPOINT, 'key/')
url = urlparse.urljoin(url, urllib.quote(key))
return url
def lookup_field_class(self, field, obj=None, default=None):
"""
Looks up any additional class we should include when rendering this field
"""
css = ""
# is there a class specified for this field
if field in self.field_config and 'class' in self.field_config[field]:
css = self.field_config[field]['class']
# if we were given a default, use that
elif default:
css = default
return css
def run(self):
"""Run this section and print out information."""
if ProfileCollection and isinstance(self.mloginfo.logfile,
ProfileCollection):
print("\n not available for system.profile collections\n")
return
for version, logevent in self.mloginfo.logfile.restarts:
print(" %s version %s"
% (logevent.datetime.strftime("%b %d %H:%M:%S"), version))
if len(self.mloginfo.logfile.restarts) == 0:
print(" no restarts found") | 0.003503 |
def exec_appcommand_post(self, attribute_list):
"""
Prepare and execute a HTTP POST call to AppCommand.xml end point.
Returns XML ElementTree on success and None on fail.
"""
# Prepare POST XML body for AppCommand.xml
post_root = ET.Element("tx")
for attribute in attribute_list:
# Append tags for each attribute
item = ET.Element("cmd")
item.set("id", "1")
item.text = attribute
post_root.append(item)
# Buffer XML body as binary IO
body = BytesIO()
post_tree = ET.ElementTree(post_root)
post_tree.write(body, encoding="utf-8", xml_declaration=True)
# Query receivers AppCommand.xml
try:
res = self.send_post_command(
self._urls.appcommand, body.getvalue())
except requests.exceptions.RequestException:
_LOGGER.error("No connection to %s end point on host %s",
self._urls.appcommand, self._host)
body.close()
else:
# Buffered XML not needed anymore: close
body.close()
try:
# Return XML ElementTree
root = ET.fromstring(res)
except (ET.ParseError, TypeError):
_LOGGER.error(
"End point %s on host %s returned malformed XML.",
self._urls.appcommand, self._host)
else:
return root
def compute_nutation(t):
"""Generate the nutation rotations for Time `t`.
If the Julian date is scalar, a simple ``(3, 3)`` matrix is
returned; if the date is an array of length ``n``, then an array of
matrices is returned with dimensions ``(3, 3, n)``.
"""
oblm, oblt, eqeq, psi, eps = t._earth_tilt
cobm = cos(oblm * DEG2RAD)
sobm = sin(oblm * DEG2RAD)
cobt = cos(oblt * DEG2RAD)
sobt = sin(oblt * DEG2RAD)
cpsi = cos(psi * ASEC2RAD)
spsi = sin(psi * ASEC2RAD)
return array(((cpsi,
-spsi * cobm,
-spsi * sobm),
(spsi * cobt,
cpsi * cobm * cobt + sobm * sobt,
cpsi * sobm * cobt - cobm * sobt),
(spsi * sobt,
cpsi * cobm * sobt - sobm * cobt,
cpsi * sobm * sobt + cobm * cobt)))
def check_version(url=VERSION_URL):
"""Returns the version string for the latest SDK."""
for line in get(url):
if 'release:' in line:
return line.split(':')[-1].strip(' \'"\r\n')
def preamble(self, lenient=False):
"""
Extract the image metadata by reading
the initial part of the PNG file up to
the start of the ``IDAT`` chunk.
All the chunks that precede the ``IDAT`` chunk are
read and either processed for metadata or discarded.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
if not self.atchunk:
self.atchunk = self._chunk_len_type()
if self.atchunk is None:
raise FormatError('This PNG file has no IDAT chunks.')
if self.atchunk[1] == b'IDAT':
return
self.process_chunk(lenient=lenient)
def _apply_color(code, content):
"""
Apply a color code to text
"""
normal = u'\x1B[0m'
seq = u'\x1B[%sm' % code
# Replace any normal sequences with this sequence to support nested colors
return seq + (normal + seq).join(content.split(normal)) + normal
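For illustration, a short hedged example of wrapping text in standard ANSI SGR codes (31 = red, 32 = green); the nested call shows why inner reset sequences are re-opened:
inner = _apply_color('32', u'green')
print(_apply_color('31', u'red ' + inner + u' red again'))  # outer red resumes after the green span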
def add_resource(self, resource, *urls, **kwargs):
"""Adds a resource to the api.
:param resource: the class name of your resource
:type resource: :class:`Type[Resource]`
:param urls: one or more url routes to match for the resource, standard
flask routing rules apply. Any url variables will be
passed to the resource method as args.
:type urls: str
:param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`).
Can be used to reference this route in :class:`fields.Url` fields
:type endpoint: str
:param resource_class_args: args to be forwarded to the constructor of
the resource.
:type resource_class_args: tuple
:param resource_class_kwargs: kwargs to be forwarded to the constructor
of the resource.
:type resource_class_kwargs: dict
Additional keyword arguments not specified above will be passed as-is
to :meth:`flask.Flask.add_url_rule`.
Examples::
api.add_resource(HelloWorld, '/', '/hello')
api.add_resource(Foo, '/foo', endpoint="foo")
api.add_resource(FooSpecial, '/special/foo', endpoint="foo")
"""
if self.app is not None:
self._register_view(self.app, resource, *urls, **kwargs)
else:
self.resources.append((resource, urls, kwargs))
def post_send_process(context):
"""
Task to ensure subscription is bumped or converted
"""
if "error" in context:
return context
[deserialized_subscription] = serializers.deserialize(
"json", context["subscription"]
)
subscription = deserialized_subscription.object
[messageset] = serializers.deserialize("json", context["messageset"])
messageset = messageset.object
# Get set max
set_max = messageset.messages.filter(lang=subscription.lang).count()
logger.debug("set_max calculated - %s" % set_max)
# Compare user position to max
if subscription.next_sequence_number == set_max:
with transaction.atomic():
# Mark current as completed
logger.debug("marking current subscription as complete")
subscription.completed = True
subscription.active = False
subscription.process_status = 2 # Completed
deserialized_subscription.save(
update_fields=("completed", "active", "process_status")
)
# If next set defined create new subscription
if messageset.next_set:
logger.info("Creating new subscription for next set")
newsub = Subscription.objects.create(
identity=subscription.identity,
lang=subscription.lang,
messageset=messageset.next_set,
schedule=messageset.next_set.default_schedule,
)
logger.debug("Created Subscription <%s>" % newsub.id)
else:
# More in this set so increment by one
logger.debug("incrementing next_sequence_number")
subscription.next_sequence_number = F("next_sequence_number") + 1
logger.debug("setting process status back to 0")
subscription.process_status = 0
logger.debug("saving subscription")
deserialized_subscription.save(
update_fields=("next_sequence_number", "process_status")
)
# return response
return "Subscription for %s updated" % str(subscription.id) | 0.000471 |
def metadata(self):
"""Retrieves metadata about the object.
Returns:
An ObjectMetadata instance with information about this object.
Raises:
Exception if there was an error requesting the object's metadata.
"""
if self._info is None:
try:
self._info = self._api.objects_get(self._bucket, self._key)
except Exception as e:
raise e
return ObjectMetadata(self._info) if self._info else None
def _erase_vm_info(name):
'''
erase the information for a VM that we are destroying.
some sdb drivers (such as the SQLite driver we expect to use)
do not have a `delete` method, so if the delete fails, we have
to replace the record with a blank entry.
'''
try:
# delete the machine record
vm_ = get_vm_info(name)
if vm_['machine']:
key = _build_machine_uri(vm_['machine'], vm_.get('cwd', '.'))
try:
__utils__['sdb.sdb_delete'](key, __opts__)
except KeyError:
# no delete method found -- load a blank value
__utils__['sdb.sdb_set'](key, None, __opts__)
except Exception:
pass
uri = _build_sdb_uri(name)
try:
# delete the name record
__utils__['sdb.sdb_delete'](uri, __opts__)
except KeyError:
# no delete method found -- load an empty dictionary
__utils__['sdb.sdb_set'](uri, {}, __opts__)
except Exception:
pass
def bisect(func, a, b, xtol=1e-12, maxiter=100):
"""
Finds the root of `func` using the bisection method.
Requirements
------------
- func must be continuous function that accepts a single number input
and returns a single number
- `func(a)` and `func(b)` must have opposite sign
Parameters
----------
func : function
the function that we want to find the root of
a : number
one of the bounds on the input
b : number
the other bound on the input
xtol : number, optional
the solution tolerance of the input value. The algorithm is
considered converged if `abs(b-a)/2. < xtol`
maxiter : number, optional
the maximum number of iterations allowed for convergence
"""
fa = func(a)
if fa == 0.:
return a
fb = func(b)
if fb == 0.:
return b
assert sign(fa) != sign(fb)
for i in xrange(maxiter):
c = (a + b) / 2.
fc = func(c)
if fc == 0. or abs(b - a) / 2. < xtol:
return c
if sign(fc) == sign(func(a)):
a = c
else:
b = c
else:
raise RuntimeError('Failed to converge after %d iterations.' % maxiter)
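A quick usage sketch (assuming the module-level sign/xrange helpers this function relies on are present): finding sqrt(2) as the root of f(x) = x**2 - 2 on [0, 2].
root = bisect(lambda x: x ** 2 - 2., 0., 2., xtol=1e-9)
print(root)  # approximately 1.414213562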
def labels2onehot(labels: [List[str], List[List[str]], np.ndarray], classes: [list, np.ndarray]) -> np.ndarray:
"""
Convert labels to one-hot vectors for multi-class multi-label classification
Args:
labels: list of samples where each sample is a class or a list of classes the sample belongs to
classes: array of classes' names
Returns:
2d array with one-hot representation of given samples
"""
n_classes = len(classes)
y = []
for sample in labels:
curr = np.zeros(n_classes)
if isinstance(sample, list):
for intent in sample:
if intent not in classes:
log.warning('Unknown intent {} detected. Assigning no class'.format(intent))
else:
curr[np.where(np.array(classes) == intent)[0]] = 1
else:
curr[np.where(np.array(classes) == sample)[0]] = 1
y.append(curr)
y = np.asarray(y)
return y
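A small usage sketch (assuming numpy is imported as np, as the function body implies):
classes = ['weather', 'music', 'news']
labels = ['music', ['weather', 'news'], 'news']
print(labels2onehot(labels, classes))
# expected rows: [0, 1, 0], [1, 0, 1], [0, 0, 1]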
def _pipeline_needs_fastq(config, data):
"""Determine if the pipeline can proceed with a BAM file, or needs fastq conversion.
"""
aligner = config["algorithm"].get("aligner")
support_bam = aligner in alignment.metadata.get("support_bam", [])
return aligner and not support_bam
def find(self, pattern):
""" Searches for an image pattern in the given region
Throws ``FindFailed`` exception if the image could not be found.
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
findFailedRetry = True
while findFailedRetry:
match = self.exists(pattern)
if match is not None:
break
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime)
return match
def _windows_rename(self, tmp_filename):
""" Workaround the fact that os.rename raises an OSError on Windows
:param tmp_filename: The file to rename
"""
os.remove(self.input_file) if os.path.isfile(self.input_file) else None
os.rename(tmp_filename, self.input_file)
def listfiles(data_name):
"""
List files in a dataset.
"""
data_source = get_data_object(data_name, use_data_config=False)
if not data_source:
if 'output' in data_name:
floyd_logger.info("Note: You cannot clone the output of a running job. You need to wait for it to finish.")
sys.exit()
# Depth-first search
dirs = ['']
paths = []
while dirs:
cur_dir = dirs.pop()
url = "/resources/{}/{}?content=true".format(data_source.resource_id, cur_dir)
response = DataClient().request("GET", url).json()
if response['skipped_files'] > 0:
floyd_logger.info("Warning: in directory '%s', %s/%s files skipped (too many files)", cur_dir, response['skipped_files'], response['total_files'])
files = response['files']
files.sort(key=lambda f: f['name'])
for f in files:
path = os.path.join(cur_dir, f['name'])
if f['type'] == 'directory':
path += os.sep
paths.append(path)
if f['type'] == 'directory':
dirs.append(os.path.join(cur_dir, f['name']))
for path in paths:
floyd_logger.info(path)
def batch_update_reimburse(self, openid, reimburse_status, invoice_list):
"""
Batch-update invoice reimbursement status (for the reimbursing party).
For details, see
https://mp.weixin.qq.com/wiki?id=mp1496561749_f7T6D
:param openid: the user's Open ID
:param reimburse_status: invoice reimbursement status
:param invoice_list: list of invoices
:type invoice_list: list[dict]
"""
return self._post(
'reimburse/updatestatusbatch',
data={
'openid': openid,
'reimburse_status': reimburse_status,
'invoice_list': invoice_list,
},
)
def query(query, ts, **kwargs):
"""
Perform *query* on the testsuite *ts*.
Note: currently only 'select' queries are supported.
Args:
query (str): TSQL query string
ts (:class:`delphin.itsdb.TestSuite`): testsuite to query over
kwargs: keyword arguments passed to the more specific query
function (e.g., :func:`select`)
Example:
>>> list(tsql.query('select i-id where i-length < 4', ts))
[[142], [1061]]
"""
queryobj = _parse_query(query)
if queryobj['querytype'] in ('select', 'retrieve'):
return _select(
queryobj['projection'],
queryobj['tables'],
queryobj['where'],
ts,
mode=kwargs.get('mode', 'list'),
cast=kwargs.get('cast', True))
else:
# not really a syntax error; replace with TSQLError or something
# when the proper exception class exists
raise TSQLSyntaxError(queryobj['querytype'] +
' queries are not supported')
def listdir(self, url):
"""Returns a list of the files under the specified path"""
(store_name, path) = self._split_url(url)
adapter = self._create_adapter(store_name)
return [
"adl://{store_name}.azuredatalakestore.net/{path_to_child}".format(
store_name=store_name, path_to_child=path_to_child
)
for path_to_child in adapter.ls(path)
]
def _setup_tunnel(
self):
"""
*setup ssh tunnel if required*
"""
from subprocess import Popen, PIPE, STDOUT
import pymysql as ms
# SETUP TUNNEL IF REQUIRED
if "ssh tunnel" in self.settings:
# TEST TUNNEL DOES NOT ALREADY EXIST
sshPort = self.settings["ssh tunnel"]["port"]
connected = self._checkServer(
self.settings["database settings"]["host"], sshPort)
if connected:
pass
else:
# GRAB TUNNEL SETTINGS FROM SETTINGS FILE
ru = self.settings["ssh tunnel"]["remote user"]
rip = self.settings["ssh tunnel"]["remote ip"]
rh = self.settings["ssh tunnel"]["remote datbase host"]
cmd = "ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306" % locals()
p = Popen(cmd, shell=True, close_fds=True)
output = p.communicate()[0]
# TEST CONNECTION - QUIT AFTER SO MANY TRIES
connected = False
count = 0
while not connected:
connected = self._checkServer(
self.settings["database settings"]["host"], sshPort)
time.sleep(1)
count += 1
if count == 5:
self.log.error(
'could not set up tunnel to remote database' % locals())
sys.exit(0)
if "tunnel" in self.settings["database settings"] and self.settings["database settings"]["tunnel"]:
# TEST TUNNEL DOES NOT ALREADY EXIST
sshPort = self.settings["database settings"]["tunnel"]["port"]
connected = self._checkServer(
self.settings["database settings"]["host"], sshPort)
if connected:
pass
else:
# GRAB TUNNEL SETTINGS FROM SETTINGS FILE
ru = self.settings["database settings"][
"tunnel"]["remote user"]
rip = self.settings["database settings"]["tunnel"]["remote ip"]
rh = self.settings["database settings"][
"tunnel"]["remote datbase host"]
cmd = "ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306" % locals()
p = Popen(cmd, shell=True, close_fds=True)
output = p.communicate()[0]
# TEST CONNECTION - QUIT AFTER SO MANY TRIES
connected = False
count = 0
while not connected:
connected = self._checkServer(
self.settings["database settings"]["host"], sshPort)
time.sleep(1)
count += 1
if count == 5:
self.log.error(
'could not set up tunnel to remote database' % locals())
sys.exit(0)
# SETUP A DATABASE CONNECTION FOR THE remote database
host = self.settings["database settings"]["host"]
user = self.settings["database settings"]["user"]
passwd = self.settings["database settings"]["password"]
dbName = self.settings["database settings"]["db"]
thisConn = ms.connect(
host=host,
user=user,
passwd=passwd,
db=dbName,
port=sshPort,
use_unicode=True,
charset='utf8',
local_infile=1,
client_flag=ms.constants.CLIENT.MULTI_STATEMENTS,
connect_timeout=36000,
max_allowed_packet=51200000
)
thisConn.autocommit(True)
self.remoteDBConn = thisConn
return None
def round_sigfigs(x, n=2):
"""
Rounds the number to the specified significant figures. x can also be
a list or array of numbers (in these cases, a numpy array is returned).
"""
iterable = is_iterable(x)
if not iterable: x = [x]
# make a copy to be safe
x = _n.array(x)
# loop over the elements
for i in range(len(x)):
# Handle the weird cases
if x[i] not in [None, _n.inf, _n.nan]:
sig_figs = -int(_n.floor(_n.log10(abs(x[i]))))+n-1
x[i] = _n.round(x[i], sig_figs)
if iterable: return x
else: return x[0]
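A brief usage sketch (assuming _n is numpy and the module's is_iterable helper is available):
print(round_sigfigs(0.012345))      # 0.012 (two significant figures)
print(round_sigfigs(123456, n=3))   # 123000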
def on_content_type(handlers, default=None, error='The requested content type does not match any of those allowed'):
"""Returns a content in a different format based on the clients provided content type,
should pass in a dict with the following format:
{'[content-type]': action,
...
}
"""
def output_type(data, request, response):
handler = handlers.get(request.content_type.split(';')[0], default)
if not handler:
raise falcon.HTTPNotAcceptable(error)
response.content_type = handler.content_type
return handler(data, request=request, response=response)
output_type.__doc__ = 'Supports any of the following formats: {0}'.format(', '.join(
function.__doc__ or function.__name__ for function in handlers.values()))
output_type.content_type = ', '.join(handlers.keys())
return output_type
def brpop(self, key, *keys, timeout=0, encoding=_NOTSET):
"""Remove and get the last element in a list, or block until one
is available.
:raises TypeError: if timeout is not int
:raises ValueError: if timeout is less than 0
"""
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
args = keys + (timeout,)
return self.execute(b'BRPOP', key, *args, encoding=encoding)
def approximate_surface(points, size_u, size_v, degree_u, degree_v, **kwargs):
""" Surface approximation using least squares method with fixed number of control points.
This algorithm interpolates the corner control points and approximates the remaining control points. Please refer to
Algorithm A9.7 of The NURBS Book (2nd Edition), pp.422-423 for details.
Keyword Arguments:
* ``centripetal``: activates centripetal parametrization method. *Default: False*
* ``ctrlpts_size_u``: number of control points on the u-direction. *Default: size_u - 1*
* ``ctrlpts_size_v``: number of control points on the v-direction. *Default: size_v - 1*
:param points: data points
:type points: list, tuple
:param size_u: number of data points on the u-direction, :math:`r`
:type size_u: int
:param size_v: number of data points on the v-direction, :math:`s`
:type size_v: int
:param degree_u: degree of the output surface for the u-direction
:type degree_u: int
:param degree_v: degree of the output surface for the v-direction
:type degree_v: int
:return: approximated B-Spline surface
:rtype: BSpline.Surface
"""
# Keyword arguments
use_centripetal = kwargs.get('centripetal', False)
num_cpts_u = kwargs.get('ctrlpts_size_u', size_u - 1) # number of datapts, r + 1 > number of ctrlpts, n + 1
num_cpts_v = kwargs.get('ctrlpts_size_v', size_v - 1) # number of datapts, s + 1 > number of ctrlpts, m + 1
# Dimension
dim = len(points[0])
# Get uk and vl
uk, vl = compute_params_surface(points, size_u, size_v, use_centripetal)
# Compute knot vectors
kv_u = compute_knot_vector2(degree_u, size_u, num_cpts_u, uk)
kv_v = compute_knot_vector2(degree_v, size_v, num_cpts_v, vl)
# Construct matrix Nu
matrix_nu = []
for i in range(1, size_u - 1):
m_temp = []
for j in range(1, num_cpts_u - 1):
m_temp.append(helpers.basis_function_one(degree_u, kv_u, j, uk[i]))
matrix_nu.append(m_temp)
# Compute Nu transpose
matrix_ntu = linalg.matrix_transpose(matrix_nu)
# Compute NTNu matrix
matrix_ntnu = linalg.matrix_multiply(matrix_ntu, matrix_nu)
# Compute LU-decomposition of NTNu matrix
matrix_ntnul, matrix_ntnuu = linalg.lu_decomposition(matrix_ntnu)
# Fit u-direction
ctrlpts_tmp = [[0.0 for _ in range(dim)] for _ in range(num_cpts_u * size_v)]
for j in range(size_v):
ctrlpts_tmp[j + (size_v * 0)] = list(points[j + (size_v * 0)])
ctrlpts_tmp[j + (size_v * (num_cpts_u - 1))] = list(points[j + (size_v * (size_u - 1))])
# Compute Rku - Eqn. 9.63
pt0 = points[j + (size_v * 0)] # Qzero
ptm = points[j + (size_v * (size_u - 1))] # Qm
rku = []
for i in range(1, size_u - 1):
ptk = points[j + (size_v * i)]
n0p = helpers.basis_function_one(degree_u, kv_u, 0, uk[i])
nnp = helpers.basis_function_one(degree_u, kv_u, num_cpts_u - 1, uk[i])
elem2 = [c * n0p for c in pt0]
elem3 = [c * nnp for c in ptm]
rku.append([a - b - c for a, b, c in zip(ptk, elem2, elem3)])
# Compute Ru - Eqn. 9.67
ru = [[0.0 for _ in range(dim)] for _ in range(num_cpts_u - 2)]
for i in range(1, num_cpts_u - 1):
ru_tmp = []
for idx, pt in enumerate(rku):
ru_tmp.append([p * helpers.basis_function_one(degree_u, kv_u, i, uk[idx + 1]) for p in pt])
for d in range(dim):
for idx in range(len(ru_tmp)):
ru[i - 1][d] += ru_tmp[idx][d]
# Get intermediate control points
for d in range(dim):
b = [pt[d] for pt in ru]
y = linalg.forward_substitution(matrix_ntnul, b)
x = linalg.backward_substitution(matrix_ntnuu, y)
for i in range(1, num_cpts_u - 1):
ctrlpts_tmp[j + (size_v * i)][d] = x[i - 1]
# Construct matrix Nv
matrix_nv = []
for i in range(1, size_v - 1):
m_temp = []
for j in range(1, num_cpts_v - 1):
m_temp.append(helpers.basis_function_one(degree_v, kv_v, j, vl[i]))
matrix_nv.append(m_temp)
# Compute Nv transpose
matrix_ntv = linalg.matrix_transpose(matrix_nv)
# Compute NTNv matrix
matrix_ntnv = linalg.matrix_multiply(matrix_ntv, matrix_nv)
# Compute LU-decomposition of NTNv matrix
matrix_ntnvl, matrix_ntnvu = linalg.lu_decomposition(matrix_ntnv)
# Fit v-direction
ctrlpts = [[0.0 for _ in range(dim)] for _ in range(num_cpts_u * num_cpts_v)]
for i in range(num_cpts_u):
ctrlpts[0 + (num_cpts_v * i)] = list(ctrlpts_tmp[0 + (size_v * i)])
ctrlpts[num_cpts_v - 1 + (num_cpts_v * i)] = list(ctrlpts_tmp[size_v - 1 + (size_v * i)])
# Compute Rkv - Eqs. 9.63
pt0 = ctrlpts_tmp[0 + (size_v * i)] # Qzero
ptm = ctrlpts_tmp[size_v - 1 + (size_v * i)] # Qm
rkv = []
for j in range(1, size_v - 1):
ptk = ctrlpts_tmp[j + (size_v * i)]
n0p = helpers.basis_function_one(degree_v, kv_v, 0, vl[j])
nnp = helpers.basis_function_one(degree_v, kv_v, num_cpts_v - 1, vl[j])
elem2 = [c * n0p for c in pt0]
elem3 = [c * nnp for c in ptm]
rkv.append([a - b - c for a, b, c in zip(ptk, elem2, elem3)])
# Compute Rv - Eqn. 9.67
rv = [[0.0 for _ in range(dim)] for _ in range(num_cpts_v - 2)]
for j in range(1, num_cpts_v - 1):
rv_tmp = []
for idx, pt in enumerate(rkv):
rv_tmp.append([p * helpers.basis_function_one(degree_v, kv_v, j, vl[idx + 1]) for p in pt])
for d in range(dim):
for idx in range(len(rv_tmp)):
rv[j - 1][d] += rv_tmp[idx][d]
# Get intermediate control points
for d in range(dim):
b = [pt[d] for pt in rv]
y = linalg.forward_substitution(matrix_ntnvl, b)
x = linalg.backward_substitution(matrix_ntnvu, y)
for j in range(1, num_cpts_v - 1):
ctrlpts[j + (num_cpts_v * i)][d] = x[j - 1]
# Generate B-spline surface
surf = BSpline.Surface()
surf.degree_u = degree_u
surf.degree_v = degree_v
surf.ctrlpts_size_u = num_cpts_u
surf.ctrlpts_size_v = num_cpts_v
surf.ctrlpts = ctrlpts
surf.knotvector_u = kv_u
surf.knotvector_v = kv_v
return surf
def custom_gradient(fx, gx, x, fx_gx_manually_stopped=False, name=None):
"""Embeds a custom gradient into a `Tensor`.
This function works by clever application of `stop_gradient`. I.e., observe
that:
```none
h(x) = stop_gradient(f(x)) + stop_gradient(g(x)) * (x - stop_gradient(x))
```
is such that `h(x) == stop_gradient(f(x))` and
`grad[h(x), x] == stop_gradient(g(x)).`
In addition to scalar-domain/scalar-range functions, this function also
supports tensor-domain/scalar-range functions.
Partial Custom Gradient:
Suppose `h(x) = htilde(x, y)`. Note that `dh/dx = stop(g(x))` but `dh/dy =
None`. This is because a `Tensor` cannot have only a portion of its gradient
stopped. To circumvent this issue, one must manually `stop_gradient` the
relevant portions of `f`, `g`. For example see the unit-test,
`test_works_correctly_fx_gx_manually_stopped`.
Args:
fx: `Tensor`. Output of function evaluated at `x`.
gx: `Tensor` or list of `Tensor`s. Gradient of function at (each) `x`.
x: `Tensor` or list of `Tensor`s. Args of evaluation for `f`.
fx_gx_manually_stopped: Python `bool` indicating that `fx`, `gx` manually
have `stop_gradient` applied.
name: Python `str` name prefixed to Ops created by this function.
Returns:
fx: Floating-type `Tensor` equal to `f(x)` but which has gradient
`stop_gradient(g(x))`.
"""
def maybe_stop(x):
if fx_gx_manually_stopped:
return x
return tf.stop_gradient(x)
with tf.compat.v1.name_scope(name, 'custom_gradient', [fx, gx, x]):
fx = tf.convert_to_tensor(value=fx, name='fx')
# We don't want to bother eagerly computing `gx` since we may not even need
# it.
with tf.control_dependencies([fx]):
if is_list_like(x):
x = [identity(x_, name='x') for x_ in x]
else:
x = [identity(x, name='x')]
if is_list_like(gx):
gx = [identity(gx_, dtype=fx.dtype, name='gx')
for gx_ in gx]
else:
gx = [identity(gx, dtype=fx.dtype, name='gx')]
override_grad = []
for x_, gx_ in zip(x, gx):
# Observe: tf.gradients(f(x), x)[i].shape == x[i].shape
# thus we check that the user is supplying correct shapes.
equal_shape = tf.compat.v1.assert_equal(
tf.shape(input=x_),
tf.shape(input=gx_),
message='Each `x` must have the same shape as each `gx`.')
with tf.control_dependencies([equal_shape]):
# IEEE754 ensures `(x-x)==0.` and that `0.*x==0.` so we make sure to
# write the code this way, rather than, e.g.,
# `sum_x * stop(gx) + stop(fx - sum_x * gx)`.
# For more discussion regarding the relevant portions of the IEEE754
# standard, see the StackOverflow question,
# "Is there a floating point value of x, for which x-x == 0 is false?"
# http://stackoverflow.com/q/2686644
zeros_like_x_ = x_ - tf.stop_gradient(x_)
override_grad.append(
tf.reduce_sum(input_tensor=maybe_stop(gx_) * zeros_like_x_))
override_grad = sum(override_grad)
override_grad /= tf.cast(tf.size(input=fx), dtype=fx.dtype.base_dtype)
# Proof of correctness:
#
# f(x) = x * stop[gx] + stop[fx - x * gx]
# = stop[fx]
#
# g(x) = grad[fx]
# = stop[gx] + grad[stop[fx - x * gx]]
# = stop[gx] + 0
#
# Notice that when x is zero it still works:
# grad[x * stop(gx) + stop(fx - x * gx)] = 1 * stop[gx] + 0 = stop[gx]
#
# The proof is similar for the tensor-domain case, except that we
# `reduce_sum` the `stop[gx] * (x - stop[x])` then rescale by
# `tf.size(fx)` since this reduced version is broadcast to `fx`.
return maybe_stop(fx) + override_grad
def dirty_ops(self, instance):
''' Returns a dict of the operations needed to update this object.
See :func:`Document.get_dirty_ops` for more details.'''
obj_value = instance._values[self._name]
if not obj_value.set:
return {}
if not obj_value.dirty and self.__type.config_extra_fields != 'ignore':
return {}
ops = obj_value.value.get_dirty_ops()
ret = {}
for op, values in ops.items():
ret[op] = {}
for key, value in values.items():
name = '%s.%s' % (self._name, key)
ret[op][name] = value
return ret
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(("{fname}() got an unexpected "
"keyword argument '{arg}'".
format(fname=fname, arg=bad_arg)))
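An illustrative call (the function and argument names here are hypothetical):
_check_for_invalid_keys('my_func', {'a': 1, 'typo_arg': 2}, ('a', 'b'))
# raises TypeError: my_func() got an unexpected keyword argument 'typo_arg'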
def play(self, wav=None, data=None, rate=16000, channels=1, width=2, block=True, spectrum=None):
"""
play wav file or raw audio (string or generator)
Args:
wav: wav file path
data: raw audio data, str or iterator
rate: sample rate, only for raw audio
channels: channel number, only for raw data
width: raw audio data width, 16 bit is 2, only for raw data
block: if true, block until audio is played.
spectrum: if true, use a spectrum analyzer thread to analyze data
"""
if wav:
f = wave.open(wav, 'rb')
rate = f.getframerate()
channels = f.getnchannels()
width = f.getsampwidth()
def gen(w):
d = w.readframes(CHUNK_SIZE)
while d:
yield d
d = w.readframes(CHUNK_SIZE)
w.close()
data = gen(f)
self.stop_event.clear()
if block:
self._play(data, rate, channels, width, spectrum)
else:
thread = threading.Thread(target=self._play, args=(data, rate, channels, width, spectrum))
thread.start()
def assert_optimizer_pickle_matches_for_phase(phase):
"""
Assert that the previously saved optimizer is equal to the phase's optimizer if a saved optimizer is found.
Parameters
----------
phase
The phase
Raises
-------
exc.PipelineException
"""
path = make_optimizer_pickle_path(phase)
if os.path.exists(path):
with open(path, "r+b") as f:
loaded_optimizer = pickle.loads(f.read())
if phase.optimizer != loaded_optimizer:
raise exc.PipelineException(
f"Can't restart phase at path {path} because settings don't match. "
f"Did you change the optimizer settings or model?") | 0.004208 |
def get_definition(self, project, definition_id, revision=None, min_metrics_time=None, property_filters=None, include_latest_builds=None):
"""GetDefinition.
Gets a definition, optionally at a specific revision.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:param int revision: The revision number to retrieve. If this is not specified, the latest version will be returned.
:param datetime min_metrics_time: If specified, indicates the date from which metrics should be included.
:param [str] property_filters: A comma-delimited list of properties to include in the results.
:param bool include_latest_builds:
:rtype: :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if revision is not None:
query_parameters['revision'] = self._serialize.query('revision', revision, 'int')
if min_metrics_time is not None:
query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
if include_latest_builds is not None:
query_parameters['includeLatestBuilds'] = self._serialize.query('include_latest_builds', include_latest_builds, 'bool')
response = self._send(http_method='GET',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('BuildDefinition', response)
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def update_settings(self, updates, config=None):
'''Update the client secrets data structure for a particular
authentication. This should only be used for a (quasi-permanent) token
or similar. The secrets file, if found, is updated and saved by default.
Parameters
==========
helper: the name of the helper to look up in the config
updates: a dictionary of key:value pairs to add to the config
config: a configparser.ConfigParser(), if already loaded
'''
if config is None:
config = self._load_config_user()
if self.name not in config:
config[self.name] = {}
config[self.name].update(updates)
# Update the saved file
configfile = get_configfile_user()
write_config(configfile, config)
return config
def _identity(table, target_length):
"""Identity minimisation function."""
if target_length is None or len(table) < target_length:
return table
raise MinimisationFailedError(target_length, len(table))
def calc_steady_state_dist(R):
"""Calculate the steady state dist of a 4 state markov transition matrix.
Parameters
----------
R : ndarray
Markov transition matrix
Returns
-------
p_ss : ndarray
Steady state probability distribution
"""
#Calc steady state distribution for a dinucleotide bias matrix
w, v = np.linalg.eig(R)
for i in range(4):
if np.abs(w[i] - 1) < 1e-8:
return np.real(v[:, i] / np.sum(v[:, i]))
return -1
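A short usage sketch (assuming numpy is imported as np); whether R should be row- or column-stochastic depends on the caller, so the doubly stochastic example below gives the same uniform answer either way:
R = np.array([[0.7, 0.1, 0.1, 0.1],
              [0.1, 0.7, 0.1, 0.1],
              [0.1, 0.1, 0.7, 0.1],
              [0.1, 0.1, 0.1, 0.7]])
print(calc_steady_state_dist(R))  # approximately [0.25, 0.25, 0.25, 0.25]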
def _move_tmp_file(self, tmpfilepath, filepath):
"""Moves tmpfile over file after saving is finished
Parameters
----------
filepath: String
\tTarget file path for xls file
tmpfilepath: String
\tTemporary file file path for xls file
"""
try:
shutil.move(tmpfilepath, filepath)
except OSError as err:
# No tmp file present
post_command_event(self.main_window, self.StatusBarMsg, text=err)
def loglike(self, y, f):
r"""
Bernoulli log likelihood.
Parameters
----------
y: ndarray
array of 0, 1 valued integers of targets
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
Returns
-------
logp: ndarray
the log likelihood of each y given each f under this
likelihood.
"""
# way faster than calling bernoulli.logpmf
y, f = np.broadcast_arrays(y, f)
ll = y * f - softplus(f)
return ll
def element_id_by_label(browser, label):
"""Return the id of a label's for attribute"""
label = XPathSelector(browser,
unicode('//label[contains(., "%s")]' % label))
if not label:
return False
return label.get_attribute('for')
def get_all_masters():
""" Returns the json object that represents each of the masters.
"""
masters = []
for master in __master_zk_nodes_keys():
master_zk_str = get_zk_node_data(master)['str']
masters.append(json.loads(master_zk_str))
return masters
def rotatePolygon(polygon, theta, origin=None):
"""Rotates the given polygon around the origin or if not given it's center of mass
polygon: np.array( (x1,y1), (...))
theta: rotation angle in radians (clockwise)
origin: [x, y] - if not given, set to the centre of gravity
returns: the rotated polygon (modified in place and returned)
"""
if origin is None:
origin = np.mean(polygon, axis=0, dtype=polygon.dtype)
#polygon = polygon.copy()
polygon -= origin
for n, corner in enumerate(polygon):
polygon[n] = corner[0] * np.cos(theta) - corner[1] * np.sin(theta), corner[
0] * np.sin(theta) + corner[1] * np.cos(theta)
polygon += origin
return polygon
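An illustrative usage sketch (assuming numpy is imported as np): rotating a unit square by 90 degrees about its centre; note the array is modified in place and returned.
square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
rotatePolygon(square, np.pi / 2)
print(np.round(square, 6))  # (0, 0) maps to (1, 0), and so on around the square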
def installed(name, enabled=True):
'''
Make sure that we have the given bundle ID or path to command
installed in the assistive access panel.
name
The bundle ID or path to command
enabled
Should assistive access be enabled on this application?
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
is_installed = __salt__['assistive.installed'](name)
if is_installed:
is_enabled = __salt__['assistive.enabled'](name)
if enabled != is_enabled:
__salt__['assistive.enable'](name, enabled)
ret['comment'] = 'Updated enable to {0}'.format(enabled)
else:
ret['comment'] = 'Already in the correct state'
else:
__salt__['assistive.install'](name, enabled)
ret['comment'] = 'Installed {0} into the assistive access panel'.format(name)
return ret
def Cinv(self):
"""Inverse of the noise covariance."""
try:
return np.linalg.inv(self.c)
except np.linalg.linalg.LinAlgError:
print('Warning: non-invertible noise covariance matrix c.')
return np.eye(self.c.shape[0])
def find_mean_vector(*args, **kwargs):
"""
Returns the mean vector for a set of measurements. By default, this expects
the input to be plunges and bearings, but the type of input can be
controlled through the ``measurement`` kwarg.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``plunge`` & ``bearing``, both
array-like sequences representing linear features. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"lines"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
mean_vector : tuple of two floats
The plunge and bearing of the mean vector (in degrees).
r_value : float
The length of the mean vector (a value between 0 and 1).
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines'))
vector, r_value = stereonet_math.mean_vector(lon, lat)
plunge, bearing = stereonet_math.geographic2plunge_bearing(*vector)
return (plunge[0], bearing[0]), r_value | 0.000513 |
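A hedged usage sketch of the `measurement` kwarg with made-up data in degrees:

plunges, bearings = [30., 35., 40.], [110., 115., 120.]
(mean_plunge, mean_bearing), r = find_mean_vector(plunges, bearings)

# the same call treating the inputs as strikes/dips of planes (poles are analysed)
strikes, dips = [20., 25., 30.], [45., 50., 55.]
(mean_plunge, mean_bearing), r = find_mean_vector(strikes, dips, measurement='poles')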
def write_fasta(
init_fasta, info_frags, output=DEFAULT_NEW_GENOME_NAME, junction=False
):
"""Convert an info_frags.txt file into a fasta file given a reference.
Optionally adds junction sequences to reflect the possibly missing base
pairs between two newly joined scaffolds.
"""
init_genome = {
record.id: record.seq for record in SeqIO.parse(init_fasta, "fasta")
}
my_new_records = []
with open(info_frags, "r") as info_frags_handle:
current_seq = ""
current_id = None
previous_contig = None
for line in info_frags_handle:
if line.startswith(">"):
previous_contig = None
if current_id is not None:
new_record = SeqRecord(
current_seq, id=current_id, description=""
)
my_new_records.append(new_record)
current_seq = ""
current_id = str(line[1:])
elif line.startswith("init_contig"):
previous_contig = None
else:
(init_contig, _, orientation, pos_start, pos_end) = str(
line[:-1]
).split("\t")
start = int(pos_start)
end = int(pos_end)
ori = int(orientation)
assert start < end
assert ori in {-1, 1}
seq_to_add = init_genome[init_contig][start:end]
if ori == 1:
current_seq += seq_to_add
elif ori == -1:
current_seq += seq_to_add.reverse_complement()
if junction and previous_contig not in {init_contig, None}:
error_was_raised = False
try:
extra_seq = Seq(junction, IUPAC.ambiguous_dna)
current_seq = extra_seq + current_seq
except TypeError:
if not error_was_raised:
print("Invalid junction sequence")
error_was_raised = True
previous_contig = init_contig
new_record = SeqRecord(current_seq, id=current_id, description="")
my_new_records.append(new_record)
SeqIO.write(my_new_records, output, "fasta") | 0.000426 |
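A hedged usage sketch (file names are hypothetical); per the docstring, the optional `junction` string stands in for base pairs possibly missing between newly joined scaffolds:

write_fasta("reference.fa", "info_frags.txt",
            output="scaffolded_genome.fa",
            junction="N" * 10)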
def database_names(self, session=None):
"""**DEPRECATED**: Get a list of the names of all databases on the
connected server.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.7
Deprecated. Use :meth:`list_database_names` instead.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
warnings.warn("database_names is deprecated. Use list_database_names "
"instead.", DeprecationWarning, stacklevel=2)
return self.list_database_names(session) | 0.003145 |
def get_nearest_node(G, point, method='haversine', return_dist=False):
"""
Return the graph node nearest to some specified (lat, lng) or (y, x) point,
and optionally the distance between the node and the point. This function
can use either a haversine or euclidean distance calculator.
Parameters
----------
G : networkx multidigraph
point : tuple
The (lat, lng) or (y, x) point for which we will find the nearest node
in the graph
method : str {'haversine', 'euclidean'}
Which method to use for calculating distances to find nearest node.
If 'haversine', graph nodes' coordinates must be in units of decimal
degrees. If 'euclidean', graph nodes' coordinates must be projected.
return_dist : bool
Optionally also return the distance (in meters if haversine, or graph
node coordinate units if euclidean) between the point and the nearest
node.
Returns
-------
int or tuple of (int, float)
Nearest node ID or optionally a tuple of (node ID, dist), where dist is
the distance (in meters if haversine, or graph node coordinate units
if euclidean) between the point and nearest node
"""
start_time = time.time()
if not G or (G.number_of_nodes() == 0):
        raise ValueError('G argument must not be empty and should contain at least one node')
# dump graph node coordinates into a pandas dataframe indexed by node id
# with x and y columns
coords = [[node, data['x'], data['y']] for node, data in G.nodes(data=True)]
df = pd.DataFrame(coords, columns=['node', 'x', 'y']).set_index('node')
# add columns to the dataframe representing the (constant) coordinates of
# the reference point
df['reference_y'] = point[0]
df['reference_x'] = point[1]
# calculate the distance between each node and the reference point
if method == 'haversine':
# calculate distance vector using haversine (ie, for
# spherical lat-long geometries)
distances = great_circle_vec(lat1=df['reference_y'],
lng1=df['reference_x'],
lat2=df['y'],
lng2=df['x'])
elif method == 'euclidean':
# calculate distance vector using euclidean distances (ie, for projected
# planar geometries)
distances = euclidean_dist_vec(y1=df['reference_y'],
x1=df['reference_x'],
y2=df['y'],
x2=df['x'])
else:
raise ValueError('method argument must be either "haversine" or "euclidean"')
# nearest node's ID is the index label of the minimum distance
nearest_node = distances.idxmin()
log('Found nearest node ({}) to point {} in {:,.2f} seconds'.format(nearest_node, point, time.time()-start_time))
# if caller requested return_dist, return distance between the point and the
# nearest node as well
if return_dist:
return nearest_node, distances.loc[nearest_node]
else:
return nearest_node | 0.00252 |
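A brief usage sketch, assuming `G` is a networkx multidigraph whose nodes carry 'x' (longitude) and 'y' (latitude) attributes in decimal degrees:

point = (37.7749, -122.4194)                       # (lat, lng)
node = get_nearest_node(G, point)                  # haversine by default
node, dist_m = get_nearest_node(G, point, return_dist=True)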
def set_sum_w2(self, w, ix, iy=0, iz=0):
"""
Sets the true number of entries in the bin weighted by w^2
"""
if self.GetSumw2N() == 0:
raise RuntimeError(
"Attempting to access Sumw2 in histogram "
"where weights were not stored")
xl = self.nbins(axis=0, overflow=True)
yl = self.nbins(axis=1, overflow=True)
idx = xl * yl * iz + xl * iy + ix
if not 0 <= idx < self.GetSumw2N():
raise IndexError("bin index out of range")
self.GetSumw2().SetAt(w, idx) | 0.00346 |
def _normal_map_callback(self, msg):
"""Callback for handling normal maps.
"""
try:
self._cur_normal_map = self._bridge.imgmsg_to_cv2(msg)
except:
self._cur_normal_map = None | 0.013043 |
def update(self, z):
""" Update filter with new measurement `z`
Returns
-------
x : np.array
estimate for this time step (same as self.x)
"""
self.n += 1
# rename for readability
n = self.n
dt = self.dt
x = self.x
K = self.K
y = self.y
if self._order == 0:
K[0] = 1. / n
y = z - x
x[0] += K[0] * y
elif self._order == 1:
K[0] = 2. * (2*n - 1) / (n*(n + 1))
K[1] = 6. / (n*(n + 1)*dt)
y = z - x[0] - (dt * x[1])
x[0] += (K[0] * y) + (dt * x[1])
x[1] += (K[1] * y)
else:
den = n * (n+1) * (n+2)
K[0] = 3. * (3*n**2 - 3*n + 2) / den
K[1] = 18. * (2*n-1) / (den*dt)
K[2] = 60. / (den*dt**2)
y = z - x[0] - (dt * x[1]) - (0.5 * dt**2 * x[2])
x[0] += (K[0] * y) + (x[1] * dt) + (.5 * dt**2 * x[2])
x[1] += (K[1] * y) + (x[2] * dt)
x[2] += (K[2] * y)
return self.x | 0.00182 |
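A worked check of the first-order gains above (with dt = 1): both K[0] and K[1] shrink toward zero as the sample count n grows, which is what makes this an expanding-memory (growing-memory) polynomial filter.

for n in (1, 2, 3, 10, 100):
    K0 = 2. * (2 * n - 1) / (n * (n + 1))
    K1 = 6. / (n * (n + 1) * 1.0)
    print(n, round(K0, 4), round(K1, 4))
# n=3 gives K0 = 10/12 ~ 0.833 and K1 = 0.5; by n=100, K0 ~ 0.039 and K1 ~ 0.0006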
def unitResponse(self,band):
"""This is used internally for :ref:`pysynphot-formula-effstim`
calculations."""
sp=band*self.vegaspec
total=sp.integrate()
return 2.5*math.log10(total) | 0.022624 |
def assign(self, subject):
""" Assigns the given subject to the topic """
if not isinstance(subject, (Publisher, Subscriber)):
raise TypeError('Assignee has to be Publisher or Subscriber')
# check if not already assigned
if self._subject is not None:
raise SubscriptionError('Topic %r already assigned' % self._path)
self._subject = subject
# subscribe to subject if topic has subscriptions
if self._subscriptions:
self._subject.subscribe(self)
# if topic received emits before assignment replay those emits
if self._pre_assign_emit is not None:
for value in self._pre_assign_emit:
self._subject.emit(value, who=self)
self._pre_assign_emit = None
return subject | 0.002433 |
def bucket(
arg,
buckets,
closed='left',
close_extreme=True,
include_under=False,
include_over=False,
):
"""
Compute a discrete binning of a numeric array
Parameters
----------
arg : numeric array expression
buckets : list
closed : {'left', 'right'}, default 'left'
Which side of each interval is closed. For example
buckets = [0, 100, 200]
closed = 'left': 100 falls in 2nd bucket
closed = 'right': 100 falls in 1st bucket
close_extreme : boolean, default True
Returns
-------
bucketed : coded value expression
"""
op = Bucket(
arg,
buckets,
closed=closed,
close_extreme=close_extreme,
include_under=include_under,
include_over=include_over,
)
return op.to_expr() | 0.001217 |
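A hedged usage sketch, assuming `table` is an ibis table expression with a numeric `price` column:

buckets = [0, 10, 25, 50, 100]
binned = bucket(table.price, buckets, closed='right',
                include_under=True, include_over=True)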
def event_loop(self):
"""asyncio.BaseEventLoop: the running event loop.
This fixture mainly exists to allow for overrides during unit tests.
"""
if not self._event_loop:
self._event_loop = asyncio.get_event_loop()
return self._event_loop | 0.006873 |
def from_file(cls, filepath):
"""Alternative constructor to get Torrent object from file.
:param str filepath:
:rtype: Torrent
"""
torrent = cls(Bencode.read_file(filepath))
torrent._filepath = filepath
return torrent | 0.007299 |
def _set_member_bridge_domain(self, v, load=False):
"""
Setter method for member_bridge_domain, mapped from YANG variable /topology_group/member_bridge_domain (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_member_bridge_domain is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_member_bridge_domain() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=member_bridge_domain.member_bridge_domain, is_container='container', presence=False, yang_name="member-bridge-domain", rest_name="member-bridge-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Member Bridge Domains for this topology group', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """member_bridge_domain must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=member_bridge_domain.member_bridge_domain, is_container='container', presence=False, yang_name="member-bridge-domain", rest_name="member-bridge-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Member Bridge Domains for this topology group', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='container', is_config=True)""",
})
self.__member_bridge_domain = t
if hasattr(self, '_set'):
self._set() | 0.005225 |
def has_permission(self, method, endpoint, user=None):
"""Return does the current user can access the resource.
Example::
@app.route('/some_url', methods=['GET', 'POST'])
@rbac.allow(['anonymous'], ['GET'])
def a_view_func():
return Response('Blah Blah...')
        If you are not logged in:
`rbac.has_permission('GET', 'a_view_func')` return True.
`rbac.has_permission('POST', 'a_view_func')` return False.
        :param method: The method to check.
:param endpoint: The application endpoint.
        :param user: The user to check. Defaults to the current user.
"""
app = self.get_app()
_user = user or self._user_loader()
if not hasattr(_user, 'get_roles'):
roles = [anonymous]
else:
roles = _user.get_roles()
return self._check_permission(roles, method, endpoint) | 0.002123 |
def aggregate(self, pipeline, session=None, **kwargs):
"""Perform an aggregation using the aggregation framework on this
collection.
All optional `aggregate command`_ parameters should be passed as
keyword arguments to this method. Valid options include, but are not
limited to:
- `allowDiskUse` (bool): Enables writing to temporary files. When set
to True, aggregation stages can write data to the _tmp subdirectory
of the --dbpath directory. The default is False.
- `maxTimeMS` (int): The maximum amount of time to allow the operation
to run in milliseconds.
- `batchSize` (int): The maximum number of documents to return per
batch. Ignored if the connected mongod or mongos does not support
returning aggregate results using a cursor, or `useCursor` is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `useCursor` (bool): Deprecated. Will be removed in PyMongo 4.0.
The :meth:`aggregate` method obeys the :attr:`read_preference` of this
:class:`Collection`. Please note that using the ``$out`` pipeline stage
requires a read preference of
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` (the default).
The server will raise an error if the ``$out`` pipeline stage is used
with any other read preference.
.. note:: This method does not support the 'explain' option. Please
use :meth:`~pymongo.database.Database.command` instead. An
example is included in the :ref:`aggregate-examples` documentation.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `pipeline`: a list of aggregation pipeline stages
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
:Returns:
A :class:`~pymongo.command_cursor.CommandCursor` over the result
set.
.. versionchanged:: 3.9
Apply this collection's read concern to pipelines containing the
`$out` stage when connected to MongoDB >= 4.2.
.. versionchanged:: 3.6
Added the `session` parameter. Added the `maxAwaitTimeMS` option.
Deprecated the `useCursor` option.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.0
The :meth:`aggregate` method always returns a CommandCursor. The
pipeline argument must be a list.
.. versionchanged:: 2.7
When the cursor option is used, return
:class:`~pymongo.command_cursor.CommandCursor` instead of
:class:`~pymongo.cursor.Cursor`.
.. versionchanged:: 2.6
Added cursor support.
.. versionadded:: 2.3
.. seealso:: :doc:`/examples/aggregation`
.. _aggregate command:
https://docs.mongodb.com/manual/reference/command/aggregate
"""
with self.__database.client._tmp_session(session, close=False) as s:
return self._aggregate(pipeline,
CommandCursor,
kwargs.get('batchSize'),
session=s,
explicit_session=session is not None,
**kwargs) | 0.001565 |
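A brief usage sketch of the API described above (the collection and field names are hypothetical):

pipeline = [
    {"$match": {"status": "active"}},
    {"$group": {"_id": "$category", "count": {"$sum": 1}}},
]
for doc in db.orders.aggregate(pipeline, allowDiskUse=True):
    print(doc["_id"], doc["count"])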
def _make_cloud_datastore_context(app_id, external_app_ids=()):
"""Creates a new context to connect to a remote Cloud Datastore instance.
This should only be used outside of Google App Engine.
Args:
app_id: The application id to connect to. This differs from the project
id as it may have an additional prefix, e.g. "s~" or "e~".
external_app_ids: A list of apps that may be referenced by data in your
application. For example, if you are connected to s~my-app and store keys
for s~my-other-app, you should include s~my-other-app in the external_apps
list.
Returns:
An ndb.Context that can connect to a Remote Cloud Datastore. You can use
this context by passing it to ndb.set_context.
"""
from . import model # Late import to deal with circular imports.
# Late import since it might not exist.
if not datastore_pbs._CLOUD_DATASTORE_ENABLED:
raise datastore_errors.BadArgumentError(
datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE)
import googledatastore
try:
from google.appengine.datastore import cloud_datastore_v1_remote_stub
except ImportError:
from google3.apphosting.datastore import cloud_datastore_v1_remote_stub
current_app_id = os.environ.get('APPLICATION_ID', None)
if current_app_id and current_app_id != app_id:
# TODO(pcostello): We should support this so users can connect to different
# applications.
raise ValueError('Cannot create a Cloud Datastore context that connects '
'to an application (%s) that differs from the application '
'already connected to (%s).' % (app_id, current_app_id))
os.environ['APPLICATION_ID'] = app_id
id_resolver = datastore_pbs.IdResolver((app_id,) + tuple(external_app_ids))
project_id = id_resolver.resolve_project_id(app_id)
endpoint = googledatastore.helper.get_project_endpoint_from_env(project_id)
datastore = googledatastore.Datastore(
project_endpoint=endpoint,
credentials=googledatastore.helper.get_credentials_from_env())
conn = model.make_connection(_api_version=datastore_rpc._CLOUD_DATASTORE_V1,
_id_resolver=id_resolver)
# If necessary, install the stubs
try:
stub = cloud_datastore_v1_remote_stub.CloudDatastoreV1RemoteStub(datastore)
apiproxy_stub_map.apiproxy.RegisterStub(datastore_rpc._CLOUD_DATASTORE_V1,
stub)
except:
pass # The stub is already installed.
# TODO(pcostello): Ensure the current stub is connected to the right project.
# Install a memcache and taskqueue stub which throws on everything.
try:
apiproxy_stub_map.apiproxy.RegisterStub('memcache', _ThrowingStub())
except:
pass # The stub is already installed.
try:
apiproxy_stub_map.apiproxy.RegisterStub('taskqueue', _ThrowingStub())
except:
pass # The stub is already installed.
return make_context(conn=conn) | 0.010884 |
def xml_encode(string):
""" Returns the string with XML-safe special characters.
"""
string = string.replace("&", "&")
string = string.replace("<", "<")
string = string.replace(">", ">")
    string = string.replace("\"", "&quot;")
string = string.replace(SLASH, "/")
return string | 0.006309 |
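For example (assuming `SLASH` is the module's own placeholder constant for a literal slash):

print(xml_encode(u'Tom & Jerry <3 "cheese"'))
# -> Tom &amp; Jerry &lt;3 &quot;cheese&quot;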
def pol2cart(theta, rho):
"""Polar to Cartesian coordinates conversion."""
x = rho * np.cos(theta)
y = rho * np.sin(theta)
return x, y | 0.006667 |
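A quick worked check with hypothetical values:

import numpy as np

x, y = pol2cart(np.pi / 4, np.sqrt(2.0))
# x and y are both 1.0 up to floating-point error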
def register_frontend_media(request, media):
"""
Add a :class:`~django.forms.Media` class to the current request.
This will be rendered by the ``render_plugin_media`` template tag.
"""
if not hasattr(request, '_fluent_contents_frontend_media'):
request._fluent_contents_frontend_media = Media()
add_media(request._fluent_contents_frontend_media, media) | 0.002597 |
def send_video(self, chat_id, video, duration=None, caption=None, reply_to_message_id=None, reply_markup=None):
"""
Use this method to send video files, Telegram clients support mp4 videos (other formats may be sent as
Document). On success, the sent Message is returned. Bots can currently send video files of up to 50 MB
in size, this limit may be changed in the future.
"""
payload = dict(chat_id=chat_id,
duration=duration,
caption=caption,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup)
files = dict(video=open(video, 'rb'))
return Message.from_api(api, **self._post('sendVideo', payload, files)) | 0.006329 |
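A hedged usage sketch (the client object, chat id, and file path are hypothetical):

message = bot.send_video(chat_id=123456789,
                         video='clips/demo.mp4',
                         caption='Nightly build walkthrough')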
async def _do_tp(self, pip, mount) -> top_types.Point:
""" Execute the work of tip probe.
This is a separate function so that it can be encapsulated in
a context manager that ensures the state of the pipette tip tracking
is reset properly. It should not be called outside of
:py:meth:`locate_tip_probe_center`.
:param pip: The pipette to use
:type pip: opentrons.hardware_control.pipette.Pipette
:param mount: The mount on which the pipette is attached
:type mount: opentrons.types.Mount
"""
# Clear the old offset during calibration
pip.update_instrument_offset(top_types.Point())
# Hotspots based on our expectation of tip length and config
hotspots = robot_configs.calculate_tip_probe_hotspots(
pip.current_tip_length, self._config.tip_probe)
new_pos: Dict[Axis, List[float]] = {
ax: [] for ax in Axis.gantry_axes() if ax != Axis.A}
safe_z = self._config.tip_probe.z_clearance.crossover + \
self._config.tip_probe.center[2]
for hs in hotspots:
ax_en = Axis[hs.axis.upper()]
overridden_center = {
ax: sum(vals)/len(vals)
if len(vals) == 2
else self._config.tip_probe.center[ax.value]
for ax, vals in new_pos.items()
}
x0 = overridden_center[Axis.X] + hs.x_start_offs
y0 = overridden_center[Axis.Y] + hs.y_start_offs
z0 = hs.z_start_abs
pos = await self.current_position(mount)
# Move safely to the setup point for the probe
await self.move_to(mount,
top_types.Point(pos[Axis.X],
pos[Axis.Y],
safe_z))
await self.move_to(mount,
top_types.Point(x0, y0, safe_z))
await self.move_to(mount,
top_types.Point(x0, y0, z0))
if ax_en == Axis.Z:
to_probe = Axis.by_mount(mount)
else:
to_probe = ax_en
# Probe and retrieve the position afterwards
async with self._motion_lock:
self._current_position = self._deck_from_smoothie(
self._backend.probe(
to_probe.name.lower(), hs.probe_distance))
xyz = await self.gantry_position(mount)
            # Store the updated position.
self._log.debug(
"tip probe: hs {}: start: ({} {} {}) status {} will add {}"
.format(hs, x0, y0, z0, new_pos, xyz[ax_en.value]))
new_pos[ax_en].append(xyz[ax_en.value])
# Before moving up, move back to clear the switches
bounce = self._config.tip_probe.bounce_distance\
* (-1.0 if hs.probe_distance > 0 else 1.0)
await self.move_rel(mount,
top_types.Point(
**{hs.axis: bounce}))
await self.move_to(mount, xyz._replace(z=safe_z))
to_ret = top_types.Point(**{ax.name.lower(): sum(vals)/len(vals)
for ax, vals in new_pos.items()})
self._log.info("Tip probe complete with {} {} on {}. "
"New position: {} (default {}), averaged from {}"
.format(pip.name, pip.pipette_id, mount.name,
to_ret, self._config.tip_probe.center,
new_pos))
return to_ret | 0.000545 |
def produce_semiotic_square_explorer(semiotic_square,
x_label,
y_label,
category_name=None,
not_category_name=None,
neutral_category_name=None,
num_terms_semiotic_square=None,
get_tooltip_content=None,
x_axis_values=None,
y_axis_values=None,
color_func=None,
axis_scaler=scale_neg_1_to_1_with_zero_mean,
**kwargs):
'''
Produces a semiotic square visualization.
Parameters
----------
semiotic_square : SemioticSquare
The basis of the visualization
x_label : str
The x-axis label in the scatter plot. Relationship between `category_a` and `category_b`.
    y_label : str
        The y-axis label in the scatter plot. Relationship between the neutral term and the complex term.
category_name : str or None
Name of category to use. Defaults to category_a.
not_category_name : str or None
Name of everything that isn't in category. Defaults to category_b.
neutral_category_name : str or None
Name of neutral set of data. Defaults to "Neutral".
num_terms_semiotic_square : int or None
10 by default. Number of terms to show in semiotic square.
get_tooltip_content : str or None
Defaults to tooltip showing z-scores on both axes.
x_axis_values : list, default None
Value-labels to show on x-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
y_axis_values : list, default None
Value-labels to show on y-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
color_func : str, default None
Javascript function to control color of a point. Function takes a parameter
which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
returns a string. Defaults to RdYlBl on x-axis, and varying saturation on y-axis.
axis_scaler : lambda, default scale_neg_1_to_1_with_zero_mean_abs_max
Scale values to fit axis
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization
'''
if category_name is None:
category_name = semiotic_square.category_a_
if not_category_name is None:
not_category_name = semiotic_square.category_b_
if get_tooltip_content is None:
get_tooltip_content = '''(function(d) {return d.term + "<br/>%s: " + Math.round(d.ox*1000)/1000+"<br/>%s: " + Math.round(d.oy*1000)/1000})''' \
% (x_label, y_label)
if color_func is None:
# this desaturates
# color_func = '(function(d) {var c = d3.hsl(d3.interpolateRdYlBu(d.x)); c.s *= d.y; return c;})'
color_func = '(function(d) {return d3.interpolateRdYlBu(d.x)})'
'''
my_scaler = scale_neg_1_to_1_with_zero_mean_abs_max
if foveate:
my_scaler = scale_neg_1_to_1_with_zero_mean_rank_abs_max
'''
axes = semiotic_square.get_axes()
return produce_scattertext_explorer(semiotic_square.term_doc_matrix_,
category=semiotic_square.category_a_,
category_name=category_name,
not_category_name=not_category_name,
not_categories=[semiotic_square.category_b_],
scores=-axes['x'],
sort_by_dist=False,
x_coords=axis_scaler(-axes['x']),
y_coords=axis_scaler(axes['y']),
original_x=-axes['x'],
original_y=axes['y'],
show_characteristic=False,
show_top_terms=False,
x_label=x_label,
y_label=y_label,
semiotic_square=semiotic_square,
neutral_categories=semiotic_square.neutral_categories_,
show_neutral=True,
neutral_category_name=neutral_category_name,
num_terms_semiotic_square=num_terms_semiotic_square,
get_tooltip_content=get_tooltip_content,
x_axis_values=x_axis_values,
y_axis_values=y_axis_values,
color_func=color_func,
show_axes=False,
**kwargs) | 0.00315 |
def plot_bhist(samples, file_type, **plot_args):
""" Create line graph plot of histogram data for BBMap 'bhist' output.
The 'samples' parameter could be from the bbmap mod_data dictionary:
samples = bbmap.MultiqcModule.mod_data[file_type]
"""
all_x = set()
for item in sorted(chain(*[samples[sample]['data'].items()
for sample in samples])):
all_x.add(item[0])
columns_to_plot = {
'GC': {
1: 'C',
2: 'G',
},
'AT': {
0: 'A',
3: 'T',
},
'N': {
4: 'N'
},
}
nucleotide_data = []
for column_type in columns_to_plot:
nucleotide_data.append(
{
sample+'.'+column_name: {
x: samples[sample]['data'][x][column]*100 if x in samples[sample]['data'] else 0
for x in all_x
}
for sample in samples
for column, column_name in columns_to_plot[column_type].items()
}
)
plot_params = {
'id': 'bbmap-' + file_type + '_plot',
'title': 'BBTools: ' + plot_args['plot_title'],
'xlab': 'Read position',
'ymin': 0,
'ymax': 100,
'data_labels': [
{'name': 'Percentage of G+C bases'},
{'name': 'Percentage of A+T bases'},
{'name': 'Percentage of N bases'},
]
}
plot_params.update(plot_args['plot_params'])
plot = linegraph.plot(
nucleotide_data,
plot_params
)
return plot | 0.004905 |
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
    The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
    or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type | 0.002188 |
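A usage sketch of the tuple forms accepted here, via the public `requests` API (URL and field names are hypothetical):

import requests

files = {
    "plain": open("report.csv", "rb"),                          # filename guessed from the file object
    "typed": ("report.csv", open("report.csv", "rb"), "text/csv"),
    "custom": ("report.csv", open("report.csv", "rb"), "text/csv",
               {"X-Extra": "1"}),                               # 4-tuple adds per-part headers
}
resp = requests.post("https://example.com/upload", files=files, data={"kind": "monthly"})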
def disown(cmd):
"""Call a system command in the background,
    disown it and hide its output."""
subprocess.Popen(cmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) | 0.004386 |
def set_agent(self, agent):
"""
Links behaviour with its owner agent
Args:
agent (spade.agent.Agent): the agent who owns the behaviour
"""
self.agent = agent
self.queue = asyncio.Queue(loop=self.agent.loop)
self.presence = agent.presence
self.web = agent.web | 0.005988 |
def params_as_tensors_for(*objs, convert=True):
"""
Context manager which changes the representation of parameters and data holders
for the specific parameterized object(s).
This can also be used to turn off tensor conversion functions wrapped with
`params_as_tensors`:
```
@gpflow.params_as_tensors
def compute_something(self): # self is parameterized object.
s = tf.reduce_sum(self.a) # self.a is a parameter.
with params_as_tensors_for(self, convert=False):
b = self.c.constrained_tensor
return s + b
```
:param objs: one or more instances of classes deriving from Parameterized
    :param convert: Flag which is used for turning tensor conversion
feature on, `True`, or turning it off, `False`.
"""
objs = set(objs) # remove duplicate objects so the tensor mode won't be changed before saving
prev_values = [_params_as_tensors_enter(o, convert) for o in objs]
try:
yield
finally:
for o, pv in reversed(list(zip(objs, prev_values))):
_params_as_tensors_exit(o, pv) | 0.00272 |
def load_module(self, name):
"""
Load and return a module
If the module is already loaded, the existing module is returned.
Otherwise, raises :py:exc:`DisabledIncludeError`.
"""
# allow reload noop
if name in sys.modules:
return sys.modules[name]
raise DisabledIncludeError('Include type %r disabled, cannot import module %r' % (self._module_prefix, name)) | 0.006928 |
def get(self):
"""
*get the ebook object*
**Return:**
- ``ebook``
**Usage:**
See class docstring for usage
"""
self.log.debug('starting the ``get`` method')
if self.format == "epub":
if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
ebook = self._url_to_epub()
elif ".docx" in self.urlOrPath:
ebook = self._docx_to_epub()
if self.format == "mobi":
if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
epub = self._url_to_epub()
elif ".docx" in self.urlOrPath:
epub = self._docx_to_epub()
if not epub:
return None
ebook = self._epub_to_mobi(
epubPath=epub,
deleteEpub=False
)
tag(
log=self.log,
filepath=ebook,
tags=False,
rating=False,
wherefrom=self.url
)
self.log.debug('completed the ``get`` method')
return ebook | 0.001773 |
def last_modified(self):
"""When conversation was last modified (:class:`datetime.datetime`)."""
timestamp = self._conversation.self_conversation_state.sort_timestamp
# timestamp can be None for some reason when there is an ongoing video
# hangout
if timestamp is None:
timestamp = 0
return parsers.from_timestamp(timestamp) | 0.005208 |
def load(self, filename, fv_extern=None):
"""
Read model stored in the file.
:param filename: Path to file with model
:param fv_extern: external feature vector function is passed here
:return:
"""
self.modelparams["mdl_stored_file"] = filename
if fv_extern is not None:
self.modelparams["fv_extern"] = fv_extern
# segparams['modelparams'] = {
# 'mdl_stored_file': mdl_stored_file,
# # 'fv_extern': fv_function
# }
self.mdl = Model(modelparams=self.modelparams) | 0.003413 |
def get_pull_request_query(self, queries, repository_id, project=None):
"""GetPullRequestQuery.
[Preview API] This API is used to find what pull requests are related to a given commit. It can be used to either find the pull request that created a particular merge commit or it can be used to find all pull requests that have ever merged a particular commit. The input is a list of queries which each contain a list of commits. For each commit that you search against, you will get back a dictionary of commit -> pull requests.
:param :class:`<GitPullRequestQuery> <azure.devops.v5_1.git.models.GitPullRequestQuery>` queries: The list of queries to perform.
:param str repository_id: ID of the repository.
:param str project: Project ID or project name
:rtype: :class:`<GitPullRequestQuery> <azure.devops.v5_1.git.models.GitPullRequestQuery>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
content = self._serialize.body(queries, 'GitPullRequestQuery')
response = self._send(http_method='POST',
location_id='b3a6eebe-9cf0-49ea-b6cb-1a4c5f5007b0',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('GitPullRequestQuery', response) | 0.004957 |
def postinit(self, body=None, finalbody=None):
"""Do some setup after initialisation.
:param body: The try-except that the finally is attached to.
:type body: list(TryExcept) or None
:param finalbody: The contents of the ``finally`` block.
:type finalbody: list(NodeNG) or None
"""
self.body = body
self.finalbody = finalbody | 0.005115 |
def get_level_nodes(self, level):
"""!
@brief Traverses CF-tree to obtain nodes at the specified level.
@param[in] level (uint): CF-tree level from that nodes should be returned.
@return (list) List of CF-nodes that are located on the specified level of the CF-tree.
"""
        level_nodes = []
        if level < self.__height:
            level_nodes = self.__recursive_get_level_nodes(level, self.__root)
        return level_nodes
def local_error(self, originalValue, calculatedValue):
"""Calculates the error between the two given values.
:param list originalValue: List containing the values of the original data.
:param list calculatedValue: List containing the values of the calculated TimeSeries that
corresponds to originalValue.
:return: Returns the error measure of the two given values.
:rtype: numeric
"""
originalValue = originalValue[0]
calculatedValue = calculatedValue[0]
if 0 == originalValue:
return None
return (math.fabs((calculatedValue - originalValue)/float(originalValue))) * 100.0 | 0.007215 |
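Worked example: for an original value of 4.0 and a calculated value of 5.0, the formula above gives |(5.0 - 4.0) / 4.0| * 100 = 25.0 (a 25 % local error); a zero original value returns None because the relative error is undefined.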
def seed_args(subparsers):
"""Add command line options for the seed operation"""
seed_parser = subparsers.add_parser('seed')
secretfile_args(seed_parser)
vars_args(seed_parser)
seed_parser.add_argument('--mount-only',
dest='mount_only',
help='Only mount paths if needed',
default=False,
action='store_true')
thaw_from_args(seed_parser)
seed_parser.add_argument('--remove-unknown',
dest='remove_unknown',
action='store_true',
help='Remove mountpoints that are not '
'defined in the Secretfile')
base_args(seed_parser) | 0.001282 |
def on_props_activated(self, menu_item):
        '''Show the properties of the selected files, or of the current directory.'''
tree_paths = self.iconview.get_selected_items()
if not tree_paths:
dialog = FolderPropertyDialog(self, self.app, self.parent.path)
dialog.run()
dialog.destroy()
else:
for tree_path in tree_paths:
pcs_file = self.get_pcs_file(tree_path)
dialog = PropertiesDialog(self.parent, self.app, pcs_file)
dialog.run()
dialog.destroy() | 0.003759 |
def pre_filter(self):
""" Return rTorrent condition to speed up data transfer.
"""
if self._name in self.PRE_FILTER_FIELDS:
if not self._value:
return '"not=${}"'.format(self.PRE_FILTER_FIELDS[self._name])
else:
val = self._value
if self._exact:
val = val.copy().pop()
return r'"string.contains_i=${},\"{}\""'.format(
self.PRE_FILTER_FIELDS[self._name], val.replace('"', r'\\\"'))
return '' | 0.005425 |
def from_coeff(self, chebcoeff, domain=None, prune=True, vscale=1.):
"""
Initialise from provided coefficients
prune: Whether to prune the negligible coefficients
vscale: the scale to use when pruning
"""
coeffs = np.asarray(chebcoeff)
if prune:
N = self._cutoff(coeffs, vscale)
pruned_coeffs = coeffs[:N]
else:
pruned_coeffs = coeffs
values = self.polyval(pruned_coeffs)
return self(values, domain, vscale) | 0.003824 |
def process(self, image_source, collect_dynamic = False, order_color = 0.9995, order_object = 0.999):
"""!
@brief Performs image segmentation.
@param[in] image_source (string): Path to image file that should be processed.
@param[in] collect_dynamic (bool): If 'True' then whole dynamic of each layer of the network is collected.
@param[in] order_color (double): Local synchronization order for the first layer - coloring segmentation.
@param[in] order_object (double): Local synchronization order for the second layer - object segmentation.
@return (syncsegm_analyser) Analyser of segmentation results by the network.
"""
self.__order_color = order_color
self.__order_object = order_object
data = read_image(image_source)
color_analyser = self.__analyse_colors(data, collect_dynamic)
if self.__object_radius is None:
return syncsegm_analyser(color_analyser, None)
object_segment_analysers = self.__analyse_objects(image_source, color_analyser, collect_dynamic)
return syncsegm_analyser(color_analyser, object_segment_analysers) | 0.018519 |
def register(self, endpoint, procedure=None, options=None):
"""Register a procedure for remote calling.
Replace :meth:`autobahn.wamp.interface.IApplicationSession.register`
"""
def proxy_endpoint(*args, **kwargs):
return self._callbacks_runner.put(partial(endpoint, *args, **kwargs))
return self._async_session.register(proxy_endpoint, procedure=procedure, options=options) | 0.00939 |
def write_outro (self, interrupt=False):
"""Write end of checking message."""
self.writeln()
if interrupt:
self.writeln(_("The check has been interrupted; results are not complete."))
self.write(_("That's it.") + " ")
self.write(_n("%d link", "%d links",
self.stats.number) % self.stats.number)
self.write(u" ")
if self.stats.num_urls is not None:
self.write(_n("in %d URL", "in %d URLs",
self.stats.num_urls) % self.stats.num_urls)
self.write(u" checked. ")
warning_text = _n("%d warning found", "%d warnings found",
self.stats.warnings_printed) % self.stats.warnings_printed
if self.stats.warnings_printed:
warning_color = self.colorwarning
else:
warning_color = self.colorinfo
self.write(warning_text, color=warning_color)
if self.stats.warnings != self.stats.warnings_printed:
self.write(_(" (%d ignored or duplicates not printed)") %
(self.stats.warnings - self.stats.warnings_printed))
self.write(u". ")
error_text = _n("%d error found", "%d errors found",
self.stats.errors_printed) % self.stats.errors_printed
if self.stats.errors_printed:
error_color = self.colorinvalid
else:
error_color = self.colorvalid
self.write(error_text, color=error_color)
if self.stats.errors != self.stats.errors_printed:
self.write(_(" (%d duplicates not printed)") %
(self.stats.errors - self.stats.errors_printed))
self.writeln(u".")
num = self.stats.internal_errors
if num:
self.writeln(_n("There was %(num)d internal error.",
"There were %(num)d internal errors.", num) % {"num": num})
self.stoptime = time.time()
duration = self.stoptime - self.starttime
self.writeln(_("Stopped checking at %(time)s (%(duration)s)") %
{"time": strformat.strtime(self.stoptime),
"duration": strformat.strduration_long(duration)}) | 0.004625 |