text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
def download(url, path=None, headers=None, session=None, show_progress=True,
resume=True, auto_retry=True, max_rst_retries=5,
pass_through_opts=None, cainfo=None, user_agent=None, auth=None):
"""Main download function"""
hm = Homura(url, path, headers, session, show_progress, resume,
auto_retry, max_rst_retries, pass_through_opts, cainfo,
user_agent, auth)
hm.start() | 0.002278 |
def check_path(path):
"""Check that a path is legal.
:return: the path if all is OK
:raise ValueError: if the path is illegal
"""
if path is None or path == b'' or path.startswith(b'/'):
raise ValueError("illegal path '%s'" % path)
    if (
        (sys.version_info[0] >= 3 and not isinstance(path, bytes)) or
        (sys.version_info[0] == 2 and not isinstance(path, str))
    ):
        raise TypeError("illegal type for path '%r'" % path)
return path | 0.002033 |
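A quick usage sketch of the corrected check_path, assuming the function above is importable and that paths are handled as bytes under Python 3:

print(check_path(b'srv/files'))    # returned unchanged: relative bytes paths are legal
try:
    check_path(b'/etc/passwd')     # absolute paths are rejected
except ValueError as exc:
    print(exc)                     # prints the ValueError message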
def _remove_rid_from_vrf_list(self, ri):
"""Remove router ID from a VRF list.
This removes a router from the list of routers that's kept
in a map, using a VRF ID as the key. If the VRF exists, the
router is removed from the list if it's present. If the last
router in the list is removed, then the driver's method to
remove the VRF is called and the map entry for that
VRF is deleted.
"""
if ri.ex_gw_port or ri.router.get('gw_port'):
driver = self.driver_manager.get_driver(ri.id)
vrf_name = driver._get_vrf_name(ri)
if self._router_ids_by_vrf.get(vrf_name) and (
ri.router['id'] in self._router_ids_by_vrf[vrf_name]):
self._router_ids_by_vrf[vrf_name].remove(ri.router['id'])
# If this is the last router in a VRF, then we can safely
# delete the VRF from the router config (handled by the driver)
if not self._router_ids_by_vrf.get(vrf_name):
LOG.debug("++ REMOVING VRF %s" % vrf_name)
driver._remove_vrf(ri)
del self._router_ids_by_vrf[vrf_name] | 0.001664 |
def hard_path(path, prefix_dir):
"""Returns an absolute path to either the relative or absolute file."""
relative = abspath("%s/%s" % (prefix_dir, path))
a_path = abspath(path)
if os.path.exists(relative):
LOG.debug("using relative path %s (%s)", relative, path)
return relative
LOG.debug("using absolute path %s", a_path)
return a_path | 0.002653 |
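A usage sketch, assuming hard_path, its LOG logger and the bare abspath import from the snippet above are in scope:

import os
import tempfile

prefix = tempfile.mkdtemp()
open(os.path.join(prefix, 'app.yml'), 'w').close()
print(hard_path('app.yml', prefix))      # resolves inside the prefix because the file exists there
print(hard_path('missing.yml', prefix))  # falls back to the absolute form of 'missing.yml'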
def add_job(self, job, merged=False, widened=False):
"""
        Append a new job to this JobInfo node.
:param job: The new job to append.
:param bool merged: Whether it is a merged job or not.
:param bool widened: Whether it is a widened job or not.
"""
job_type = ''
if merged:
job_type = 'merged'
elif widened:
job_type = 'widened'
self.jobs.append((job, job_type)) | 0.004283 |
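A minimal sketch of how add_job behaves, assuming the function above sits at module level; the _JobInfoStub class is a hypothetical stand-in exposing the .jobs list the method expects:

class _JobInfoStub:
    def __init__(self):
        self.jobs = []
    add_job = add_job  # reuse the module-level function above as a method

info = _JobInfoStub()
info.add_job('job-0')
info.add_job('job-1', merged=True)
print(info.jobs)  # [('job-0', ''), ('job-1', 'merged')]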
def get_description(self):
"""Returns description text as provided by the studio"""
if self._description:
return self._description
try:
trailerURL= "http://trailers.apple.com%s" % self.baseURL
response = urllib.request.urlopen(trailerURL)
Reader = codecs.getreader("utf-8")
responseReader = Reader(response)
trailerHTML = responseReader.read()
description = re.search('<meta *name="Description" *content="(.*?)" *[/]*>'
,trailerHTML)
if description:
self._description = description.group(1)
else:
self._description = "None"
except:
self._description = "Error"
return self._description | 0.00861 |
def assemble_bucket(item):
"""Assemble a document representing all the config state around a bucket.
TODO: Refactor this, the logic here feels quite muddled.
"""
factory, b = item
s = factory()
c = s.client('s3')
# Bucket Location, Current Client Location, Default Location
b_location = c_location = location = "us-east-1"
methods = list(S3_AUGMENT_TABLE)
for m, k, default, select in methods:
try:
method = getattr(c, m)
v = method(Bucket=b['Name'])
v.pop('ResponseMetadata')
if select is not None and select in v:
v = v[select]
except (ssl.SSLError, SSLError) as e:
            # Proxy issues? I assume
log.warning("Bucket ssl error %s: %s %s",
b['Name'], b.get('Location', 'unknown'),
e)
continue
except ClientError as e:
code = e.response['Error']['Code']
if code.startswith("NoSuch") or "NotFound" in code:
v = default
elif code == 'PermanentRedirect':
s = factory()
c = bucket_client(s, b)
# Requeue with the correct region given location constraint
methods.append((m, k, default, select))
continue
else:
log.warning(
"Bucket:%s unable to invoke method:%s error:%s ",
b['Name'], m, e.response['Error']['Message'])
# For auth failures, we don't bail out, continue processing if we can.
# Note this can lead to missing data, but in general is cleaner than
# failing hard, due to the common use of locked down s3 bucket policies
# that may cause issues fetching information across a fleet of buckets.
# This does mean s3 policies depending on augments should check denied
# methods annotation, generally though lacking get access to an augment means
# they won't have write access either.
# For other error types we raise and bail policy execution.
if e.response['Error']['Code'] == 'AccessDenied':
b.setdefault('c7n:DeniedMethods', []).append(m)
continue
raise
# As soon as we learn location (which generally works)
if k == 'Location' and v is not None:
b_location = v.get('LocationConstraint')
# Location == region for all cases but EU
# https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
if b_location is None:
b_location = "us-east-1"
elif b_location == 'EU':
b_location = "eu-west-1"
v['LocationConstraint'] = 'eu-west-1'
if v and v != c_location:
c = s.client('s3', region_name=b_location)
elif c_location != location:
c = s.client('s3', region_name=location)
b[k] = v
return b | 0.002254 |
def ns(self, prefix, tag):
"""
Given a prefix and an XML tag, output the qualified name
for proper namespace handling on output.
"""
return etree.QName(self.prefixes[prefix], tag) | 0.009132 |
def autoclear(self):
"""Clear Redis and ThreatConnect data from staging data."""
for sd in self.staging_data:
data_type = sd.get('data_type', 'redis')
if data_type == 'redis':
self.clear_redis(sd.get('variable'), 'auto-clear')
elif data_type == 'redis-array':
self.clear_redis(sd.get('variable'), 'auto-clear')
for variables in sd.get('data', {}).get('variables') or []:
self.clear_redis(variables.get('value'), 'auto-clear')
elif data_type == 'threatconnect':
# self.clear_tc(sd.get('data_owner'), sd.get('data', {}), 'auto-clear')
# self.clear_redis(sd.get('variable'), 'auto-clear')
pass
elif data_type == 'threatconnect-association':
# assuming these have already been cleared
pass
elif data_type == 'threatconnect-batch':
for group in sd.get('data', {}).get('group') or []:
self.clear_tc(sd.get('data_owner'), group, 'auto-clear')
self.clear_redis(group.get('variable'), 'auto-clear')
for indicator in sd.get('data', {}).get('indicator') or []:
self.clear_tc(sd.get('data_owner'), indicator, 'auto-clear')
self.clear_redis(indicator.get('variable'), 'auto-clear')
for vd in self.profile.get('validations') or []:
data_type = vd.get('data_type', 'redis')
variable = vd.get('variable')
if data_type == 'redis':
self.clear_redis(variable, 'auto-clear') | 0.002407 |
def strace(device, trace_address, breakpoint_address):
"""Implements simple trace using the STrace API.
Args:
device (str): the device to connect to
trace_address (int): address to begin tracing from
breakpoint_address (int): address to breakpoint at
Returns:
``None``
"""
jlink = pylink.JLink()
jlink.open()
# Do the initial connection sequence.
jlink.power_on()
jlink.set_tif(pylink.JLinkInterfaces.SWD)
jlink.connect(device)
jlink.reset()
# Clear any breakpoints that may exist as of now.
jlink.breakpoint_clear_all()
# Start the simple trace.
op = pylink.JLinkStraceOperation.TRACE_START
jlink.strace_clear_all()
jlink.strace_start()
# Set the breakpoint and trace events, then restart the CPU so that it
# will execute.
bphandle = jlink.breakpoint_set(breakpoint_address, thumb=True)
trhandle = jlink.strace_code_fetch_event(op, address=trace_address)
jlink.restart()
time.sleep(1)
# Run until the CPU halts due to the breakpoint being hit.
while True:
if jlink.halted():
break
# Print out all instructions that were captured by the trace.
while True:
instructions = jlink.strace_read(1)
if len(instructions) == 0:
break
instruction = instructions[0]
print(jlink.disassemble_instruction(instruction))
jlink.power_off()
jlink.close() | 0.00069 |
def check_pianoroll(arr):
"""
Return True if the array is a standard piano-roll matrix. Otherwise,
return False. Raise TypeError if the input object is not a numpy array.
"""
if not isinstance(arr, np.ndarray):
raise TypeError("`arr` must be of np.ndarray type")
if not (np.issubdtype(arr.dtype, np.bool_)
or np.issubdtype(arr.dtype, np.number)):
return False
if arr.ndim != 2:
return False
if arr.shape[1] != 128:
return False
return True | 0.001923 |
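A short usage sketch, assuming check_pianoroll from the snippet above is in scope:

import numpy as np

roll = np.zeros((96, 128), dtype=bool)  # 96 time steps x 128 MIDI pitches
print(check_pianoroll(roll))            # True
print(check_pianoroll(roll[:, :64]))    # False: the second axis must have 128 pitches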
def metaseries_description_metadata(description):
"""Return metatata from MetaSeries image description as dict."""
if not description.startswith('<MetaData>'):
raise ValueError('invalid MetaSeries image description')
from xml.etree import cElementTree as etree # delayed import
root = etree.fromstring(description)
types = {'float': float, 'int': int,
'bool': lambda x: asbool(x, 'on', 'off')}
def parse(root, result):
# recursive
for child in root:
attrib = child.attrib
if not attrib:
result[child.tag] = parse(child, {})
continue
if 'id' in attrib:
i = attrib['id']
t = attrib['type']
v = attrib['value']
if t in types:
result[i] = types[t](v)
else:
result[i] = v
return result
adict = parse(root, {})
if 'Description' in adict:
adict['Description'] = adict['Description'].replace(' ', '\n')
return adict | 0.000912 |
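A usage sketch, assuming metaseries_description_metadata above is in scope; the property names in the XML are made up for illustration:

desc = ('<MetaData>'
        '<prop id="pixel-size-x" type="float" value="0.65"/>'
        '<prop id="stage-label" type="string" value="A01"/>'
        '</MetaData>')
print(metaseries_description_metadata(desc))
# {'pixel-size-x': 0.65, 'stage-label': 'A01'}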
def patch_priority_class(self, name, body, **kwargs):
"""
partially update the specified PriorityClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_priority_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PriorityClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1PriorityClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_priority_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_priority_class_with_http_info(name, body, **kwargs)
return data | 0.003682 |
def randint(self, a: int, b: int, n: Optional[int] = None) -> Union[List[int], int]:
""" Generate n numbers as a list or a single one if no n is given.
n is used to minimize the number of requests made and return type changes to be compatible
with :py:mod:`random`'s interface
"""
max_n = self.config.MAX_NUMBER_OF_INTEGERS
return self._generate_randoms(self._request_randints, max_n=max_n, a=a, b=b, n=n) | 0.011013 |
def raw_sql(self, query, results=False):
"""
Execute a given query string. Could have unexpected results if the
query modifies the behavior of the session in a way unknown to Ibis; be
careful.
Parameters
----------
query : string
DML or DDL statement
results : boolean, default False
          Pass True if the query has a result set
Returns
-------
cur : ImpalaCursor if results=True, None otherwise
You must call cur.release() after you are finished using the cursor.
"""
return self._execute(query, results=results) | 0.00311 |
def dechunk(stream):
"""De-chunk HTTP body stream.
:param file stream: readable file-like object.
:rtype: __generator[bytes]
:raise: DechunkError
"""
# TODO(vovan): Add support for chunk extensions:
# TODO(vovan): http://tools.ietf.org/html/rfc2616#section-3.6.1
while True:
chunk_len = read_until(stream, b'\r\n')
if chunk_len is None:
raise DechunkError(
'Could not extract chunk size: unexpected end of data.')
try:
chunk_len = int(chunk_len.strip(), 16)
except (ValueError, TypeError) as err:
raise DechunkError('Could not parse chunk size: %s' % (err,))
if chunk_len == 0:
break
bytes_to_read = chunk_len
while bytes_to_read:
chunk = stream.read(bytes_to_read)
bytes_to_read -= len(chunk)
yield chunk
# chunk ends with \r\n
crlf = stream.read(2)
if crlf != b'\r\n':
raise DechunkError('No CR+LF at the end of chunk!') | 0.000948 |
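A runnable sketch of de-chunking a small HTTP body, assuming the dechunk generator above is in scope; DechunkError and read_until below are stand-ins for helpers the snippet references but does not define:

import io

class DechunkError(Exception):
    pass

def read_until(stream, delimiter):
    # Read byte-by-byte until the delimiter is consumed; return the bytes before it.
    data = b''
    while not data.endswith(delimiter):
        byte = stream.read(1)
        if not byte:
            return None
        data += byte
    return data[:-len(delimiter)]

body = io.BytesIO(b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')
print(b''.join(dechunk(body)))  # b'Wikipedia'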
def get_labs(format):
"""Gets Fab Lab data from fablabs.io."""
fablabs_json = data_from_fablabs_io(fablabs_io_labs_api_url_v0)
fablabs = {}
# Load all the FabLabs
for i in fablabs_json["labs"]:
current_lab = FabLab()
current_lab.name = i["name"]
current_lab.address_1 = i["address_1"]
current_lab.address_2 = i["address_2"]
current_lab.address_notes = i["address_notes"]
current_lab.avatar = i["avatar_url"]
current_lab.blurb = i["blurb"]
current_lab.capabilities = i["capabilities"]
if i["city"].isupper():
i["city"] = i["city"].title()
current_lab.city = i["city"]
current_lab.country_code = i["country_code"]
current_lab.county = i["county"]
current_lab.description = i["description"]
current_lab.email = i["email"]
current_lab.id = i["id"]
current_lab.phone = i["phone"]
current_lab.postal_code = i["postal_code"]
current_lab.slug = i["slug"]
current_lab.url = i["url"]
current_lab.continent = country_alpha2_to_continent_code(i["country_code"].upper())
current_country = pycountry.countries.get(alpha_2=i["country_code"].upper())
current_lab.country_code = current_country.alpha_3
current_lab.country = current_country.name
# Check coordinates
if i["longitude"] is not None:
current_lab.longitude = i["longitude"]
else:
current_lab.longitude = 0.0
if i["latitude"] is not None:
current_lab.latitude = i["latitude"]
else:
current_lab.latitude = 0.0
# Find Facebook and Twitter links, add also the other ones
current_lab.links = {"facebook": "", "twitter": ""}
for link in i["links"]:
if "facebook" in link["url"]:
current_lab.links["facebook"] = link["url"]
elif "twitter" in link["url"]:
current_lab.links["twitter"] = link["url"]
else:
current_lab.links[link["id"]] = link["url"]
# Add the lab to the list
fablabs[i["slug"]] = current_lab
    # Return a dictionary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in fablabs:
output[j] = fablabs[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in fablabs:
single = fablabs[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in fablabs:
output[j] = fablabs[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = fablabs
    # Default: return an object
else:
output = fablabs
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output | 0.001165 |
def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
# Make sure page is available
if self.page is None:
            raise Exception("The HTML data was not fetched for some reason")
soup = BeautifulSoup(self.page,'html.parser')
# Check for invalid tracking number by checking if table element is present
if soup.find('thead') == None:
raise ValueError('Invalid tracking number')
# Assign the current status of the shipment - self.status
if 'Returned' in self.page:
self.status = 'R'
elif 'Signed for by:' in self.page:
self.status = 'C'
else: # The shipment is in Transit
self.status = 'T'
# The full checkpoints table div.
table = soup.find('table',{'class':'result-checkpoints'}).contents
cur_date = None # The date of the next few checkpoints, initially None
checkpoint = None
for element in table:
if element.name == 'thead':
# This has the date for the next few checkpoints
cur_date = element.find('th',{'colspan':'2'}).string.strip() + ' '
elif element.name == 'tbody':
# A checkpoint whose date = cur_date
checkpoint = {'status':'','date':cur_date,'location':''}
tds = element.findAll('td')
checkpoint['status'] = tds[1].string.strip()
checkpoint['location'] = tds[2].string.strip()
checkpoint['date'] += tds[3].string.strip()
date_time_format = "%d-%b-%Y %H:%M"
checkpoint['date'] = parse(checkpoint['date'])
self.tracking_data.append(checkpoint)
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date']) | 0.033375 |
def get_all(self, start=0, count=-1, filter='', query='', sort='', view='', fields='', uri=None, scope_uris=''):
"""Gets all items according with the given arguments.
Args:
start: The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count: The number of resources to return. A count of -1 requests all items (default).
filter (list or str): A general filter/query string to narrow the list of items returned. The default is no
filter; all resources are returned.
query: A single query parameter can do what would take multiple parameters or multiple GET requests using
filter. Use query for more complex queries. NOTE: This parameter is experimental for OneView 2.0.
sort: The sort order of the returned data set. By default, the sort order is based on create time with the
oldest entry first.
view:
Returns a specific subset of the attributes of the resource or collection by specifying the name of a
predefined view. The default view is expand (show all attributes of the resource and all elements of
the collections or resources).
fields:
Name of the fields.
uri:
A specific URI (optional)
scope_uris:
An expression to restrict the resources returned according to the scopes to
which they are assigned.
Returns:
list: A list of items matching the specified filter.
"""
if not uri:
uri = self._base_uri
uri = self.build_query_uri(uri=uri,
start=start,
count=count,
filter=filter,
query=query,
sort=sort,
view=view,
fields=fields,
scope_uris=scope_uris)
logger.debug('Getting all resources with uri: {0}'.format(uri))
return self.do_requests_to_getall(uri, count) | 0.005233 |
def rename_key(self, key_to_rename, strict=True):
"""
Rename the given keys from the given dictionary.
:param key_to_rename:
The key(s) to rename.
Expected format: :code:`{old:new}`
:type key_to_rename: dict
:param strict:
Tell us if we have to rename the exact index or
the index which looks like the given key(s)
:return: The well formatted dict.
:rtype: dict|None
"""
if isinstance(self.main_dictionnary, dict) and isinstance(key_to_rename, dict):
            # * The given main dictionary is a dictionary.
            # and
            # * The given key to rename is a dictionary.
for old, new in key_to_rename.items():
                # We loop through the keys to rename.
if strict:
# The strict method is activated.
if old in self.main_dictionnary:
                        # The old key is in the main dictionary.
# We initiate the new with the old and remove the old content.
self.main_dictionnary[new] = self.main_dictionnary.pop(old)
else:
# The strict method is not activated.
# We initiate the elements to rename.
to_rename = {}
for index in self.main_dictionnary:
                        # We loop through the indexes of the main dictionary.
if old in index:
# The old key is into the index name.
# We append the index name and the new index to our
# local list to rename.
to_rename.update({index: new[:-1] + index.split(old)[-1]})
# We run this method against the local list to rename in order
# to rename the element.
self.main_dictionnary = Dict(self.main_dictionnary).rename_key(
to_rename, True
)
# We return the final list.
return self.main_dictionnary
        # * The given main dictionary is not a dictionary.
        # or
        # * The given key to rename is not a dictionary.
# We return None.
return None | 0.003388 |
def publish_network(user=None, reset=False):
"""
Generate graph network for a user and plot it using Plotly
"""
username = generate_network(user, reset)
network_file = username_to_file(username)
plot_data = prepare_plot_data(network_file)
data = Data(plot_data)
# hide axis line, grid, ticklabels and title
axis = dict(showline=False,
zeroline=False,
showgrid=False,
showticklabels=False,
title=''
)
width = 800
height = 800
layout = Layout(title='GitHub Network for "{0}"'.format(username),
font=Font(size=12),
showlegend=False,
autosize=False,
width=width,
height=height,
xaxis=XAxis(axis),
yaxis=YAxis(axis),
margin=Margin(
l=40,
r=40,
b=85,
t=100,
),
hovermode='closest',
annotations=Annotations([
Annotation(
showarrow=False,
text='This igraph.Graph has the graphopt layout',
xref='paper',
yref='paper',
x=0,
y=-0.1,
xanchor='left',
yanchor='bottom',
font=Font(
size=14
)
)
]),
)
fig = Figure(data=data, layout=layout)
# use credentials of the bot "octogrid", if user isn't authenticated
login_as_bot()
try:
plot_id = ''.join(choice(string.lowercase) for i in range(5))
plot_url = plotly.plot(
fig, filename='Octogrid: GitHub communities for {0} [v{1}]'.format(username, plot_id))
print 'Published the network graph at {0}'.format(plot_url)
except Exception, e:
raise e | 0.000913 |
def _merge_two_curves(curve1: Curve, curve2: Curve, qmin, qmax, qsep, use_additive_constant=False):
"""Merge two scattering curves
:param curve1: the first curve (longer distance)
:type curve1: sastool.classes.curve.GeneralCurve
:param curve2: the second curve (shorter distance)
:type curve2: sastool.classes.curve.GeneralCurve
:param qmin: lower bound of the interval for determining the scaling factor
:type qmin: float
:param qmax: upper bound of the interval for determining the scaling factor
:type qmax: float
:param qsep: separating (tailoring) point for the merge
:type qsep: float
:return: merged_curve, factor, background, stat
    :rtype: tuple of a sastool.classes2.curve.Curve and a float
"""
    curve1 = curve1.sanitize()
    curve2 = curve2.sanitize()
if len(curve1.trim(qmin, qmax)) > len(curve2.trim(qmin, qmax)):
curve2_interp = curve2.trim(qmin, qmax)
curve1_interp = curve1.interpolate(curve2_interp.q)
else:
curve1_interp = curve1.trim(qmin, qmax)
curve2_interp = curve2.interpolate(curve1_interp.q)
if use_additive_constant:
bg_init = 0
else:
bg_init = FixedParameter(0)
factor, bg, stat = nonlinear_odr(curve2_interp.Intensity, curve1_interp.Intensity,
curve2_interp.Error, curve1_interp.Error,
lambda x, factor, bg: x * factor + bg, [1.0, bg_init])
return Curve.merge(curve1 - bg, curve2 * factor, qsep), factor, bg, stat | 0.003894 |
def get_content_comments(self, content_id, expand=None, parent_version=None, start=None, limit=None,
location=None, depth=None, callback=None):
"""
Returns the comments associated with a piece of content.
:param content_id (string): A string containing the id of the content to retrieve children for.
:param expand (string): OPTIONAL: a comma separated list of properties to expand on the children.
We can also specify some extensions such as extensions.inlineProperties (for getting
inline comment-specific properties) or extensions.resolution for the resolution status
of each comment in the results. Default: Empty
:param parent_version (int): OPTIONAL: An int representing the version of the content to retrieve children for.
Default: 0
:param start (int): OPTIONAL: The index of the first item within the result set that should be returned.
Default: 0.
:param limit (int): OPTIONAL: How many items should be returned after the start index. Default: Site limit.
:param location (string): OPTIONAL: The location of the comments. Possible values are: "inline", "footer",
"resolved". You can define multiple location params. The results will be the comments
matched by any location. Default: "" (all).
:param depth: The depth of the comments. Possible values are: "" (ROOT only), "all". Default: "".
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/child/comment endpoint, or the results of the
callback. Will raise requests.HTTPError on bad input, potentially.
"""
params = {}
if expand:
params["expand"] = expand
if parent_version:
params["parentVersion"] = parent_version
if start is not None:
params["start"] = int(start)
if limit is not None:
params["limit"] = int(limit)
if location:
params["location"] = location
if depth:
assert depth in {"", "all"}
params["depth"] = depth
return self._service_get_request("rest/api/content/{id}/child/comment".format(id=content_id),
params=params, callback=callback) | 0.006795 |
def point_on_line(point, line_start, line_end, accuracy=50.):
"""Checks whether a point lies on a line
The function checks whether the point "point" (P) lies on the line defined by its starting point line_start (A) and
its end point line_end (B).
This is done by comparing the distance of [AB] with the sum of the distances [AP] and [PB]. If the difference is
smaller than [AB] / accuracy, the point P is assumed to be on the line. By increasing the value of accuracy (the
default is 50), the tolerance is decreased.
    :param point: Point to be checked (tuple with x and y coordinate)
    :param line_start: Starting point of the line (tuple with x and y coordinate)
    :param line_end: End point of the line (tuple with x and y coordinate)
    :param accuracy: The higher this value, the less distance is tolerated
    :return: True if the point is on the line, False if not
"""
length = dist(line_start, line_end)
ds = length / float(accuracy)
if -ds < (dist(line_start, point) + dist(point, line_end) - length) < ds:
return True
return False | 0.004537 |
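A usage sketch, assuming point_on_line above is in scope; dist is a stand-in for the Euclidean-distance helper it relies on:

import math

def dist(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

print(point_on_line((1.0, 1.0), (0.0, 0.0), (2.0, 2.0)))  # True: midpoint of the segment
print(point_on_line((1.0, 1.5), (0.0, 0.0), (2.0, 2.0)))  # False: too far off the line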
def call(self, params, _context, **kwargs):
'''Note that we're returning a Promise object here, and bypassing the
helper functionality that normally sets the results struct from the
returned object. Instead, we set _context.results directly inside of
another promise'''
assert len(params) == self.paramCount
# using setattr because '=' is not allowed inside of lambdas
return evaluate_impl(self.body, params).then(lambda value: setattr(_context.results, 'value', value)) | 0.005693 |
def save_playlist_file(self, stationFile=''):
""" Save a playlist
Create a txt file and write stations in it.
Then rename it to final target
return 0: All ok
-1: Error writing file
-2: Error renaming file
"""
if self._playlist_format_changed():
self.dirty_playlist = True
self.new_format = not self.new_format
if stationFile:
st_file = stationFile
else:
st_file = self.stations_file
if not self.dirty_playlist:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Playlist not modified...')
return 0
st_new_file = st_file.replace('.csv', '.txt')
tmp_stations = self.stations[:]
tmp_stations.reverse()
if self.new_format:
tmp_stations.append([ '# Find lots more stations at http://www.iheart.com' , '', '' ])
else:
tmp_stations.append([ '# Find lots more stations at http://www.iheart.com' , '' ])
tmp_stations.reverse()
try:
with open(st_new_file, 'w') as cfgfile:
writter = csv.writer(cfgfile)
for a_station in tmp_stations:
writter.writerow(self._format_playlist_row(a_station))
except:
if logger.isEnabledFor(logging.DEBUG):
                logger.debug('Cannot open playlist file for writing...')
return -1
try:
move(st_new_file, st_file)
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Cannot rename playlist file...')
return -2
self.dirty_playlist = False
return 0 | 0.006877 |
def set(self, instance, value, **kw): # noqa
"""Set the value of the uid reference field
"""
ref = []
# The value is an UID
if api.is_uid(value):
ref.append(value)
# The value is a dictionary, get the UIDs.
if u.is_dict(value):
            ref.append(value.get("uid"))
# The value is already an object
if api.is_at_content(value):
ref.append(value)
# The value is a list
if u.is_list(value):
for item in value:
# uid
if api.is_uid(item):
ref.append(item)
# dict (catalog query)
elif u.is_dict(item):
# If there is UID of objects, just use it.
uid = item.get('uid', None)
if uid:
ref.append(uid)
# Handle non multi valued fields
if not self.multi_valued:
if len(ref) > 1:
raise ValueError("Multiple values given for single valued "
"field {}".format(repr(self.field)))
else:
ref = ref[0]
return self._set(instance, ref, **kw) | 0.001612 |
def decode_varint(f, max_bytes=4):
"""Decode variable integer using algorithm similar to that described
in MQTT Version 3.1.1 line 297.
Parameters
----------
f: file
Object with a read method.
max_bytes: int or None
If a varint cannot be constructed using `max_bytes` or fewer
from f then raises a `DecodeError`. If None then there is no
maximum number of bytes.
Raises
-------
DecodeError
When length is greater than max_bytes.
UnderflowDecodeError
When file ends before enough bytes can be read to construct the
varint.
Returns
-------
int
Number of bytes consumed.
int
Value extracted from `f`.
"""
num_bytes_consumed = 0
value = 0
m = 1
while True:
buf = f.read(1)
if len(buf) == 0:
raise UnderflowDecodeError()
(u8,) = FIELD_U8.unpack(buf)
value += (u8 & 0x7f) * m
m *= 0x80
num_bytes_consumed += 1
if u8 & 0x80 == 0:
# No further bytes
break
elif max_bytes is not None and num_bytes_consumed >= max_bytes:
raise DecodeError('Variable integer contained more than maximum bytes ({}).'.format(max_bytes))
return num_bytes_consumed, value | 0.001522 |
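A runnable sketch, assuming decode_varint above is in scope; FIELD_U8 and the two error classes are stand-ins for module-level names it uses:

import io
import struct

FIELD_U8 = struct.Struct('>B')

class UnderflowDecodeError(Exception):
    pass

class DecodeError(Exception):
    pass

# 321 encodes as 0xC1 0x02 in the MQTT variable-length scheme
# (7 data bits per byte, continuation flag in the high bit).
num_bytes, value = decode_varint(io.BytesIO(b'\xc1\x02'))
print(num_bytes, value)  # 2 321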
def _restore_queue(self):
"""Restore the previous state of the queue.
Note:
The restore currently adds the items back into the queue
using the URI, for items the Sonos system already knows about
this is OK, but for other items, they may be missing some of
their metadata as it will not be automatically picked up.
"""
if self.queue is not None:
# Clear the queue so that it can be reset
self.device.clear_queue()
# Now loop around all the queue entries adding them
for queue_group in self.queue:
for queue_item in queue_group:
self.device.add_uri_to_queue(queue_item.uri) | 0.002717 |
def calculate_positions(self, first_bee_val, second_bee_val, value_range):
'''Calculate the new value/position for two given bee values
Args:
first_bee_val (int or float): value from the first bee
second_bee_val (int or float): value from the second bee
            value_range (tuple): "(value type, (min_val, max_val))" for the
given value
Returns:
int or float: new value
'''
value = first_bee_val + np.random.uniform(-1, 1) \
* (first_bee_val - second_bee_val)
if value_range[0] == 'int':
value = int(value)
if value > value_range[1][1]:
value = value_range[1][1]
if value < value_range[1][0]:
value = value_range[1][0]
return value | 0.00246 |
def index(self, item):
""" Not recommended for use on large lists due to time
complexity, but it works
-> #int list index of @item
"""
for i, x in enumerate(self.iter()):
if x == item:
return i
return None | 0.006897 |
def typeOf(cls, expected_type): #pylint: disable=no-self-argument,invalid-name,no-self-use
"""
(*Type does NOT consider inherited class)
Matcher.mtest(...) will return True if type(...) == expected_type
Return: Matcher
Raise: matcher_type_error
"""
if isinstance(expected_type, type):
options = {}
options["target_type"] = expected_type
return Matcher("__TYPE__", options)
ErrorHandler.matcher_type_error(expected_type) | 0.009634 |
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionadded:: 0.5
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value, key == 'filename')
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = next(parts)[0]
extra = dict(parts)
return name, extra | 0.001034 |
def extract_meta(self, text):
"""
Takes input as the entire file.
Reads the first yaml document as metadata.
and the rest of the document as text
"""
first_line = True
metadata = []
content = []
metadata_parsed = False
for line in text.split('\n'):
if first_line:
first_line = False
if line.strip() != '---':
raise MetaParseException('Invalid metadata')
else:
continue
if line.strip() == '' and not metadata_parsed:
continue
if line.strip() == '---' and not metadata_parsed:
# reached the last line
metadata_parsed = True
elif not metadata_parsed:
metadata.append(line)
else:
content.append(line)
content = '\n'.join(content)
        try:
            metadata = yaml.load('\n'.join(metadata))
        except yaml.YAMLError:
            # If the metadata block cannot be parsed, fall back to returning
            # the whole document as content with empty metadata.
            content = text
            metadata = yaml.load('')
return content, metadata | 0.002627 |
def clustering_coef_wu(W):
'''
The weighted clustering coefficient is the average "intensity" of
triangles around a node.
Parameters
----------
W : NxN np.ndarray
weighted undirected connection matrix
Returns
-------
C : Nx1 np.ndarray
clustering coefficient vector
'''
K = np.array(np.sum(np.logical_not(W == 0), axis=1), dtype=float)
ws = cuberoot(W)
cyc3 = np.diag(np.dot(ws, np.dot(ws, ws)))
K[np.where(cyc3 == 0)] = np.inf # if no 3-cycles exist, set C=0
C = cyc3 / (K * (K - 1))
return C | 0.001736 |
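A small worked example, assuming clustering_coef_wu above is in scope; cuberoot is a stand-in for the signed cube-root helper it calls:

import numpy as np

def cuberoot(x):
    return np.sign(x) * np.abs(x) ** (1. / 3)

W = np.array([[0.0, 0.5, 0.5],
              [0.5, 0.0, 0.5],
              [0.5, 0.5, 0.0]])  # a single weighted triangle
print(clustering_coef_wu(W))     # [0.5 0.5 0.5]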
def expandFunction(self, func, args=[]):
"""applies the given function to each of this stimulus's memerships when autoparamters are applied
:param func: callable to execute for each version of the stimulus
:type instancemethod:
:param args: arguments to feed to func
:type args: list
:returns: list<results of *func*>, one for each trace
"""
        # initialize array to hold all varied parameters
params = self._autoParams.allData()
steps = self.autoParamRanges()
ntraces = 1
for p in steps:
ntraces = ntraces*len(p)
varylist = [[None for x in range(len(params))] for y in range(ntraces)]
x = 1
for iset, step_set in enumerate(steps):
for itrace in range(ntraces):
                idx = (itrace // x) % len(step_set)
varylist[itrace][iset] = step_set[idx]
x = x*len(step_set)
# now create the stimuli according to steps
        # go through list of modifying parameters, update this stimulus,
# and then save current state to list
stim_list = []
for itrace in range(ntraces):
for ip, param in enumerate(params):
for component in param['selection']:
# print 'setting component {} parameter {} to {}'.format(component.name, param['parameter'], varylist[itrace][ip])
                    # so I encountered a bug when the parameters were dragged; the
                    # pickling/unpickling seems to either make a copy or somehow
                    # otherwise lose the connection to the original components
# make sure to be setting the components that are in this model.
index = self.indexByComponent(component)
component = self.component(*index)
component.set(param['parameter'], varylist[itrace][ip])
# copy of current stim state, or go ahead and turn it into a signal?
# so then would I want to formulate some doc here as well?
stim_list.append(func(*args))
# now reset the components to start value
for ip, param in enumerate(params):
for component in param['selection']:
component.set(param['parameter'], varylist[0][ip])
return stim_list | 0.003786 |
def has_no_password(gpg_secret_keyid):
"""Returns True iif gpg_secret_key has a password"""
if gnupg is None:
return False
gpg = gnupg.GPG()
s = gpg.sign("", keyid=gpg_secret_keyid, passphrase="")
try:
return s.status == "signature created"
except AttributeError:
# This may happen on Windows
if hasattr(s, "stderr"):
return "GOOD_PASSPHRASE" in s.stderr | 0.002353 |
def getUncertainty(self, result=None):
"""Returns the uncertainty for this analysis and result.
Returns the value from Schema's Uncertainty field if the Service has
the option 'Allow manual uncertainty'. Otherwise, do a callback to
getDefaultUncertainty(). Returns None if no result specified and the
current result for this analysis is below or above detections limits.
"""
uncertainty = self.getField('Uncertainty').get(self)
if result is None and (self.isAboveUpperDetectionLimit() or
self.isBelowLowerDetectionLimit()):
return None
if uncertainty and self.getAllowManualUncertainty() is True:
try:
uncertainty = float(uncertainty)
return uncertainty
except (TypeError, ValueError):
# if uncertainty is not a number, return default value
pass
return self.getDefaultUncertainty(result) | 0.002002 |
def init_dynamic_structure_factor(self,
Qpoints,
T,
atomic_form_factor_func=None,
scattering_lengths=None,
freq_min=None,
freq_max=None):
"""Initialize dynamic structure factor calculation.
*******************************************************************
This is still an experimental feature. API can be changed without
notification.
*******************************************************************
Need to call DynamicStructureFactor.run() to start calculation.
Parameters
----------
Qpoints: array_like
Q-points in any Brillouin zone.
dtype='double'
shape=(qpoints, 3)
T: float
Temperature in K.
atomic_form_factor_func: Function object
Function that returns atomic form factor (``func`` below):
f_params = {'Na': [3.148690, 2.594987, 4.073989, 6.046925,
0.767888, 0.070139, 0.995612, 14.1226457,
0.968249, 0.217037, 0.045300],
'Cl': [1.061802, 0.144727, 7.139886, 1.171795,
6.524271, 19.467656, 2.355626, 60.320301,
                               35.829404, 0.000436, -34.916604]}
def get_func_AFF(f_params):
def func(symbol, Q):
return atomic_form_factor_WK1995(Q, f_params[symbol])
return func
scattering_lengths: dictionary
Coherent scattering lengths averaged over isotopes and spins.
Supposed for INS. For example, {'Na': 3.63, 'Cl': 9.5770}.
        freq_min, freq_max: float
Minimum and maximum phonon frequencies to determine whether
phonons are included in the calculation.
"""
if self._mesh is None:
msg = ("run_mesh has to be done before initializing dynamic"
"structure factor.")
raise RuntimeError(msg)
if not self._mesh.with_eigenvectors:
msg = "run_mesh has to be called with with_eigenvectors=True."
raise RuntimeError(msg)
if np.prod(self._mesh.mesh_numbers) != len(self._mesh.ir_grid_points):
msg = "run_mesh has to be done with is_mesh_symmetry=False."
raise RuntimeError(msg)
self._dynamic_structure_factor = DynamicStructureFactor(
self._mesh,
Qpoints,
T,
atomic_form_factor_func=atomic_form_factor_func,
scattering_lengths=scattering_lengths,
freq_min=freq_min,
freq_max=freq_max) | 0.002753 |
def expected_cumulative_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
datetime_format=None,
freq="D",
set_index_date=False,
freq_multiplier=1,
):
"""
Get expected and actual repeated cumulative transactions.
Parameters
----------
model:
A fitted lifetimes model
transactions: :obj: DataFrame
a Pandas DataFrame containing the transactions history of the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
customer_id_col: string
the column in transactions that denotes the customer_id
t: int
        the number of time units since the beginning of
data for which we want to calculate cumulative transactions
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't
understand the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
set_index_date: bool, optional
when True set date as Pandas DataFrame index, default False - number of time units
freq_multiplier: int, optional
        Default 1, can be used to get exact cumulative transactions predicted
by model, i.e. model trained with freq='W', passed freq to
expected_cumulative_transactions is freq='D', and freq_multiplier=7.
Returns
-------
:obj: DataFrame
A dataframe with columns actual, predicted
"""
start_date = pd.to_datetime(transactions[datetime_col], format=datetime_format).min()
start_period = start_date.to_period(freq)
observation_period_end = start_period + t
repeated_and_first_transactions = _find_first_transactions(
transactions,
customer_id_col,
datetime_col,
datetime_format=datetime_format,
observation_period_end=observation_period_end,
freq=freq,
)
first_trans_mask = repeated_and_first_transactions["first"]
repeated_transactions = repeated_and_first_transactions[~first_trans_mask]
first_transactions = repeated_and_first_transactions[first_trans_mask]
date_range = pd.date_range(start_date, periods=t + 1, freq=freq)
date_periods = date_range.to_period(freq)
pred_cum_transactions = []
first_trans_size = first_transactions.groupby(datetime_col).size()
for i, period in enumerate(date_periods):
if i % freq_multiplier == 0 and i > 0:
times = np.array([d.n for d in period - first_trans_size.index])
times = times[times > 0].astype(float) / freq_multiplier
expected_trans_agg = model.expected_number_of_purchases_up_to_time(times)
mask = first_trans_size.index < period
expected_trans = sum(expected_trans_agg * first_trans_size[mask])
pred_cum_transactions.append(expected_trans)
act_trans = repeated_transactions.groupby(datetime_col).size()
act_tracking_transactions = act_trans.reindex(date_periods, fill_value=0)
act_cum_transactions = []
for j in range(1, t // freq_multiplier + 1):
sum_trans = sum(act_tracking_transactions.iloc[: j * freq_multiplier])
act_cum_transactions.append(sum_trans)
if set_index_date:
index = date_periods[freq_multiplier - 1 : -1 : freq_multiplier]
else:
index = range(0, t // freq_multiplier)
df_cum_transactions = pd.DataFrame(
{"actual": act_cum_transactions, "predicted": pred_cum_transactions}, index=index
)
return df_cum_transactions | 0.002699 |
def parse_intervals(diff_report):
"""
Parse a diff into an iterator of Intervals.
"""
for patch in diff_report.patch_set:
try:
old_pf = diff_report.old_file(patch.source_file)
new_pf = diff_report.new_file(patch.target_file)
except InvalidPythonFile:
continue
for hunk in patch:
for line in hunk:
if line.line_type == LINE_TYPE_ADDED:
idx = line.target_line_no
yield ContextInterval(new_pf.filename, new_pf.context(idx))
elif line.line_type == LINE_TYPE_REMOVED:
idx = line.source_line_no
yield ContextInterval(old_pf.filename, old_pf.context(idx))
elif line.line_type in (LINE_TYPE_EMPTY, LINE_TYPE_CONTEXT):
pass
else:
raise AssertionError("Unexpected line type: %s" % line) | 0.001053 |
def template_shebang(template, renderers, default, blacklist, whitelist, input_data):
'''
Check the template shebang line and return the list of renderers specified
in the pipe.
Example shebang lines::
#!yaml_jinja
#!yaml_mako
#!mako|yaml
#!jinja|yaml
#!jinja|mako|yaml
#!mako|yaml|stateconf
#!jinja|yaml|stateconf
#!mako|yaml_odict
#!mako|yaml_odict|stateconf
'''
line = ''
# Open up the first line of the sls template
if template == ':string:':
line = input_data.split()[0]
else:
with salt.utils.files.fopen(template, 'r') as ifile:
line = salt.utils.stringutils.to_unicode(ifile.readline())
# Check if it starts with a shebang and not a path
if line.startswith('#!') and not line.startswith('#!/'):
# pull out the shebang data
# If the shebang does not contain recognized/not-blacklisted/whitelisted
# renderers, do not fall back to the default renderer
return check_render_pipe_str(line.strip()[2:], renderers, blacklist, whitelist)
else:
return check_render_pipe_str(default, renderers, blacklist, whitelist) | 0.00337 |
def get_gatk_annotations(config, include_depth=True, include_baseqranksum=True,
gatk_input=True):
"""Retrieve annotations to use for GATK VariantAnnotator.
If include_depth is false, we'll skip annotating DP. Since GATK downsamples
this will undercount on high depth sequencing and the standard outputs
from the original callers may be preferable.
BaseQRankSum can cause issues with some MuTect2 and other runs, so we
provide option to skip it.
"""
broad_runner = broad.runner_from_config(config)
anns = ["MappingQualityRankSumTest", "MappingQualityZero",
"QualByDepth", "ReadPosRankSumTest", "RMSMappingQuality"]
if include_baseqranksum:
anns += ["BaseQualityRankSumTest"]
# Some annotations not working correctly with external datasets and GATK 3
if gatk_input or broad_runner.gatk_type() == "gatk4":
anns += ["FisherStrand"]
if broad_runner.gatk_type() == "gatk4":
anns += ["MappingQuality"]
else:
anns += ["GCContent", "HaplotypeScore", "HomopolymerRun"]
if include_depth:
anns += ["DepthPerAlleleBySample"]
if broad_runner.gatk_type() in ["restricted", "gatk4"]:
anns += ["Coverage"]
else:
anns += ["DepthOfCoverage"]
return anns | 0.000759 |
def render(self, template_name, **kw):
'''
Given a template name and template vars.
Searches a template file based on engine set, and renders it
with corresponding engine.
Returns a string.
'''
logger.debug('Rendering template "%s"', template_name)
vars = self.globs.copy()
vars.update(kw)
resolved_name, engine = self.resolve(template_name)
return engine.render(resolved_name, **vars) | 0.006329 |
def create(cls, op, *, derivs, vals=None):
"""Instantiate the derivative by repeatedly calling
the :meth:`~QuantumExpression._diff` method of `op` and evaluating the
result at the given `vals`.
"""
# To ensure stable ordering in Expression._get_instance_key, we explicitly
# convert `derivs` and `vals` to a tuple structure with a custom sorting key.
if not isinstance(derivs, tuple):
derivs = cls._dict_to_ordered_tuple(dict(derivs))
if not (isinstance(vals, tuple) or vals is None):
vals = cls._dict_to_ordered_tuple(dict(vals))
return super().create(op, derivs=derivs, vals=vals) | 0.0059 |
def sim(
model,
params_file=True,
tmax=None,
branching=None,
nrRealizations=None,
noiseObs=None,
noiseDyn=None,
step=None,
seed=None,
writedir=None,
) -> AnnData:
"""Simulate dynamic gene expression data [Wittmann09]_ [Wolf18]_.
Sample from a stochastic differential equation model built from
literature-curated boolean gene regulatory networks, as suggested by
[Wittmann09]_. The Scanpy implementation is due to [Wolf18]_.
Parameters
----------
model : {'krumsiek11', 'toggleswitch'}
Model file in 'sim_models' directory.
params_file : `bool`, (default: `True`)
Read default params from file.
tmax : `int`, optional (default: `None`)
Number of time steps per realization of time series.
branching : `bool`, optional (default: `None`)
Only write realizations that contain new branches.
nrRealizations : int, optional (default: `None`)
Number of realizations.
noiseObs : float, optional (default: `None`)
Observatory/Measurement noise.
noiseDyn : float, optional (default: `None`)
Dynamic noise.
step : int, optional (default: `None`)
Interval for saving state of system.
seed : int, optional (default: `None`)
Seed for generation of random numbers.
writedir: str, optional (default: `None`)
Path to directory for writing output files.
Returns
-------
Annotated data matrix.
Examples
--------
See this `use case <https://github.com/theislab/scanpy_usage/tree/master/170430_krumsiek11>`__
"""
params = locals()
if params_file:
model_key = os.path.basename(model).replace('.txt', '')
from .. import sim_models
pfile_sim = (os.path.dirname(sim_models.__file__)
+ '/' + model_key + '_params.txt')
default_params = readwrite.read_params(pfile_sim)
params = utils.update_params(default_params, params)
adata = sample_dynamic_data(params)
adata.uns['iroot'] = 0
return adata | 0.000969 |
def get_transform_vector(self, resx, resy):
""" Given resolution it returns a transformation vector
:param resx: Resolution in x direction
:type resx: float or int
:param resy: Resolution in y direction
:type resy: float or int
:return: A tuple with 6 numbers representing transformation vector
:rtype: tuple(float)
"""
return self.x_min, self._parse_resolution(resx), 0, self.y_max, 0, -self._parse_resolution(resy) | 0.006135 |
def update_xml_element(self):
"""
Updates the xml element contents to matches the instance contents.
:returns: Updated XML element.
:rtype: lxml.etree._Element
"""
if not hasattr(self, 'xml_element'):
self.xml_element = etree.Element(self.name, nsmap=NSMAP)
self.xml_element.clear()
if hasattr(self, 'abstract'):
self.xml_element.set('abstract', self.abstract)
if hasattr(self, 'prohibitChanges'):
self.xml_element.set('prohibitChanges', self.prohibitChanges)
self.xml_element.set('id', self.id)
for child in self.children:
if hasattr(child, 'update_xml_element'):
child.update_xml_element()
if hasattr(child, 'xml_element'):
self.xml_element.append(child.xml_element)
return self.xml_element | 0.002237 |
def _flush(self):
"""
Flush metadata to the backing file
:return:
"""
with open(self.metadata_file, 'w') as f:
json.dump(self.metadata, f) | 0.010526 |
def to_text(data):
"""
Serializes a python object as plain text
If the data can be serialized as JSON, this method will use the to_json
method to format the data, otherwise the data is returned as is.
"""
try:
serialized_content = to_json(data, indent=4)
except Exception, e:
serialized_content = data
return serialized_content | 0.00266 |
def summarization(text, cloud=None, batch=False, api_key=None, version=1, **kwargs):
"""
Given input text, returns a `top_n` length sentence summary.
Example usage:
.. code-block:: python
>>> from indicoio import summarization
>>> summary = summarization("https://en.wikipedia.org/wiki/Yahoo!_data_breach")
>>> summary
["This information was disclosed two years later on September 22, 2016.", "[1] The data breach is one of the largest in the history of the Internet.", "Specific details of material taken include names, email addresses, telephone numbers, dates of birth, and encrypted passwords.", "[2]\\n\\nEvents [ edit ]\\n\\nYahoo alleged in its statement that the breach was carried out by \\"state-sponsored\\" hackers,[3] but the organization did not name any country.", "We had our own use for it and other buyers did as well."]
:param text: The text to be analyzed.
:type text: str or unicode
    :rtype: List of summary sentences
"""
url_params = {"batch": batch, "api_key": api_key, "version": version}
return api_handler(text, cloud=cloud, api="summarization", url_params=url_params, **kwargs) | 0.004149 |
def set_multiple(self, **kwargs):
"""Configure multiple app key/value pairs"""
quiet = False
if not kwargs:
return
cmd = ["heroku", "config:set"]
for k in sorted(kwargs):
cmd.append("{}={}".format(k, quote(str(kwargs[k]))))
if self._is_sensitive_key(k):
quiet = True
cmd.extend(["--app", self.name])
if quiet:
self._run_quiet(cmd)
else:
self._run(cmd) | 0.004073 |
def rowsAfterValue(self, value, count):
"""
Retrieve some rows at or after a given sort-column value.
@param value: Starting value in the index for the current sort column
at which to start returning results. Rows with a column value for the
current sort column which is greater than or equal to this value will
be returned.
@type value: Some type compatible with the current sort column, or
None, to specify the beginning of the data.
@param count: The maximum number of rows to return.
@type count: C{int}
@return: A list of row data, ordered by the current sort column,
beginning at C{value} and containing at most C{count} elements.
"""
if value is None:
query = self.inequalityQuery(None, count, True)
else:
pyvalue = self._toComparableValue(value)
currentSortAttribute = self.currentSortColumn.sortAttribute()
query = self.inequalityQuery(currentSortAttribute >= pyvalue, count, True)
return self.constructRows(query) | 0.002717 |
def status(self, all_instances=None, instance_ids=None, filters=None):
"""List instance info."""
params = {}
if filters:
params["filters"] = make_filters(filters)
if instance_ids:
params['InstanceIds'] = instance_ids
if all_instances is not None:
params['IncludeAllInstances'] = all_instances
statuses = self.call("DescribeInstanceStatus",
response_data_key="InstanceStatuses",
**params)
return statuses | 0.003617 |
def scale_rows(A, v, copy=True):
"""Scale the sparse rows of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with M rows
v : array_like
Array of M scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_rows(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_rows(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_rows, scale_columns
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_columns
- if A is not csr, csc, or bsr, it is converted to csr and sent
to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_rows
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> B = scale_rows(A,5*np.ones((A.shape[0],1)))
"""
v = np.ravel(v)
M, N = A.shape
if not isspmatrix(A):
raise ValueError('scale rows needs a sparse matrix')
if M != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_rows(M, N, A.indptr, A.indices, A.data, v)
elif isspmatrix_bsr(A):
R, C = A.blocksize
bsr_scale_rows(int(M/R), int(N/C), R, C, A.indptr, A.indices,
np.ravel(A.data), v)
elif isspmatrix_csc(A):
pyamg.amg_core.csc_scale_rows(M, N, A.indptr, A.indices, A.data, v)
else:
fmt = A.format
A = scale_rows(csr_matrix(A), v).asformat(fmt)
return A | 0.0005 |
def scale_sfs(s):
"""Scale a site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes,)
Site frequency spectrum.
Returns
-------
sfs_scaled : ndarray, int, shape (n_chromosomes,)
Scaled site frequency spectrum.
"""
k = np.arange(s.size)
out = s * k
return out | 0.002825 |
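A short usage sketch, assuming scale_sfs above is in scope:

import numpy as np

s = np.array([0, 10, 5, 2])  # counts per derived-allele frequency class
print(scale_sfs(s))          # [ 0 10 10  6] -- each count multiplied by its class index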
def decode(self, fp: TextIO) -> BioCCollection:
"""
Deserialize ``fp`` to a BioC collection object.
Args:
fp: a ``.read()``-supporting file-like object containing a BioC collection
Returns:
an object of BioCollection
"""
# utf8_parser = etree.XMLParser(encoding='utf-8')
tree = etree.parse(fp)
collection = self.__parse_collection(tree.getroot())
collection.encoding = tree.docinfo.encoding
collection.standalone = tree.docinfo.standalone
collection.version = tree.docinfo.xml_version
return collection | 0.004808 |
def exception_retry_middleware(make_request, web3, errors, retries=5):
"""
Creates middleware that retries failed HTTP requests. Is a default
middleware for HTTPProvider.
"""
def middleware(method, params):
if check_if_retry_on_failure(method):
for i in range(retries):
try:
return make_request(method, params)
except errors:
if i < retries - 1:
continue
else:
raise
else:
return make_request(method, params)
return middleware | 0.001587 |
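A sketch of the retry behaviour, assuming exception_retry_middleware above is in scope; check_if_retry_on_failure is a stand-in for the whitelist helper it calls, and flaky_request is purely illustrative:

def check_if_retry_on_failure(method):
    return True

attempts = {'n': 0}

def flaky_request(method, params):
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise ConnectionError('transient failure')
    return {'result': 'ok'}

middleware = exception_retry_middleware(flaky_request, web3=None,
                                        errors=(ConnectionError,), retries=5)
print(middleware('eth_getBalance', []))  # {'result': 'ok'} after two retried failures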
def list_build_configuration_set_records(page_size=200, page_index=0, sort="", q=""):
"""
List all build configuration set records.
"""
data = list_build_configuration_set_records_raw(page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) | 0.010417 |
def apply_extends(self, rules):
"""Run through the given rules and translate all the pending @extends
declarations into real selectors on parent rules.
The list is modified in-place and also sorted in dependency order.
"""
# Game plan: for each rule that has an @extend, add its selectors to
# every rule that matches that @extend.
# First, rig a way to find arbitrary selectors quickly. Most selectors
# revolve around elements, classes, and IDs, so parse those out and use
# them as a rough key. Ignore order and duplication for now.
key_to_selectors = defaultdict(set)
selector_to_rules = defaultdict(set)
rule_selector_order = {}
order = 0
for rule in rules:
for selector in rule.selectors:
for key in selector.lookup_key():
key_to_selectors[key].add(selector)
selector_to_rules[selector].add(rule)
rule_selector_order[rule, selector] = order
order += 1
# Now go through all the rules with an @extends and find their parent
# rules.
for rule in rules:
for selector in rule.extends_selectors:
# This is a little dirty. intersection isn't a class method.
# Don't think about it too much.
candidates = set.intersection(*(
key_to_selectors[key] for key in selector.lookup_key()))
extendable_selectors = [
candidate for candidate in candidates
if candidate.is_superset_of(selector)]
if not extendable_selectors:
# TODO implement !optional
warn_deprecated(
rule,
"Can't find any matching rules to extend {0!r} -- this "
"will be fatal in 2.0, unless !optional is specified!"
.format(selector.render()))
continue
# Armed with a set of selectors that this rule can extend, do
# some substitution and modify the appropriate parent rules.
# One tricky bit: it's possible we're extending two selectors
# that both exist in the same parent rule, in which case we
# want to extend in the order the original selectors appear in
# that rule.
known_parents = []
for extendable_selector in extendable_selectors:
parent_rules = selector_to_rules[extendable_selector]
for parent_rule in parent_rules:
if parent_rule is rule:
# Don't extend oneself
continue
known_parents.append(
(parent_rule, extendable_selector))
# This will put our parents back in their original order
known_parents.sort(key=rule_selector_order.__getitem__)
for parent_rule, extendable_selector in known_parents:
more_parent_selectors = []
for rule_selector in rule.selectors:
more_parent_selectors.extend(
extendable_selector.substitute(
selector, rule_selector))
for parent in more_parent_selectors:
# Update indices, in case later rules try to extend
# this one
for key in parent.lookup_key():
key_to_selectors[key].add(parent)
selector_to_rules[parent].add(parent_rule)
rule_selector_order[parent_rule, parent] = order
order += 1
parent_rule.ancestry = (
parent_rule.ancestry.with_more_selectors(
more_parent_selectors))
# Remove placeholder-only rules
return [rule for rule in rules if not rule.is_pure_placeholder] | 0.000718 |
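The indexing trick above, in isolation: a sketch with plain strings in place of the selector objects, showing how an inverted index from lookup keys to selectors lets candidate sets be intersected cheaply.
from collections import defaultdict

# Toy "selectors" and the rough keys they would expose via lookup_key().
selectors = {
    "a.btn":        {"a", ".btn"},
    ".btn.primary": {".btn", ".primary"},
    "p.note":       {"p", ".note"},
}

key_to_selectors = defaultdict(set)
for sel, keys in selectors.items():
    for key in keys:
        key_to_selectors[key].add(sel)

# Candidates for extending ".btn.primary": intersect the sets for its keys.
candidates = set.intersection(*(key_to_selectors[k] for k in {".btn", ".primary"}))
print(candidates)   # {'.btn.primary'}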
def GetTopLevelContainingType(self):
"""Returns the root if this is a nested type, or itself if its the root."""
desc = self
while desc.containing_type is not None:
desc = desc.containing_type
return desc | 0.00885 |
def contents(self):
"""Return the list of contained directory entries, loading them
if not already loaded."""
if not self.contents_read:
self.contents_read = True
base = self.path
for entry in os.listdir(self.source_path):
source_path = os.path.join(self.source_path, entry)
target_path = os.path.join(base, entry)
if os.path.isdir(source_path):
self.filesystem.add_real_directory(
source_path, self.read_only, target_path=target_path)
else:
self.filesystem.add_real_file(
source_path, self.read_only, target_path=target_path)
return self.byte_contents | 0.002591 |
def update_member_details(self, member_id, payload_member_detail, **kwargs): # noqa: E501
"""Modify member details # noqa: E501
        One of the parameters below is needed to modify member information  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_member_details(member_id, payload_member_detail, async=True)
>>> result = thread.get()
:param async bool
:param str member_id: (required)
:param MemberDetail payload_member_detail: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_member_details_with_http_info(member_id, payload_member_detail, **kwargs) # noqa: E501
else:
(data) = self.update_member_details_with_http_info(member_id, payload_member_detail, **kwargs) # noqa: E501
return data | 0.0018 |
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment() | 0.004087 |
def check_nonparametric_sources(fname, smodel, investigation_time):
"""
:param fname:
full path to a source model file
:param smodel:
source model object
:param investigation_time:
investigation_time to compare with in the case of
nonparametric sources
:returns:
the nonparametric sources in the model
:raises:
a ValueError if the investigation_time is different from the expected
"""
# NonParametricSeismicSources
np = [src for sg in smodel.src_groups for src in sg
if hasattr(src, 'data')]
if np and smodel.investigation_time != investigation_time:
raise ValueError(
'The source model %s contains an investigation_time '
'of %s, while the job.ini has %s' % (
fname, smodel.investigation_time, investigation_time))
return np | 0.001143 |
def set_speech_text(self, text):
"""Set response output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters.
"""
self.response.outputSpeech.type = 'PlainText'
self.response.outputSpeech.text = text | 0.008929 |
def solve_coupled_ecc_solution(F0, e0, gamma0, phase0, mc, q, t):
"""
Compute the solution to the coupled system of equations
    from Peters (1964) and Barack & Cutler (2004) at
a given time.
:param F0: Initial orbital frequency [Hz]
:param e0: Initial orbital eccentricity
:param gamma0: Initial angle of precession of periastron [rad]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:param t: Time at which to evaluate solution [s]
:returns: (F(t), e(t), gamma(t), phase(t))
"""
y0 = np.array([F0, e0, gamma0, phase0])
y, infodict = odeint(get_coupled_ecc_eqns, y0, t, args=(mc,q), full_output=True)
if infodict['message'] == 'Integration successful.':
ret = y
else:
ret = 0
return ret | 0.012005 |
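The odeint bookkeeping used above (full_output plus the 'Integration successful.' check), shown on a toy one-dimensional system; the physics-specific right-hand side get_coupled_ecc_eqns is not reproduced here.
import numpy as np
from scipy.integrate import odeint

def toy_rhs(y, t, rate):
    return -rate * y                      # dy/dt = -rate * y

t = np.linspace(0.0, 5.0, 50)
y, infodict = odeint(toy_rhs, 1.0, t, args=(0.7,), full_output=True)

if infodict['message'] == 'Integration successful.':
    print(y[-1])                          # ~ exp(-0.7 * 5)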
def day_night_duration(
self,
daybreak: datetime.time = datetime.time(NORMAL_DAY_START_H),
nightfall: datetime.time = datetime.time(NORMAL_DAY_END_H)) \
-> Tuple[datetime.timedelta, datetime.timedelta]:
"""
Returns a ``(day, night)`` tuple of ``datetime.timedelta`` objects
giving the duration of this interval that falls into day and night
respectively.
"""
daytotal = datetime.timedelta()
nighttotal = datetime.timedelta()
startdate = self.start.date()
enddate = self.end.date()
ndays = (enddate - startdate).days + 1
for i in range(ndays):
date = startdate + datetime.timedelta(days=i)
component = self.component_on_date(date)
# ... an interval on a single day
day = Interval.daytime(date, daybreak, nightfall)
daypart = component.intersection(day)
if daypart is not None:
daytotal += daypart.duration()
nighttotal += component.duration() - daypart.duration()
else:
nighttotal += component.duration()
return daytotal, nighttotal | 0.001657 |
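The underlying interval arithmetic, as a standalone sketch with plain datetimes (the Interval helper class is not shown in this excerpt): the daytime portion of a span is its overlap with [daybreak, nightfall] on each day.
import datetime

def overlap(start_a, end_a, start_b, end_b):
    """Duration of the overlap between [start_a, end_a) and [start_b, end_b)."""
    return max(min(end_a, end_b) - max(start_a, start_b), datetime.timedelta(0))

day = datetime.date(2024, 3, 1)
span_start = datetime.datetime.combine(day, datetime.time(6, 0))
span_end = datetime.datetime.combine(day, datetime.time(22, 30))
daybreak = datetime.datetime.combine(day, datetime.time(7, 0))
nightfall = datetime.datetime.combine(day, datetime.time(19, 0))

daytime = overlap(span_start, span_end, daybreak, nightfall)
nighttime = (span_end - span_start) - daytime
print(daytime, nighttime)   # 12:00:00 4:30:00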
def _sysfs_attr(name, value=None, log_lvl=None, log_msg=None):
'''
Simple wrapper with logging around sysfs.attr
'''
if isinstance(name, six.string_types):
name = [name]
res = __salt__['sysfs.attr'](os.path.join(*name), value)
if not res and log_lvl is not None and log_msg is not None:
log.log(LOG[log_lvl], log_msg)
return res | 0.002688 |
def _MI_setitem(self, args, value):
'Separate __setitem__ function of MIMapping'
indices = self.indices
N = len(indices)
empty = N == 0
if empty: # init the dict
index1, key, index2, index1_last = MI_parse_args(self, args, allow_new=True)
exist_names = [index1]
item = [key]
try:
MI_check_index_name(index2)
exist_names.append(index2)
item.append(value)
except TypeError:
Nvalue, value = get_value_len(value)
if len(index2) != Nvalue:
raise ValueError(
'Number of keys (%s) based on argument %s does not match '
'number of values (%s)' % (len(index2), index2, Nvalue))
exist_names.extend(index2)
item.extend(value)
if index1_last:
exist_names = exist_names[1:] + exist_names[:1]
item = item[1:] + item[:1]
_MI_init(self, [item], exist_names)
return
index1, key, index2, item, old_value = MI_parse_args(self, args, allow_new=True)
names = force_list(indices.keys())
is_new_key = item is None
single = isinstance(index2, int)
if single:
index2_list = [index2]
value = [value]
old_value = [old_value]
else:
index2_list = index2
Nvalue, value = get_value_len(value)
if len(index2_list) != Nvalue:
raise ValueError('Number of keys (%s) based on argument %s does not match '
'number of values (%s)' % (len(index2_list), index2, Nvalue))
if is_new_key:
old_value = [None] * Nvalue # fake it
# remove duplicate in index2_list
index2_d = OrderedDict()
for e, index in enumerate(index2_list):
index2_d[index] = e # order of each unique index
if len(index2_d) < len(index2_list): # exist duplicate indices
idx = index2_d.values()
index2_list = mget_list(index2_list, idx)
value = mget_list(value, idx)
old_value = mget_list(old_value, idx)
# check duplicate values
for i, v, old_v in zip(index2_list, value, old_value):
# index2_list may contain index1; not allow duplicate value for index1 either
if v in indices[i]:
if is_new_key or v != old_v:
raise ValueExistsError(v, i, names[i])
if is_new_key:
if set(index2_list + [index1]) != set(range(N)):
raise ValueError('Indices of the new item do not match existing indices')
d = {}
d[index1] = key
# index2_list may also override index1
d.update(zip(index2_list, value))
values = [d[i] for i in range(N)] # reorder based on the indices
key = values[0]
val = values[1] if len(values) == 2 else values[1:]
super(MIMapping, self).__setitem__(key, val)
for i, v in zip(names[1:], values[1:]):
indices[i][v] = key
else: # not new key
# import pdb;pdb.set_trace()
key1 = item[0]
item2 = list(item) # copy item first
mset_list(item2, index2_list, value) # index2_list may also override index1
key2 = item2[0]
val = item2[1] if len(item2) == 2 else item2[1:]
if key1 == key2:
super(MIMapping, self).__setitem__(key1, val)
else:
od_replace_key(self, key1, key2, val)
for i, v_old, v_new in zip(names[1:], item[1:], item2[1:]):
od_replace_key(indices[i], v_old, v_new, key2) | 0.003975 |
def register(self, name, obj):
"""Registers an unique type description"""
if name in self.all:
log.debug('register: %s already existed: %s', name, obj.name)
# code.interact(local=locals())
raise DuplicateDefinitionException(
'register: %s already existed: %s' % (name, obj.name))
log.debug('register: %s ', name)
self.all[name] = obj
return obj | 0.004587 |
def retrieve(customer_id):
"""
Retrieve a customer from its id.
:param customer_id: The customer id
:type customer_id: string
:return: The customer resource
:rtype: resources.Customer
"""
http_client = HttpClient()
response, __ = http_client.get(routes.url(routes.CUSTOMER_RESOURCE, resource_id=customer_id))
return resources.Customer(**response) | 0.007009 |
def datetime(self, timezone=None):
"""Returns a datetime object.
This object retains all information, including timezones.
:param timezone = self.timezone
The timezone (in seconds west of UTC) to return the value in. By
default, the timezone used when constructing the class is used
(local one by default). To use UTC, use timezone = 0. To use the
local tz, use timezone = chronyk.LOCALTZ.
"""
if timezone is None:
timezone = self.timezone
return _dtfromtimestamp(self.__timestamp__ - timezone) | 0.003311 |
def query(self, session=None):
'''Returns a new :class:`Query` for :attr:`Manager.model`.'''
if session is None or session.router is not self.router:
session = self.session()
return session.query(self.model) | 0.008097 |
def builder(sp, authnsign=False, wsign=False, valid_until=None, cache_duration=None, contacts=None, organization=None):
"""
Builds the metadata of the SP
:param sp: The SP data
:type sp: string
:param authnsign: authnRequestsSigned attribute
:type authnsign: string
:param wsign: wantAssertionsSigned attribute
:type wsign: string
:param valid_until: Metadata's expiry date
:type valid_until: string|DateTime|Timestamp
:param cache_duration: Duration of the cache in seconds
:type cache_duration: int|string
:param contacts: Contacts info
:type contacts: dict
:param organization: Organization info
:type organization: dict
"""
if valid_until is None:
valid_until = int(time()) + OneLogin_Saml2_Metadata.TIME_VALID
if not isinstance(valid_until, basestring):
if isinstance(valid_until, datetime):
valid_until_time = valid_until.timetuple()
else:
valid_until_time = gmtime(valid_until)
valid_until_str = strftime(r'%Y-%m-%dT%H:%M:%SZ', valid_until_time)
else:
valid_until_str = valid_until
if cache_duration is None:
cache_duration = OneLogin_Saml2_Metadata.TIME_CACHED
if not isinstance(cache_duration, basestring):
cache_duration_str = 'PT%sS' % cache_duration # 'P'eriod of 'T'ime x 'S'econds
else:
cache_duration_str = cache_duration
if contacts is None:
contacts = {}
if organization is None:
organization = {}
str_attribute_consuming_service = ''
if 'attributeConsumingService' in sp and len(sp['attributeConsumingService']):
attr_cs_desc_str = ''
if "serviceDescription" in sp['attributeConsumingService']:
attr_cs_desc_str = """ <md:ServiceDescription xml:lang="en">%s</md:ServiceDescription>
""" % sp['attributeConsumingService']['serviceDescription']
requested_attribute_data = []
for req_attribs in sp['attributeConsumingService']['requestedAttributes']:
req_attr_nameformat_str = req_attr_friendlyname_str = req_attr_isrequired_str = ''
req_attr_aux_str = ' />'
if 'nameFormat' in req_attribs.keys() and req_attribs['nameFormat']:
req_attr_nameformat_str = " NameFormat=\"%s\"" % req_attribs['nameFormat']
if 'friendlyName' in req_attribs.keys() and req_attribs['friendlyName']:
req_attr_friendlyname_str = " FriendlyName=\"%s\"" % req_attribs['friendlyName']
if 'isRequired' in req_attribs.keys() and req_attribs['isRequired']:
                req_attr_isrequired_str = " isRequired=\"%s\"" % ('true' if req_attribs['isRequired'] else 'false')
if 'attributeValue' in req_attribs.keys() and req_attribs['attributeValue']:
if isinstance(req_attribs['attributeValue'], basestring):
req_attribs['attributeValue'] = [req_attribs['attributeValue']]
req_attr_aux_str = ">"
for attrValue in req_attribs['attributeValue']:
req_attr_aux_str += """
<saml:AttributeValue xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion">%(attributeValue)s</saml:AttributeValue>""" % \
{
'attributeValue': attrValue
}
req_attr_aux_str += """
</md:RequestedAttribute>"""
requested_attribute = """ <md:RequestedAttribute Name="%(req_attr_name)s"%(req_attr_nameformat_str)s%(req_attr_friendlyname_str)s%(req_attr_isrequired_str)s%(req_attr_aux_str)s""" % \
{
'req_attr_name': req_attribs['name'],
'req_attr_nameformat_str': req_attr_nameformat_str,
'req_attr_friendlyname_str': req_attr_friendlyname_str,
'req_attr_isrequired_str': req_attr_isrequired_str,
'req_attr_aux_str': req_attr_aux_str
}
requested_attribute_data.append(requested_attribute)
str_attribute_consuming_service = """ <md:AttributeConsumingService index="1">
<md:ServiceName xml:lang="en">%(service_name)s</md:ServiceName>
%(attr_cs_desc)s%(requested_attribute_str)s
</md:AttributeConsumingService>
""" % \
{
'service_name': sp['attributeConsumingService']['serviceName'],
'attr_cs_desc': attr_cs_desc_str,
'requested_attribute_str': '\n'.join(requested_attribute_data)
}
sls = ''
if 'singleLogoutService' in sp and 'url' in sp['singleLogoutService']:
sls = """ <md:SingleLogoutService Binding="%(binding)s"
Location="%(location)s" />\n""" % \
{
'binding': sp['singleLogoutService']['binding'],
'location': sp['singleLogoutService']['url'],
}
str_authnsign = 'true' if authnsign else 'false'
str_wsign = 'true' if wsign else 'false'
str_organization = ''
if len(organization) > 0:
organization_names = []
organization_displaynames = []
organization_urls = []
for (lang, info) in organization.items():
organization_names.append(""" <md:OrganizationName xml:lang="%s">%s</md:OrganizationName>""" % (lang, info['name']))
organization_displaynames.append(""" <md:OrganizationDisplayName xml:lang="%s">%s</md:OrganizationDisplayName>""" % (lang, info['displayname']))
organization_urls.append(""" <md:OrganizationURL xml:lang="%s">%s</md:OrganizationURL>""" % (lang, info['url']))
org_data = '\n'.join(organization_names) + '\n' + '\n'.join(organization_displaynames) + '\n' + '\n'.join(organization_urls)
str_organization = """ <md:Organization>
%(org)s
</md:Organization>\n""" % {'org': org_data}
str_contacts = ''
if len(contacts) > 0:
contacts_info = []
for (ctype, info) in contacts.items():
contact = """ <md:ContactPerson contactType="%(type)s">
<md:GivenName>%(name)s</md:GivenName>
<md:EmailAddress>%(email)s</md:EmailAddress>
</md:ContactPerson>""" % \
{
'type': ctype,
'name': info['givenName'],
'email': info['emailAddress'],
}
contacts_info.append(contact)
str_contacts = '\n'.join(contacts_info) + '\n'
metadata = u"""<?xml version="1.0"?>
<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"
%(valid)s
%(cache)s
entityID="%(entity_id)s">
<md:SPSSODescriptor AuthnRequestsSigned="%(authnsign)s" WantAssertionsSigned="%(wsign)s" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
%(sls)s <md:NameIDFormat>%(name_id_format)s</md:NameIDFormat>
<md:AssertionConsumerService Binding="%(binding)s"
Location="%(location)s"
index="1" />
%(attribute_consuming_service)s </md:SPSSODescriptor>
%(organization)s%(contacts)s</md:EntityDescriptor>""" % \
{
'valid': ('validUntil="%s"' % valid_until_str) if valid_until_str else '',
'cache': ('cacheDuration="%s"' % cache_duration_str) if cache_duration_str else '',
'entity_id': sp['entityId'],
'authnsign': str_authnsign,
'wsign': str_wsign,
'name_id_format': sp['NameIDFormat'],
'binding': sp['assertionConsumerService']['binding'],
'location': sp['assertionConsumerService']['url'],
'sls': sls,
'organization': str_organization,
'contacts': str_contacts,
'attribute_consuming_service': str_attribute_consuming_service
}
return metadata | 0.003303 |
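A hedged usage sketch: the only sp keys the function reads unconditionally are entityId, NameIDFormat and assertionConsumerService (singleLogoutService, attributeConsumingService, contacts and organization are optional). The import path and placeholder URLs below are assumptions based on python3-saml, where this function is exposed on OneLogin_Saml2_Metadata.
from onelogin.saml2.metadata import OneLogin_Saml2_Metadata   # assumed import path

sp = {
    'entityId': 'https://sp.example.com/metadata/',
    'NameIDFormat': 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent',
    'assertionConsumerService': {
        'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST',
        'url': 'https://sp.example.com/acs/',
    },
    'singleLogoutService': {
        'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect',
        'url': 'https://sp.example.com/sls/',
    },
}

xml = OneLogin_Saml2_Metadata.builder(sp, authnsign=True, cache_duration=604800)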
def _print_speed(self):
'''Print the current speed.'''
if self._bandwidth_meter.num_samples:
speed = self._bandwidth_meter.speed()
if self._human_format:
file_size_str = wpull.string.format_size(speed)
else:
file_size_str = '{:.1f} b'.format(speed * 8)
speed_str = _('{preformatted_file_size}/s').format(
preformatted_file_size=file_size_str
)
else:
speed_str = _('-- B/s')
self._print(speed_str) | 0.00363 |
def _can_be_double(x):
"""
Return if the array can be safely converted to double.
That happens when the dtype is a float with the same size of
a double or narrower, or when is an integer that can be safely
converted to double (if the roundtrip conversion works).
"""
return ((np.issubdtype(x.dtype, np.floating) and
x.dtype.itemsize <= np.dtype(float).itemsize) or
(np.issubdtype(x.dtype, np.signedinteger) and
np.can_cast(x, float))) | 0.002 |
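A quick illustration (not from the original source) of how the check behaves for common dtypes:
import numpy as np

def can_be_double(x):
    # Same predicate as above, restated for the demo.
    return ((np.issubdtype(x.dtype, np.floating) and
             x.dtype.itemsize <= np.dtype(float).itemsize) or
            (np.issubdtype(x.dtype, np.signedinteger) and
             np.can_cast(x, float)))

print(can_be_double(np.ones(3, dtype=np.float32)))    # True: narrower than a double
print(can_be_double(np.ones(3, dtype=np.float64)))    # True: same width as a double
print(can_be_double(np.arange(3, dtype=np.int32)))    # True: int32 -> float64 is lossless
print(can_be_double(np.arange(3, dtype=np.int64)))    # False: int64 can lose precision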
def FindClassIdInMoMetaIgnoreCase(classId):
""" Methods whether classId is valid or not . Given class is case insensitive. """
if not classId:
return None
if classId in _ManagedObjectMeta:
return classId
lClassId = classId.lower()
for key in _ManagedObjectMeta.keys():
if (key.lower() == lClassId):
return key
return None | 0.037572 |
def parseEC2Json2List(jsontext, region):
"""
Takes a JSON and returns a list of InstanceType objects representing EC2 instance params.
:param jsontext:
:param region:
:return:
"""
currentList = json.loads(jsontext)
ec2InstanceList = []
for k, v in iteritems(currentList["products"]):
if "location" in v["attributes"] and v["attributes"]["location"] == region:
# 3 tenant types: 'Host' (always $0.00; just a template?)
# 'Dedicated' (toil does not support; these are pricier)
# 'Shared' (AWS default and what toil uses)
if "tenancy" in v["attributes"] and v["attributes"]["tenancy"] == "Shared":
if v["attributes"]["operatingSystem"] == "Linux":
# The same instance can appear with multiple "operation"
# values; "RunInstances" is normal, and
# "RunInstances:<code>" is e.g. Linux with MS SQL Server
# installed.
if v["attributes"]["operation"] == "RunInstances":
disks, disk_capacity = parseStorage(v["attributes"]["storage"])
memory = parseMemory(v["attributes"]["memory"])
instance = InstanceType(name=v["attributes"]["instanceType"],
cores=v["attributes"]["vcpu"],
memory=memory,
disks=disks,
disk_capacity=disk_capacity)
if instance not in ec2InstanceList:
ec2InstanceList.append(instance)
else:
raise RuntimeError('EC2 JSON format has likely changed. '
'Duplicate instance {} found.'.format(instance))
return ec2InstanceList | 0.004523 |
def _download_file_vizier(cat,filePath,catalogname='catalog.dat'):
'''
Stolen from Jo Bovy's gaia_tools package!
'''
sys.stdout.write('\r'+"Downloading file %s ...\r" \
% (os.path.basename(filePath)))
sys.stdout.flush()
try:
# make all intermediate directories
os.makedirs(os.path.dirname(filePath))
except OSError: pass
# Safe way of downloading
downloading= True
interrupted= False
file, tmp_savefilename= tempfile.mkstemp()
os.close(file) #Easier this way
ntries= 1
while downloading:
try:
ftp= FTP('cdsarc.u-strasbg.fr')
ftp.login('anonymous', 'test')
ftp.cwd(os.path.join('pub','cats',cat))
with open(tmp_savefilename,'wb') as savefile:
ftp.retrbinary('RETR %s' % catalogname,savefile.write)
shutil.move(tmp_savefilename,filePath)
downloading= False
if interrupted:
raise KeyboardInterrupt
        except:
            if not downloading: #Assume KeyboardInterrupt
                raise
            elif ntries > _MAX_NTRIES:
                raise IOError('File %s does not appear to exist on the server ...' % (os.path.basename(filePath)))
finally:
if os.path.exists(tmp_savefilename):
os.remove(tmp_savefilename)
ntries+= 1
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
return None | 0.01731 |
def getMaintenanceTypes(self):
""" Return the current list of maintenance types
"""
types = [('Preventive',safe_unicode(_('Preventive')).encode('utf-8')),
('Repair', safe_unicode(_('Repair')).encode('utf-8')),
('Enhancement', safe_unicode(_('Enhancement')).encode('utf-8'))]
return DisplayList(types) | 0.010929 |
def pop (self, key, *args):
"""Remove lowercase key from dict and return value."""
assert isinstance(key, basestring)
return dict.pop(self, key.lower(), *args) | 0.016393 |
def getInfo(self):
"""
Returns a DevInfo instance, a named tuple with the following items:
- bustype: one of BUS_USB, BUS_HIL, BUS_BLUETOOTH or BUS_VIRTUAL
- vendor: device's vendor number
- product: device's product number
"""
devinfo = _hidraw_devinfo()
self._ioctl(_HIDIOCGRAWINFO, devinfo, True)
return DevInfo(devinfo.bustype, devinfo.vendor, devinfo.product) | 0.004587 |
def add_group_mindist(self, group_definitions, group_pairs='all', threshold=None, periodic=True):
r"""
Adds the minimum distance between groups of atoms to the feature list. If the groups of
atoms are identical to residues, use :py:obj:`add_residue_mindist <pyemma.coordinates.data.featurizer.MDFeaturizer.add_residue_mindist>`.
Parameters
----------
group_definitions : list of 1D-arrays/iterables containing the group definitions via atom indices.
If there is only one group_definition, it is assumed the minimum distance within this group (excluding the
self-distance) is wanted. In this case, :py:obj:`group_pairs` is ignored.
group_pairs : Can be of two types:
'all'
Computes minimum distances between all pairs of groups contained in the group definitions
ndarray((n, 2), dtype=int):
n x 2 array with the pairs of groups for which the minimum distances will be computed.
threshold : float, optional, default is None
distances below this threshold (in nm) will result in a feature 1.0, distances above will result in 0.0. If
left to None, the numerical value will be returned
periodic : bool, optional, default = True
If `periodic` is True and the trajectory contains unitcell
            information, we will treat distances that cross periodic images
using the minimum image convention.
"""
from .distances import GroupMinDistanceFeature
# Some thorough input checking and reformatting
group_definitions, group_pairs, distance_list, group_identifiers = \
_parse_groupwise_input(group_definitions, group_pairs, self.logger, 'add_group_mindist')
distance_list = self._check_indices(distance_list)
f = GroupMinDistanceFeature(self.topology, group_definitions, group_pairs, distance_list, group_identifiers, threshold, periodic)
self.__add_feature(f) | 0.006404 |
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to.
:rtype: WizardStep instance or None
"""
subcategory = self.parent.step_kw_subcategory.selected_subcategory()
is_raster = is_raster_layer(self.parent.layer)
has_classifications = get_classifications(subcategory['key'])
# Vector
if not is_raster:
return self.parent.step_kw_field
# Raster and has classifications
elif has_classifications:
return self.parent.step_kw_multi_classifications
# else go to source
return self.parent.step_kw_source | 0.002899 |
def fit(self, features, classes):
"""Constructs the MDR ensemble from the provided training data
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
classes: array-like {n_samples}
List of class labels for prediction
Returns
-------
None
"""
self.ensemble.fit(features, classes)
# Construct the feature map from the ensemble predictions
unique_rows = list(set([tuple(row) for row in features]))
for row in unique_rows:
self.feature_map[row] = self.ensemble.predict([row])[0] | 0.003086 |
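The feature-map construction above, reduced to a standalone sketch with a trivial stand-in for ensemble.predict: rows are deduplicated as tuples and one prediction is recorded per unique row.
import numpy as np

X = np.array([[0, 1], [1, 1], [0, 1], [1, 0]])
toy_predict = lambda row: int(sum(row) >= 2)      # stand-in for self.ensemble.predict

feature_map = {}
for row in set(tuple(r) for r in X):
    feature_map[row] = toy_predict(row)

print(feature_map)   # e.g. {(0, 1): 0, (1, 1): 1, (1, 0): 0}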
def get_tree_type(tree):
"""Return the (sub)tree type: 'root', 'nucleus', 'satellite', 'text' or 'leaf'
Parameters
----------
tree : nltk.tree.ParentedTree
a tree representing a rhetorical structure (or a part of it)
"""
if is_leaf_node(tree):
return SubtreeType.leaf
tree_type = tree.label().lower().split(':')[0]
assert tree_type in SUBTREE_TYPES
return tree_type | 0.004773 |
def confirm(self, token=None):
"""Returns the status of the invoice
STATUSES: pending, completed, cancelled
"""
_token = token if token else self._response.get("token")
return self._process('checkout-invoice/confirm/' + str(_token)) | 0.007326 |
def bokehjsdir(dev=False):
""" Get the location of the bokehjs source files. If dev is True,
the files in bokehjs/build are preferred. Otherwise uses the files
in bokeh/server/static.
"""
dir1 = join(ROOT_DIR, '..', 'bokehjs', 'build')
dir2 = join(serverdir(), 'static')
if dev and isdir(dir1):
return dir1
else:
return dir2 | 0.002688 |
def populate_db(sql_path):
"""Load data in the `sql_path` file into DATABASES['default']"""
logger.info("Populating DB %s from %s", repr(DB["NAME"]), repr(sql_path))
shell(
'psql -U "{USER}" -h "{HOST}" -d "{NAME}" --file={sql_path}'.format(
sql_path=sql_path, **DB
)
) | 0.003185 |
def install(cls, uninstallable, prefix, path_items, root=None, warning=None):
"""Install an importer for modules found under ``path_items`` at the given import ``prefix``.
:param bool uninstallable: ``True`` if the installed importer should be uninstalled and any
imports it performed be un-imported when ``uninstall`` is called.
:param str prefix: The import prefix the installed importer will be responsible for.
:param path_items: The paths relative to ``root`` containing modules to expose for import under
``prefix``.
    :param str root: The root path of the distribution containing the vendored code. NB: This is
                     the path to the pex code, which serves as the root under which code is vendored
at ``pex/vendor/_vendored``.
:param str warning: An optional warning to emit if any imports are made through the installed
importer.
:return:
"""
root = cls._abs_root(root)
importables = tuple(cls._iter_importables(root=root, path_items=path_items, prefix=prefix))
vendor_importer = cls(root=root,
importables=importables,
uninstallable=uninstallable,
warning=warning)
sys.meta_path.insert(0, vendor_importer)
_tracer().log('Installed {}'.format(vendor_importer), V=3)
return vendor_importer | 0.006916 |
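A minimal, self-contained illustration of the sys.meta_path mechanism relied on here (not the pex VendorImporter itself): a finder inserted at position 0 is consulted first for every import and can defer by returning None.
import sys
import importlib.abc

class LoggingFinder(importlib.abc.MetaPathFinder):
    def find_spec(self, fullname, path, target=None):
        print('import requested:', fullname)
        return None                     # defer to the finders that follow us

finder = LoggingFinder()
sys.meta_path.insert(0, finder)         # same insertion point as above
import cmath                            # logged, unless cmath was already imported
sys.meta_path.remove(finder)            # the analogue of uninstall()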
def unregistercls(self, schemacls=None, data_types=None):
"""Unregister schema class or associated data_types.
:param type schemacls: sub class of Schema.
:param list data_types: data_types to unregister.
"""
if schemacls is not None:
# clean schemas by data type
for data_type in list(self._schbytype):
_schemacls = self._schbytype[data_type]
if _schemacls is schemacls:
del self._schbytype[data_type]
if data_types is not None:
for data_type in data_types:
if data_type in self._schbytype:
del self._schbytype[data_type] | 0.002865 |
def n_way_models(mdr_instance, X, y, n=[2], feature_names=None):
"""Fits a MDR model to all n-way combinations of the features in X.
Note that this function performs an exhaustive search through all feature combinations and can be computationally expensive.
Parameters
----------
mdr_instance: object
An instance of the MDR type to use.
X: array-like (# rows, # features)
NumPy matrix containing the features
y: array-like (# rows, 1)
NumPy matrix containing the target values
n: list (default: [2])
The maximum size(s) of the MDR model to generate.
e.g., if n == [3], all 3-way models will be generated.
feature_names: list (default: None)
The corresponding names of the features in X.
If None, then the features will be named according to their order.
Returns
----------
(fitted_model, fitted_model_score, fitted_model_features): tuple of (list, list, list)
fitted_model contains the MDR model fitted to the data.
fitted_model_score contains the training scores corresponding to the fitted MDR model.
fitted_model_features contains a list of the names of the features that were used in the corresponding model.
"""
if feature_names is None:
feature_names = list(range(X.shape[1]))
for cur_n in n:
for features in itertools.combinations(range(X.shape[1]), cur_n):
mdr_model = copy.deepcopy(mdr_instance)
mdr_model.fit(X[:, features], y)
mdr_model_score = mdr_model.score(X[:, features], y)
model_features = [feature_names[feature] for feature in features]
yield mdr_model, mdr_model_score, model_features | 0.002897 |
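The enumeration pattern above as a standalone sketch, with a toy scorer standing in for a fitted MDR instance: itertools.combinations yields each n-way feature subset exactly once.
import itertools
import numpy as np

X = np.random.randint(0, 2, size=(20, 4))
y = np.random.randint(0, 2, size=20)
feature_names = ['snp0', 'snp1', 'snp2', 'snp3']

def toy_score(X_subset, y):
    # Stand-in for mdr_model.fit(...) followed by mdr_model.score(...).
    return max(np.mean(y), 1 - np.mean(y))

for features in itertools.combinations(range(X.shape[1]), 2):
    names = [feature_names[i] for i in features]
    print(names, toy_score(X[:, features], y))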
def get_users(self, limit=100, offset=0):
"""
Get all users from your current team
"""
url = self.TEAM_USERS_URL + "?limit=%s&offset=%s" % (limit, offset)
connection = Connection(self.token)
connection.set_url(self.production, url)
return connection.get_request() | 0.006231 |
def render(self, template, fail='## :todo: add {template}'):
"""
Returns the rendered value for the inputted template name.
:param template | <str>
"""
try:
return self._templates[template].render(scaffold=self)
except KeyError:
return fail.format(template=template) | 0.008523 |
def run_shex_manifest(manifest_url, index=0, debug=False):
"""
:param manifest: A url to a manifest that contains all the ingredients to run a shex conformance test
:param index: Manifests are stored in lists. This method only handles one manifest, hence by default the first
manifest is going to be selected
:return:
"""
    manifest = json.loads(manifest_url)
manifest_results = dict()
for case in manifest[index]:
if case.data.startswith("Endpoint:"):
sparql_endpoint = case.data.replace("Endpoint: ", "")
schema = requests.get(case.schemaURL).text
shex = ShExC(schema).schema
evaluator = ShExEvaluator(schema=shex, debug=debug)
sparql_query = case.queryMap.replace("SPARQL '''", "").replace("'''@START", "")
df = WDItemEngine.execute_sparql_query(sparql_query)
for row in df["results"]["bindings"]:
wdid = row["item"]["value"]
if wdid not in manifest_results.keys():
manifest_results[wdid] = dict()
slurpeddata = SlurpyGraph(sparql_endpoint)
results = evaluator.evaluate(rdf=slurpeddata, focus=wdid, debug=debug)
for result in results:
if result.result:
manifest_results[wdid]["status"] = "CONFORMS"
else:
manifest_results[wdid]["status"] = "DOES NOT CONFORM"
manifest_results[wdid]["debug"] = result.reason
return manifest_results | 0.00467 |
def _getDocstringLineno(self, node_type, node):
"""
Get line number of the docstring.
        @param node_type: type of the node
        @param node: the node currently being checked
@return: line number
"""
docstringStriped = node.as_string().strip()
linenoDocstring = (node.lineno + docstringStriped
.count("\n", 0, docstringStriped.index('"""')))
if node_type == "module":
# Module starts from line 0.
linenoDocstring += 1
return linenoDocstring | 0.003578 |
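The line-number arithmetic above on a concrete string (illustrative only): the docstring's line is the node's starting line plus the number of newlines before the opening triple quote.
source = 'def f(x):\n    """Docstring for f."""\n    return x\n'
node_lineno = 1                                    # line where `def f` starts
stripped = source.strip()
offset = stripped.count("\n", 0, stripped.index('"""'))
print(node_lineno + offset)                        # 2 -- the docstring's line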
def info(self, msg, *args, **kwargs) -> Task: # type: ignore
"""
Log msg with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
await logger.info("Houston, we have an interesting problem", exc_info=1)
"""
return self._make_log_task(logging.INFO, msg, args, **kwargs) | 0.007874 |
def deprecation(self, message, *args, **kws):
"""Show a deprecation warning."""
self._log(DEPRECATION, message, args, **kws) | 0.007576 |
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
:param url: The URL.
:type url: ``string``
:param headers: A list of pairs specifying the headers for the HTTP
response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). These arguments
are interpreted as the query part of the URL. The order of keyword
arguments is not preserved in the request, but the keywords and
their arguments will be URL encoded.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
if kwargs:
# url is already a UrlEncoded. We have to manually declare
# the query to be encoded or it will get automatically URL
# encoded by being appended to url.
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
message = {
'method': "DELETE",
'headers': headers,
}
return self.request(url, message) | 0.003909 |
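The query-string handling above depends on splunklib's internal UrlEncoded/_encode helpers; with the standard library alone the same effect looks roughly like this (a sketch, not the splunklib API):
from urllib.parse import urlencode

url = 'https://localhost:8089/services/saved/searches/mysearch'
kwargs = {'output_mode': 'json', 'count': 0}
if kwargs:
    url = url + '?' + urlencode(kwargs)
print(url)   # .../mysearch?output_mode=json&count=0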