def stop(self):
"""
Stop a node
"""
try:
yield from self.post("/stop", timeout=240, dont_connect=True)
# We don't care if a node is down at this step
except (ComputeError, aiohttp.ClientError, aiohttp.web.HTTPError):
pass
except asyncio.TimeoutError:
raise aiohttp.web.HTTPRequestTimeout(text="Timeout when stopping {}".format(self._name))
def get_single_file(self, pool, source, target):
'''Download a single file or a directory by adding a task to the queue'''
if source[-1] == PATH_SEP:
if self.opt.recursive:
basepath = S3URL(source).path
for f in (f for f in self.s3walk(source) if not f['is_dir']):
pool.download(f['name'], os.path.join(target, os.path.relpath(S3URL(f['name']).path, basepath)))
else:
message('omitting directory "%s".' % source)
else:
pool.download(source, target)
async def collect(self):
"""
Create a `self` iterator and collect it into a `TotalList`
(a normal list with a `.total` attribute).
"""
result = helpers.TotalList()
async for message in self:
result.append(message)
result.total = self.total
return result
def get_artifact_nexus3(suppress_status=False, nexus_base_url=sample_nexus_base_url, repository=None,
timeout_sec=600, overwrite=True, username=None, password=None, **kwargs):
"""Retrieves an artifact from the Nexus 3 ReST API
:param suppress_status: (bool) Set to True to suppress printing download status
:param nexus_base_url: (str) Base URL of the Nexus Server (domain name portion only, see sample)
:param repository: (str) Repository to query (e.g. snapshots) if not provided, will attempt to determine
:param timeout_sec: (int) Number of seconds to wait before
timing out the artifact retrieval.
:param overwrite: (bool) True overwrites the file on the local system if it exists,
False will log an INFO message and exit if the file already exists
:param username: (str) username for basic auth
:param password: (str) password for basic auth
:param kwargs:
group_id: (str) The artifact's Group ID in Nexus
artifact_id: (str) The artifact's Artifact ID in Nexus
packaging: (str) The artifact's packaging (e.g. war, zip)
version: (str) Version of the artifact to retrieve (e.g.
LATEST, 4.8.4, 4.9.0-SNAPSHOT)
destination_dir: (str) Full path to the destination directory
classifier: (str) The artifact's classifier (e.g. bin)
:return: None
:raises: TypeError, ValueError, OSError, RuntimeError
"""
log = logging.getLogger(mod_logger + '.get_artifact_nexus3')
required_args = ['group_id', 'artifact_id', 'packaging', 'version', 'destination_dir']
if not isinstance(overwrite, bool):
msg = 'overwrite arg must be a bool, found: {t}'.format(t=overwrite.__class__.__name__)
log.error(msg)
raise TypeError(msg)
if not isinstance(nexus_base_url, basestring):
msg = 'nexus_base_url arg must be a string, found: {t}'.format(t=nexus_base_url.__class__.__name__)
log.error(msg)
raise TypeError(msg)
log.debug('Using Nexus Server URL: {u}'.format(u=nexus_base_url))
# Ensure the required args are supplied, and that they are all strings
for required_arg in required_args:
try:
assert required_arg in kwargs
except AssertionError:
_, ex, trace = sys.exc_info()
msg = 'A required arg was not supplied. Required args are: group_id, artifact_id, version, ' \
'packaging and destination_dir\n{e}'.format(e=str(ex))
log.error(msg)
raise ValueError(msg)
if not isinstance(kwargs[required_arg], basestring):
msg = 'Arg {a} should be a string'.format(a=required_arg)
log.error(msg)
raise TypeError(msg)
# Set variables to be used in the REST call
group_id = kwargs['group_id']
artifact_id = kwargs['artifact_id']
version = kwargs['version']
packaging = kwargs['packaging']
destination_dir = kwargs['destination_dir']
# Ensure the destination directory exists
if not os.path.isdir(destination_dir):
log.debug('Specified destination_dir not found on file system, creating: {d}'.format(d=destination_dir))
try:
mkdir_p(destination_dir)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'Unable to create destination directory: {d}\n{e}'.format(d=destination_dir, e=str(ex))
raise OSError(msg)
# Determine the auth based on username and password
basic_auth = None
if (username is not None) and (password is not None):
log.info('Using the provided username/password for basic authentication...')
basic_auth = HTTPBasicAuth(username, password)
# Set the classifier if it was provided
classifier = None
if 'classifier' in kwargs:
if isinstance(kwargs['classifier'], basestring):
classifier = kwargs['classifier']
log.debug('Using classifier: {c}'.format(c=classifier))
else:
log.warn('Arg classifier provided but it was not an instance of basestring')
# Determine the repository (snapshots or releases)
if not repository:
if 'SNAPSHOT' in version:
repository = 'snapshots'
else:
repository = 'releases'
log.debug('Using repository: {r}'.format(r=repository))
# Compute the query URL
group_id_url = group_id.replace('.', '/')
# Get the Maven metadata
query_url_version = nexus_base_url + '/repository/{r}/{g}/{a}/{v}'.format(
r=repository, g=group_id_url, a=artifact_id, v=version
)
if 'snapshot' in repository.lower():
# Query nexus for metadata to determine the proper file name
query_url_metadata = query_url_version + '/maven-metadata.xml'
log.info('Attempting to query Nexus for the snapshot metadata using URL: {u}'.format(u=query_url_metadata))
try:
nexus_response = query_nexus(query_url=query_url_metadata, timeout_sec=timeout_sec, basic_auth=basic_auth)
except RuntimeError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem querying Nexus URL: {u}\n{e}'.format(
n=ex.__class__.__name__, u=query_url_metadata, e=str(ex))
log.error(msg)
raise RuntimeError, msg, trace
if nexus_response.status_code != 200:
raise RuntimeError('Bad response from Nexus metadata URL [{c}]: {u}'.format(
c=nexus_response.status_code, u=query_url_metadata))
# Parse the XML output
root = ET.fromstring(nexus_response.text)
log.info('Attempting to find the value of the file name...')
try:
value = root.find('versioning').find('snapshotVersions').find('snapshotVersion').find('value')
except AttributeError:
_, ex, trace = sys.exc_info()
msg = 'AttributeError: Unable to find versioning/snapshotVersions/snapshotVersion/value\n{e}'.format(
e=str(ex))
raise ValueError, msg, trace
# Ensure a value was found
if value is None:
raise ValueError('Unable to determine the value of the snapshot version')
# Get the text version
text_version = value.text
log.info('Found version value: {t}'.format(t=text_version))
# Determine the artifact file name
artifact_file_name = '{a}-{t}'.format(
a=artifact_id,
t=text_version
)
else:
# Construct the file name for releases (e.g. cons3rt-backend-install-18.14.0-package-otto.zip)
artifact_file_name = '{a}-{v}'.format(
a=artifact_id,
v=version
)
# Add classifier if provided and packaging
if classifier:
artifact_file_name += '-{c}'.format(c=classifier)
artifact_file_name += '.{p}'.format(p=packaging)
log.info('Using artifact file name: {n}'.format(n=artifact_file_name))
# Determine the full query URL
query_url = query_url_version + '/{n}'.format(n=artifact_file_name)
log.info('Using Nexus query URL: {u}'.format(u=query_url))
# Set up for download attempts
retry_sec = 5
max_retries = 6
try_num = 1
download_success = False
dl_err = None
failed_attempt = False
# Start the retry loop
while try_num <= max_retries:
# Break the loop if the download was successful
if download_success:
break
log.info('Attempting to query Nexus for the Artifact using URL: {u}'.format(u=query_url))
try:
nexus_response = query_nexus(query_url=query_url, timeout_sec=timeout_sec, basic_auth=basic_auth)
except RuntimeError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem querying Nexus URL: {u}\n{e}'.format(
n=ex.__class__.__name__, u=query_url, e=str(ex))
log.error(msg)
raise RuntimeError, msg, trace
# Attempt to get the content-length
file_size = 0
try:
file_size = int(nexus_response.headers['Content-Length'])
except(KeyError, ValueError):
log.debug('Could not get Content-Length, suppressing download status...')
suppress_status = True
else:
log.info('Artifact file size: {s}'.format(s=file_size))
# Determine the full download file path
file_name = nexus_response.url.split('/')[-1]
download_file = os.path.join(destination_dir, file_name)
# Attempt to download the content from the response
log.info('Attempting to download content of size {s} from Nexus to file: {d}'.format(
s=file_size, d=download_file))
# Remove the existing file if it exists, or exit if the file exists, overwrite is set,
# and there was not a previous failed attempted download
if os.path.isfile(download_file) and overwrite:
log.debug('File already exists, removing: {d}'.format(d=download_file))
os.remove(download_file)
elif os.path.isfile(download_file) and not overwrite and not failed_attempt:
log.info('File already downloaded, and overwrite is set to False. The Artifact will '
'not be retrieved from Nexus: {f}. To overwrite the existing downloaded file, '
'set overwrite=True'.format(f=download_file))
return
# Attempt to download content
log.debug('Attempt # {n} of {m} to download content from the Nexus response'.format(n=try_num, m=max_retries))
chunk_size = 1024
file_size_dl = 0
try:
with open(download_file, 'wb') as f:
for chunk in nexus_response.iter_content(chunk_size=chunk_size):
if chunk:
f.write(chunk)
file_size_dl += len(chunk)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status += chr(8)*(len(status)+1)
if not suppress_status:
print(status),
except(requests.exceptions.ConnectionError, requests.exceptions.RequestException, OSError):
_, ex, trace = sys.exc_info()
dl_err = '{n}: There was an error reading content from the Nexus response. Downloaded ' \
'size: {s}.\n{e}'.format(n=ex.__class__.__name__, s=file_size_dl, t=retry_sec, e=str(ex))
failed_attempt = True
log.warn(dl_err)
if try_num < max_retries:
log.info('Retrying download in {t} sec...'.format(t=retry_sec))
time.sleep(retry_sec)
else:
log.info('File download of size {s} completed without error: {f}'.format(s=file_size_dl, f=download_file))
failed_attempt = False
download_success = True
try_num += 1
# Raise an exception if the download did not complete successfully
if not download_success:
msg = 'Unable to download file content from Nexus after {n} attempts'.format(n=max_retries)
if dl_err:
msg += '\n{m}'.format(m=dl_err)
log.error(msg)
raise RuntimeError(msg)
def delete(self, obj, matches=None, mt=None, lt=None, eq=None):
'''
Delete object from the database.
:param obj:
:param matches:
:param mt:
:param lt:
:param eq:
:return:
'''
deleted = False
objects = list()
for _obj in self.get(obj):
if not self.__criteria(_obj, matches=matches, mt=mt, lt=lt, eq=eq):
objects.append(_obj)
else:
deleted = True
self.flush(obj._TABLE)
self.create_table_from_object(obj())
for _obj in objects:
self.store(_obj)
return deleted
def _get_dataset_index(catalog, dataset_identifier, dataset_title,
logger=None):
"""Devuelve el índice de un dataset en el catálogo en función de su
identificador"""
logger = logger or pydj_logger
matching_datasets = []
for idx, dataset in enumerate(catalog["catalog_dataset"]):
if dataset["dataset_identifier"] == dataset_identifier:
if dataset["dataset_title"] == dataset_title:
matching_datasets.append(idx)
else:
logger.warning(
ce.DatasetUnexpectedTitle(
dataset_identifier,
dataset["dataset_title"],
dataset_title
)
)
# There must be exactly one dataset with the provided identifier.
no_dsets_msg = "There is no dataset with identifier {}".format(
dataset_identifier)
many_dsets_msg = "There is more than one dataset with identifier {}: {}".format(
dataset_identifier, matching_datasets)
if len(matching_datasets) == 0:
logger.error(no_dsets_msg)
return None
elif len(matching_datasets) > 1:
logger.error(many_dsets_msg)
return None
else:
return matching_datasets[0]
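# Hypothetical usage sketch (not from the original source): exercising
# _get_dataset_index with a minimal catalog dict whose keys mirror those the
# function reads; passing an explicit logger avoids the module-level pydj_logger.
import logging
catalog = {"catalog_dataset": [{"dataset_identifier": "ds-1", "dataset_title": "My dataset"}]}
print(_get_dataset_index(catalog, "ds-1", "My dataset", logger=logging.getLogger(__name__)))  # 0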
def ready_argument_list(self, arguments):
"""ready argument list to be passed to the C function
:param arguments: List of arguments to be passed to the C function.
The order should match the argument list on the C function.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to the C function.
:rtype: list(Argument)
"""
ctype_args = [ None for _ in arguments]
for i, arg in enumerate(arguments):
if not isinstance(arg, (numpy.ndarray, numpy.number)):
raise TypeError("Argument is not numpy ndarray or numpy scalar %s" % type(arg))
dtype_str = str(arg.dtype)
data = arg.copy()
if isinstance(arg, numpy.ndarray):
if dtype_str in dtype_map.keys():
# In numpy <= 1.15, ndarray.ctypes.data_as does not itself keep a reference
# to its underlying array, so we need to store a reference to arg.copy()
# in the Argument object manually to avoid it being deleted.
# (This changed in numpy > 1.15.)
data_ctypes = data.ctypes.data_as(C.POINTER(dtype_map[dtype_str]))
else:
raise TypeError("unknown dtype for ndarray")
elif isinstance(arg, numpy.generic):
data_ctypes = dtype_map[dtype_str](arg)
ctype_args[i] = Argument(numpy=data, ctypes=data_ctypes)
return ctype_args
def angular_distance(ra1, dec1, ra2, dec2):
"""
Returns the angular distance between two points, two sets of points, or a set of points and one point.
:param ra1: array or float, longitude of first point(s)
:param dec1: array or float, latitude of first point(s)
:param ra2: array or float, longitude of second point(s)
:param dec2: array or float, latitude of second point(s)
:return: angular distance(s) in degrees
"""
# Vincenty formula, slower than the Haversine formula in some cases, but stable also at antipodes
lon1 = np.deg2rad(ra1)
lat1 = np.deg2rad(dec1)
lon2 = np.deg2rad(ra2)
lat2 = np.deg2rad(dec2)
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.rad2deg(np.arctan2(np.sqrt(num1 ** 2 + num2 ** 2), denominator))
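# Hypothetical usage sketch (not from the original source), assuming
# angular_distance above is in scope with numpy imported as np: two points a
# quarter of a great circle apart on the equator.
print(angular_distance(0.0, 0.0, 90.0, 0.0))  # 90.0 degrees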
def get_link_page_text(link_page):
"""
Construct the dialog box to display a list of links to the user.
"""
text = ''
for i, link in enumerate(link_page):
capped_link_text = (link['text'] if len(link['text']) <= 20
else link['text'][:19] + '…')
text += '[{}] [{}]({})\n'.format(i, capped_link_text, link['href'])
return text
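# Hypothetical usage sketch (not from the original source), assuming
# get_link_page_text above is in scope; link text longer than 20 characters
# would be truncated with an ellipsis.
links = [{'text': 'Example site', 'href': 'https://example.com'}]
print(get_link_page_text(links))  # [0] [Example site](https://example.com)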
def find_repo_by_path(i):
"""
Input: {
path - path to repo
}
Output: {
return - return code = 0, if successful
16, if repo not found (may be warning)
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
}
"""
p=i['path']
if p!='': p=os.path.normpath(p)
found=False
if p==work['dir_default_repo']:
uoa=cfg['repo_name_default']
uid=cfg['repo_uid_default']
alias=uoa
found=True
elif p==work['dir_local_repo']:
uoa=cfg['repo_name_local']
uid=cfg['repo_uid_local']
alias=uoa
found=True
else:
r=reload_repo_cache({}) # Ignore errors
if r['return']>0: return r
for q in cache_repo_info:
qq=cache_repo_info[q]
if p==qq['dict'].get('path',''):
uoa=qq['data_uoa']
uid=qq['data_uid']
alias=uid
if not is_uid(uoa): alias=uoa
found=True
break
if not found:
return {'return':16, 'error': 'repository not found in this path'}
return {'return':0, 'repo_uoa': uoa, 'repo_uid': uid, 'repo_alias':alias}
def rotate(self, clat, clon, coord_degrees=True, dj_matrix=None,
nwinrot=None):
""""
Rotate the spherical-cap windows centered on the North pole to clat
and clon, and save the spherical harmonic coefficients in the
attribute coeffs.
Usage
-----
x.rotate(clat, clon [coord_degrees, dj_matrix, nwinrot])
Parameters
----------
clat, clon : float
Latitude and longitude of the center of the rotated spherical-cap
localization windows (default in degrees).
coord_degrees : bool, optional, default = True
True if clat and clon are in degrees.
dj_matrix : ndarray, optional, default = None
The djpi2 rotation matrix computed by a call to djpi2.
nwinrot : int, optional, default = (lwin+1)**2
The number of best concentrated windows to rotate, where lwin is
the spherical harmonic bandwidth of the localization windows.
Description
-----------
This function will take the spherical-cap localization windows
centered at the North pole (and saved in the attributes tapers and
orders), rotate each function to the coordinate (clat, clon), and save
the spherical harmonic coefficients in the attribute coeffs. Each
column of coeffs contains a single window, and the coefficients are
ordered according to the convention in SHCilmToVector.
"""
self.coeffs = _np.zeros(((self.lwin + 1)**2, self.nwin))
self.clat = clat
self.clon = clon
self.coord_degrees = coord_degrees
if nwinrot is not None:
self.nwinrot = nwinrot
else:
self.nwinrot = self.nwin
if self.coord_degrees:
angles = _np.radians(_np.array([0., -(90. - clat), -clon]))
else:
angles = _np.array([0., -(_np.pi/2. - clat), -clon])
if dj_matrix is None:
if self.dj_matrix is None:
self.dj_matrix = _shtools.djpi2(self.lwin + 1)
dj_matrix = self.dj_matrix
else:
dj_matrix = self.dj_matrix
if ((coord_degrees is True and clat == 90. and clon == 0.) or
(coord_degrees is False and clat == _np.pi/2. and clon == 0.)):
for i in range(self.nwinrot):
coeffs = self._taper2coeffs(i)
self.coeffs[:, i] = _shtools.SHCilmToVector(coeffs)
else:
coeffs = _shtools.SHRotateTapers(self.tapers, self.orders,
self.nwinrot, angles, dj_matrix)
self.coeffs = coeffs
def Analyze(self, hashes):
"""Looks up hashes in VirusTotal using the VirusTotal HTTP API.
The API is documented here:
https://www.virustotal.com/en/documentation/public-api/
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: analysis results.
Raises:
RuntimeError: If the VirusTotal API key has not been set.
"""
if not self._api_key:
raise RuntimeError('No API key specified for VirusTotal lookup.')
hash_analyses = []
json_response = self._QueryHashes(hashes) or []
# VirusTotal returns a dictionary when a single hash is queried
# and a list when multiple hashes are queried.
if isinstance(json_response, dict):
json_response = [json_response]
for result in json_response:
resource = result['resource']
hash_analysis = interface.HashAnalysis(resource, result)
hash_analyses.append(hash_analysis)
return hash_analyses
def select(self, df, args, inplace=False):
"""
After joining, selects a subset of arguments
df: the result of a call to self.join(left)
args: a collection of arguments to select, as accepted by drain.util.list_expand:
- a tuple corresponding to concat_args,
e.g. [('District', '12h'), ('Distict', '24h')]
- a dict to be expanded into the above,
e.g. {'District': ['12h', '24h']}
"""
if self.prefix is None:
raise ValueError('Cannot do selection on an Aggregation without a prefix')
# run list_expand and ensure all args to tuples for validation
args = [tuple(i) for i in util.list_expand(args)]
# check that the args passed are valid
for a in args:
has_arg = False
for argument in self.arguments:
if a == tuple(argument[k] for k in self.concat_args):
has_arg = True
break
if not has_arg:
raise ValueError('Invalid argument for selection: %s' % str(a))
df = data.select_features(
df, exclude=[self.prefix + '_.*'],
include=map(lambda a: self.args_prefix(a) + '.*', args), inplace=inplace)
return df
def AddFileReadData(self, path, file_data_path):
"""Adds a "regular" file to the fake file system.
Args:
path (str): path of the file within the fake file system.
file_data_path (str): path of the file to read the file data from.
Raises:
ValueError: if the path is already set.
"""
if self.file_system.FileEntryExistsByPath(path):
raise ValueError('Path: {0:s} already set.'.format(path))
with open(file_data_path, 'rb') as file_object:
file_data = file_object.read()
self._AddParentDirectories(path)
self.file_system.AddFileEntry(path, file_data=file_data)
def download_from_link(self, link, **kwargs):
"""
Download torrent using a link.
:param link: URL Link or list of.
:param savepath: Path to download the torrent.
:param category: Label or Category of the torrent(s).
:return: Empty JSON data.
"""
# old:new format
old_arg_map = {'save_path': 'savepath'} # , 'label': 'category'}
# convert old option names to new option names
options = kwargs.copy()
for old_arg, new_arg in old_arg_map.items():
if options.get(old_arg) and not options.get(new_arg):
options[new_arg] = options[old_arg]
if type(link) is list:
options['urls'] = "\n".join(link)
else:
options['urls'] = link
# workaround to send multipart/formdata request
# http://stackoverflow.com/a/23131823/4726598
dummy_file = {'_dummy': (None, '_dummy')}
return self._post('command/download', data=options, files=dummy_file)
def _get_patches(installed_only=False):
'''
List all known patches in repos.
'''
patches = {}
cmd = [_yum(), '--quiet', 'updateinfo', 'list', 'all']
ret = __salt__['cmd.run_stdout'](
cmd,
python_shell=False
)
for line in salt.utils.itertools.split(ret, os.linesep):
inst, advisory_id, sev, pkg = re.match(r'([i\s]) ([^\s]+) +([^\s]+) +([^\s]+)',
line).groups()
if inst != 'i' and installed_only:
continue
patches[advisory_id] = {
'installed': True if inst == 'i' else False,
'summary': pkg
}
return patches
def send(self, data, room=None, include_self=True, namespace=None,
callback=None):
"""Send a message to one or more connected clients."""
return self.socketio.send(data, room=room, include_self=include_self,
namespace=namespace or self.namespace,
callback=callback)
def absent(name, provider):
'''
Ensure the named Rackspace queue is deleted.
name
Name of the Rackspace queue.
provider
Salt Cloud provider
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
is_present = list(__salt__['cloud.action']('queues_exists', provider=provider, name=name)[provider].values())[0]
if is_present:
if __opts__['test']:
ret['comment'] = 'Rackspace queue {0} is set to be removed.'.format(
name)
ret['result'] = None
return ret
queue = __salt__['cloud.action']('queues_show', provider=provider, name=name)
deleted = __salt__['cloud.action']('queues_delete', provider=provider, name=name)
if deleted:
ret['changes']['old'] = queue
ret['changes']['new'] = {}
else:
ret['result'] = False
ret['comment'] = 'Failed to delete {0} Rackspace queue.'.format(name)
else:
ret['comment'] = '{0} does not exist.'.format(name)
return ret
def sha_hash_file(filename):
""" Compute the SHA1 hash of filename """
hash_sha = hashlib.sha1()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b""):
hash_sha.update(chunk)
return hash_sha.hexdigest()
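# Hypothetical usage sketch (not from the original source), assuming
# sha_hash_file above is in scope: hash a small temporary file, then clean up.
import os
import tempfile
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')
print(sha_hash_file(tmp.name))  # 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
os.remove(tmp.name)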
def comm_all_best_paths(self, peer):
"""Shares/communicates current best paths with this peers.
Can be used to send initial updates after we have established session
with `peer`.
"""
LOG.debug('Communicating current best path for all afi/safi except'
' 1/132')
# We will enqueue best path from all global destination.
for route_family, table in self._table_manager.iter:
if route_family == RF_RTC_UC:
continue
if peer.is_mbgp_cap_valid(route_family):
for dest in table.values():
if dest.best_path:
peer.communicate_path(dest.best_path)
def _asciify_list(data):
""" Ascii-fies list values """
ret = []
for item in data:
if isinstance(item, unicode):
item = _remove_accents(item)
item = item.encode('utf-8')
elif isinstance(item, list):
item = _asciify_list(item)
elif isinstance(item, dict):
item = _asciify_dict(item)
ret.append(item)
return ret
def get_extension(self, index):
"""
Get a specific extension of the certificate by index.
Extensions on a certificate are kept in order. The index
parameter selects which extension will be returned.
:param int index: The index of the extension to retrieve.
:return: The extension at the specified index.
:rtype: :py:class:`X509Extension`
:raises IndexError: If the extension index was out of bounds.
.. versionadded:: 0.12
"""
ext = X509Extension.__new__(X509Extension)
ext._extension = _lib.X509_get_ext(self._x509, index)
if ext._extension == _ffi.NULL:
raise IndexError("extension index out of bounds")
extension = _lib.X509_EXTENSION_dup(ext._extension)
ext._extension = _ffi.gc(extension, _lib.X509_EXTENSION_free)
return ext
def join_channel(self, partner_address, partner_deposit):
"""Will be called, when we were selected as channel partner by another
node. It will fund the channel with up to the partner's deposit, but
not more than remaining funds or the initial funding per channel.
If the connection manager has no funds, this is a noop.
"""
# Consider this race condition:
#
# - Partner opens the channel and starts the deposit.
# - This node learns about the new channel, starts ConnectionManager's
# retry_connect, which will start a deposit for this half of the
# channel.
# - This node learns about the partner's deposit before its own.
# join_channel is called which will try to deposit again.
#
# To fix this race, first the node must wait for the pending operations
# to finish, because in them could be a deposit, and then deposit must
# be called only if the channel is still not funded.
token_network_proxy = self.raiden.chain.token_network(self.token_network_identifier)
# Wait for any pending operation in the channel to complete, before
# deciding on the deposit
with self.lock, token_network_proxy.channel_operations_lock[partner_address]:
channel_state = views.get_channelstate_for(
views.state_from_raiden(self.raiden),
self.token_network_identifier,
self.token_address,
partner_address,
)
if not channel_state:
return
joining_funds = min(
partner_deposit,
self._funds_remaining,
self._initial_funding_per_partner,
)
if joining_funds <= 0 or self._leaving_state:
return
if joining_funds <= channel_state.our_state.contract_balance:
return
try:
self.api.set_total_channel_deposit(
self.registry_address,
self.token_address,
partner_address,
joining_funds,
)
except RaidenRecoverableError:
log.info(
'Channel not in opened state',
node=pex(self.raiden.address),
)
except InvalidDBData:
raise
except RaidenUnrecoverableError as e:
should_crash = (
self.raiden.config['environment_type'] != Environment.PRODUCTION or
self.raiden.config['unrecoverable_error_should_crash']
)
if should_crash:
raise
log.critical(
str(e),
node=pex(self.raiden.address),
)
else:
log.info(
'Joined a channel',
node=pex(self.raiden.address),
partner=pex(partner_address),
funds=joining_funds,
)
def disable_host_flap_detection(self, host):
"""Disable flap detection for a host
Format of the line that triggers function call::
DISABLE_HOST_FLAP_DETECTION;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
if host.flap_detection_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
host.flap_detection_enabled = False
# Maybe the host was flapping, if so, stop flapping
if host.is_flapping:
host.is_flapping = False
host.flapping_changes = []
self.send_an_element(host.get_update_status_brok())
def soundex(self, name, length=8):
'''Calculate soundex of given string
This function calculates soundex for Indian language string
as well as English string.
This function is exposed as service method for JSONRPC in
SILPA framework.
:param name: String whose Soundex value to be calculated
:param length: Length of final Soundex string, if soundex
caculated is more than this it will be
truncated to length.
:return: Soundex string of `name'
'''
sndx = []
fc = name[0]
# translate alpha chars in name to soundex digits
for c in name[1:].lower():
d = str(self.soundexCode(c))
# remove all 0s from the soundex code
if d == '0':
continue
# duplicate consecutive soundex digits are skipped
if len(sndx) == 0:
sndx.append(d)
elif d != sndx[-1]:
sndx.append(d)
# append first character to result
sndx.insert(0, fc)
if get_language(name[0]) == 'en_US':
# Don't pad
return ''.join(sndx)
if len(sndx) < length:
sndx.extend(repeat('0', length))
return ''.join(sndx[:length])
return ''.join(sndx[:length])
def __status(self, job_directory, proxy_status):
""" Use proxied manager's status to compute the real
(stateful) status of job.
"""
if proxy_status == status.COMPLETE:
if not job_directory.has_metadata(JOB_FILE_POSTPROCESSED):
job_status = status.POSTPROCESSING
else:
job_status = status.COMPLETE
else:
job_status = proxy_status
return job_status
def get_queryset(self):
"""The queryset is over-ridden to show only plug events in which the strain matches the breeding strain."""
self.strain = get_object_or_404(Strain, Strain_slug__iexact=self.kwargs['slug'])
return PlugEvents.objects.filter(Breeding__Strain=self.strain)
def nest(self, node, cls=None):
"""
Create a new nested scope that is within this instance, binding
the provided node to it.
"""
if cls is None:
cls = type(self)
nested_scope = cls(node, self)
self.children.append(nested_scope)
return nested_scope
def close(self):
"""
Finalize the GDSII stream library.
"""
self._outfile.write(struct.pack('>2h', 4, 0x0400))
if self._close:
self._outfile.close()
def succeed(self, instance, action):
"""Returns if the task for the instance took place successfully
"""
uid = api.get_uid(instance)
return self.objects.get(uid, {}).get(action, {}).get('success', False)
def set_title(self, title):
"""Changes the <meta> title tag."""
self.head.title.attr(content=title)
return self
def load_variants(self, patients=None, filter_fn=None, **kwargs):
"""Load a dictionary of patient_id to varcode.VariantCollection
Parameters
----------
patients : str, optional
Filter to a subset of patients
filter_fn : function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Overrides default self.filter_fn. `None` passes through to self.filter_fn.
Returns
-------
merged_variants
Dictionary of patient_id to VariantCollection
"""
filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter)
filter_fn_name = self._get_function_name(filter_fn)
logger.debug("loading variants with filter_fn: {}".format(filter_fn_name))
patient_variants = {}
for patient in self.iter_patients(patients):
variants = self._load_single_patient_variants(patient, filter_fn, **kwargs)
if variants is not None:
patient_variants[patient.id] = variants
return patient_variants
def linkify_hd_by_h(self, hosts):
"""Replace dependent_host_name and host_name
in host dependency by the real object
:param hosts: host list, used to look for a specific one
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for hostdep in self:
try:
h_name = hostdep.host_name
dh_name = hostdep.dependent_host_name
host = hosts.find_by_name(h_name)
if host is None:
err = "Error: the host dependency got a bad host_name definition '%s'" % h_name
hostdep.add_error(err)
dephost = hosts.find_by_name(dh_name)
if dephost is None:
err = "Error: the host dependency got " \
"a bad dependent_host_name definition '%s'" % dh_name
hostdep.add_error(err)
if host:
hostdep.host_name = host.uuid
if dephost:
hostdep.dependent_host_name = dephost.uuid
except AttributeError as exp:
err = "Error: the host dependency miss a property '%s'" % exp
hostdep.add_error(err)
def create_parser():
""" Create the language parser """
select = create_select()
scan = create_scan()
delete = create_delete()
update = create_update()
insert = create_insert()
create = create_create()
drop = create_drop()
alter = create_alter()
dump = create_dump()
load = create_load()
base = (
select | scan | delete | update | insert | create | drop | alter | dump | load
)
explain = upkey("explain").setResultsName("action") + Group(
select | scan | delete | update | insert | create | drop | alter
)
analyze = upkey("analyze").setResultsName("action") + Group(
select | scan | delete | update | insert
)
dql = explain | analyze | base
dql.ignore("--" + restOfLine)
return dql
def parse_mime_type(mime_type):
"""Carves up a mime-type and returns a tuple of the (type, subtype, params) where
'params' is a dictionary of all the parameters for the media range. For example, the
media range 'application/xhtml;q=0.5' would get parsed into:
('application', 'xhtml', {'q': '0.5'})
"""
parts = mime_type.split(";")
params = dict([tuple([s.strip() for s in param.split("=")]) for param in parts[1:]])
full_type = parts[0].strip()
# Java URLConnection class sends an Accept header that includes a single "*"
# Turn it into a legal wildcard.
if full_type == '*':
full_type = '*/*'
(type, subtype) = full_type.split("/")
return (type.strip(), subtype.strip(), params)
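# Hypothetical usage sketch (not from the original source), assuming
# parse_mime_type above is in scope: the docstring's own example, round-tripped.
print(parse_mime_type('application/xhtml;q=0.5'))  # ('application', 'xhtml', {'q': '0.5'})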
def connect(self, client_id: str = None, client_secret: str = None) -> dict:
"""Authenticate application and get token bearer.
Isogeo API uses oAuth 2.0 protocol (https://tools.ietf.org/html/rfc6749)
see: http://help.isogeo.com/api/fr/authentication/groupsapps.html
:param str client_id: application oAuth2 identifier
:param str client_secret: application oAuth2 secret
"""
# instantiated or direct call
if not client_id and not client_secret:
client_id = self.client_id
client_secret = self.client_secret
else:
pass
# Basic Authentication header in Base64 (https://en.wikipedia.org/wiki/Base64)
# see: http://tools.ietf.org/html/rfc2617#section-2
# using Client Credentials Grant method
# see: http://tools.ietf.org/html/rfc6749#section-4.4
payload = {"grant_type": "client_credentials"}
head = {"user-agent": self.app_name}
# passing request to get a 24h bearer
# see: http://tools.ietf.org/html/rfc6750#section-2
id_url = "https://id.{}.isogeo.com/oauth/token".format(self.api_url)
try:
conn = self.post(
id_url,
auth=(client_id, client_secret),
headers=head,
data=payload,
proxies=self.proxies,
verify=self.ssl,
)
except ConnectionError as e:
raise ConnectionError("Connection to Isogeo ID" "failed: {}".format(e))
# just a fast check
check_params = checker.check_api_response(conn)
if check_params == 1:
pass
elif isinstance(check_params, tuple) and len(check_params) == 2:
raise ValueError(2, check_params)
# getting access
self.token = conn.json()
# add expiration date - calculating with a prevention of 10%
expiration_delay = self.token.get("expires_in", 3600) - (
self.token.get("expires_in", 3600) / 10
)
self.token["expires_at"] = datetime.utcnow() + timedelta(
seconds=expiration_delay
)
# end of method
return self.token
def buildData(self, key, default=None):
"""
Returns the build information for the given key.
:param key | <str>
default | <variant>
:return <variant>
"""
return self._buildData.get(nativestring(key), default)
def bootstrap_alert(visitor, items):
"""
Format:
[[alert(class=error)]]:
message
"""
txt = []
for x in items:
cls = x['kwargs'].get('class', '')
if cls:
cls = 'alert-%s' % cls
txt.append('<div class="alert %s">' % cls)
if 'close' in x['kwargs']:
txt.append('<button class="close" data-dismiss="alert">×</button>')
text = visitor.parse_text(x['body'], 'article')
txt.append(text)
txt.append('</div>')
return '\n'.join(txt)
def perform_pot_corr(self,
axis_grid,
pureavg,
defavg,
lattice,
q,
defect_position,
axis,
madetol=0.0001,
widthsample=1.0):
"""
For performing planar averaging potential alignment
title is the name of the plot; if you don't want a plot, leave it as None
widthsample is the width (in Angstroms) of the region in between defects where the potential alignment correction is averaged
"""
logging.debug("run Freysoldt potential alignment method for axis " + str(axis))
nx = len(axis_grid)
# shift these planar averages to have defect at origin
axfracval = lattice.get_fractional_coords(defect_position)[axis]
axbulkval = axfracval * lattice.abc[axis]
if axbulkval < 0:
axbulkval += lattice.abc[axis]
elif axbulkval > lattice.abc[axis]:
axbulkval -= lattice.abc[axis]
if axbulkval:
for i in range(nx):
if axbulkval < axis_grid[i]:
break
rollind = len(axis_grid) - i
pureavg = np.roll(pureavg, rollind)
defavg = np.roll(defavg, rollind)
# if not self._silence:
logger.debug("calculating lr part along planar avg axis")
reci_latt = lattice.reciprocal_lattice
dg = reci_latt.abc[axis]
dg /= ang_to_bohr # convert to bohr to do calculation in atomic units
# Build background charge potential with defect at origin
v_G = np.empty(len(axis_grid), np.dtype("c16"))
v_G[0] = 4 * np.pi * -q / self.dielectric * self.q_model.rho_rec_limit0
g = np.roll(np.arange(-nx / 2, nx / 2, 1, dtype=int), int(nx / 2)) * dg
g2 = np.multiply(g, g)[1:]
v_G[1:] = 4 * np.pi / (self.dielectric * g2) * -q * self.q_model.rho_rec(g2)
v_G[nx // 2] = 0 if not (nx % 2) else v_G[nx // 2]
# Get the real-space potential by performing an FFT and taking the real part
v_R = np.fft.fft(v_G)
if abs(np.imag(v_R).max()) > self.madetol:
raise Exception("imaginary part found to be %s", repr(np.imag(v_R).max()))
v_R /= (lattice.volume * ang_to_bohr**3)
v_R = np.real(v_R) * hart_to_ev
# get correction
short = (defavg - pureavg - v_R)
checkdis = int((widthsample / 2) / (axis_grid[1] - axis_grid[0]))
mid = int(len(short) / 2)
tmppot = [short[i] for i in range(mid - checkdis, mid + checkdis + 1)]
logger.debug("shifted defect position on axis (%s) to origin", repr(axbulkval))
logger.debug("means sampling region is (%f,%f)", axis_grid[mid - checkdis], axis_grid[mid + checkdis])
C = -np.mean(tmppot)
logger.debug("C = %f", C)
final_shift = [short[j] + C for j in range(len(v_R))]
v_R = [elmnt - C for elmnt in v_R]
logger.info("C value is averaged to be %f eV ", C)
logger.info("Potentital alignment energy correction (-q*delta V): %f (eV)", -q * C)
self.pot_corr = -q * C
# log plotting data:
self.metadata["pot_plot_data"][axis] = {
"Vr": v_R,
"x": axis_grid,
"dft_diff": defavg - pureavg,
"final_shift": final_shift,
"check": [mid - checkdis, mid + checkdis + 1]
}
# log uncertainty:
self.metadata["pot_corr_uncertainty_md"][axis] = {"stats": stats.describe(tmppot)._asdict(), "potcorr": -q * C}
return self.pot_corr
def from_source(cls, filename, args=None, unsaved_files=None, options=0,
index=None):
"""Create a TranslationUnit by parsing source.
This is capable of processing source code both from files on the
filesystem as well as in-memory contents.
Command-line arguments that would be passed to clang are specified as
a list via args. These can be used to specify include paths, warnings,
etc. e.g. ["-Wall", "-I/path/to/include"].
In-memory file content can be provided via unsaved_files. This is an
iterable of 2-tuples. The first element is the str filename. The
second element defines the content. Content can be provided as str
source code or as file objects (anything with a read() method). If
a file object is being used, content will be read until EOF and the
read cursor will not be reset to its original position.
options is a bitwise or of TranslationUnit.PARSE_XXX flags which will
control parsing behavior.
index is an Index instance to utilize. If not provided, a new Index
will be created for this TranslationUnit.
To parse source from the filesystem, the filename of the file to parse
is specified by the filename argument. Or, filename could be None and
the args list would contain the filename(s) to parse.
To parse source from an in-memory buffer, set filename to the virtual
filename you wish to associate with this source (e.g. "test.c"). The
contents of that file are then provided in unsaved_files.
If an error occurs, a TranslationUnitLoadError is raised.
Please note that a TranslationUnit with parser errors may be returned.
It is the caller's responsibility to check tu.diagnostics for errors.
Also note that Clang infers the source language from the extension of
the input filename. If you pass in source code containing a C++ class
declaration with the filename "test.c" parsing will fail.
"""
if args is None:
args = []
if unsaved_files is None:
unsaved_files = []
if index is None:
index = Index.create()
args_array = None
if len(args) > 0:
args_array = (c_char_p * len(args))(*[b(x) for x in args])
unsaved_array = None
if len(unsaved_files) > 0:
unsaved_array = (_CXUnsavedFile * len(unsaved_files))()
for i, (name, contents) in enumerate(unsaved_files):
if hasattr(contents, "read"):
contents = contents.read()
unsaved_array[i].name = b(name)
unsaved_array[i].contents = b(contents)
unsaved_array[i].length = len(contents)
ptr = conf.lib.clang_parseTranslationUnit(index, filename, args_array,
len(args), unsaved_array,
len(unsaved_files), options)
if not ptr:
raise TranslationUnitLoadError("Error parsing translation unit.")
return cls(ptr, index=index)
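# Hypothetical usage sketch (not from the original source): parsing an in-memory
# buffer via the libclang Python bindings, assuming clang.cindex is installed and
# can locate a libclang shared library on this machine.
import clang.cindex
source = 'int add(int a, int b) { return a + b; }'
tu = clang.cindex.TranslationUnit.from_source('demo.c', unsaved_files=[('demo.c', source)])
print([d.spelling for d in tu.diagnostics])  # expected: [] for this valid snippet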
def _start_again_message(self, message=None):
"""Simple method to form a start again message and give the answer in readable form."""
logging.debug("Start again message delivered: {}".format(message))
the_answer = ', '.join(
[str(d) for d in self.game.answer][:-1]
) + ', and ' + [str(d) for d in self.game.answer][-1]
return "{0}{1} The correct answer was {2}. Please start a new game.".format(
message,
"." if message[-1] not in [".", ",", ";", ":", "!"] else "",
the_answer
)
def getbyname(self, name):
"""Get schemas by given name.
:param str name: schema names to retrieve.
:rtype: list
:raises: KeyError if name is not registered already.
"""
if name not in self._schbyname:
raise KeyError('name {0} not registered'.format(name))
return self._schbyname[name]
def _consumeArgument(self,
memberName,
positionalArgumentKeyValueList,
kwargs,
defaultValue):
"""Returns member's value from kwargs if found or from positionalArgumentKeyValueList if found
or default value otherwise.
:type memberName: str
:type positionalArgumentKeyValueList: list(tuple)
:type kwargs: dict(string:*)
"""
# Warning: we use this dict to simplify the usage of the key-value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None.
positionalArgumentDict = dict(positionalArgumentKeyValueList)
if memberName in kwargs:
return kwargs[memberName]
if memberName in positionalArgumentDict:
return positionalArgumentDict[memberName]
return defaultValue
def proxyInit(self):
"""
To receive events the proxy has to tell the CCU / Homegear where to send the events. For that we call the init-method.
"""
# Call init() with local XML RPC config and interface_id (the name of
# the receiver) to receive events. XML RPC server has to be running.
for interface_id, proxy in self.proxies.items():
if proxy._skipinit:
continue
if proxy._callbackip and proxy._callbackport:
callbackip = proxy._callbackip
callbackport = proxy._callbackport
else:
callbackip = proxy._localip
callbackport = self._localport
LOG.debug("ServerThread.proxyInit: init('http://%s:%i', '%s')" %
(callbackip, callbackport, interface_id))
try:
proxy.init("http://%s:%i" %
(callbackip, callbackport), interface_id)
LOG.info("Proxy initialized")
except Exception as err:
LOG.debug("proxyInit: Exception: %s" % str(err))
LOG.warning("Failed to initialize proxy")
self.failed_inits.append(interface_id)
def action_fluent_variables(self) -> FluentParamsList:
'''Returns the instantiated action fluents in canonical order.
Returns:
Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name
and a list of instantiated fluents represented as strings.
'''
fluents = self.domain.action_fluents
ordering = self.domain.action_fluent_ordering
return self._fluent_params(fluents, ordering)
def pctile(self,pct,res=1000):
"""Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
"""
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
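# Hypothetical standalone sketch (not from the original source) of the same
# grid-based percentile lookup, substituting scipy's normal CDF for self.cdf.
import numpy as np
from scipy import stats
grid = np.linspace(-5.0, 5.0, 1000)
pct = 0.5
print(grid[np.argmin(np.absolute(pct - stats.norm.cdf(grid)))])  # ~0.0, the median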
def new(self, name, summary=None, description=None, protected=None,
restricted=None, download_restricted=None, contains_phi=None, tags=None,
properties=None, bill_to=None, **kwargs):
"""
:param name: The name of the project
:type name: string
:param summary: If provided, a short summary of what the project contains
:type summary: string
:param description: If provided, the new project description
:type name: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI)
:type contains_phi: boolean
:param tags: If provided, tags to associate with the project
:type tags: list of strings
:param properties: If provided, properties to associate with the project
:type properties: dict
:param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission
:type bill_to: string
Creates a new project. Initially only the user performing this action
will be in the permissions/member list, with ADMINISTER access.
See the API documentation for the `/project/new
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_
method for more info.
"""
input_hash = {}
input_hash["name"] = name
if summary is not None:
input_hash["summary"] = summary
if description is not None:
input_hash["description"] = description
if protected is not None:
input_hash["protected"] = protected
if restricted is not None:
input_hash["restricted"] = restricted
if download_restricted is not None:
input_hash["downloadRestricted"] = download_restricted
if contains_phi is not None:
input_hash["containsPHI"] = contains_phi
if bill_to is not None:
input_hash["billTo"] = bill_to
if tags is not None:
input_hash["tags"] = tags
if properties is not None:
input_hash["properties"] = properties
self.set_id(dxpy.api.project_new(input_hash, **kwargs)["id"])
self._desc = {}
return self._dxid
def normalize(self):
"""
Sum the values in a Counter, then create a new Counter
where each new value (while keeping the original key)
is equal to the original value divided by sum of all the
original values (this is sometimes referred to as the
normalization constant).
https://en.wikipedia.org/wiki/Normalization_(statistics)
"""
total = sum(self.values())
stats = {k: (v / float(total)) for k, v in self.items()}
return StatsCounter(stats)
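# Hypothetical sketch (not from the original source) of the same normalization
# idea, using a plain collections.Counter in place of StatsCounter.
from collections import Counter
counts = Counter({'a': 2, 'b': 6})
total = sum(counts.values())
print({k: v / float(total) for k, v in counts.items()})  # {'a': 0.25, 'b': 0.75}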
def _unpickle_panel_compat(self, state): # pragma: no cover
"""
Unpickle the panel.
"""
from pandas.io.pickle import _unpickle_array
_unpickle = _unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def not_implemented(self, context, **response_kwargs):
'''
If settings.DEBUG is True, raise NotImplementedError.
Otherwise, raise Http404.
:raises:`django.http.Http404` if production environment.
:raises:`NotImplementedError` if ``settings.DEBUG`` is True
'''
if settings.DEBUG:
raise NotImplementedError(_('This export type ({})is not yet supported.'.format(self.format)))
raise Http404
def trace_method(method):
"""
Decorator to catch and print the exceptions that happen within async tasks.
Note: this should be applied to methods of VSphereCheck only!
"""
def wrapper(*args, **kwargs):
try:
method(*args, **kwargs)
except Exception:
args[0].print_exception("A worker thread crashed:\n" + traceback.format_exc())
return wrapper
def decode(self, litmap):
"""Convert the DNF to an expression."""
return Or(*[And(*[litmap[idx] for idx in clause])
for clause in self.clauses])
def check_dirty(self):
'''
.. versionchanged:: 0.20
Do not log size change.
'''
if self._dirty_size is None:
if self._dirty_render:
self.render()
self._dirty_render = False
if self._dirty_draw:
self.draw()
self._dirty_draw = False
return True
width, height = self._dirty_size
self._dirty_size = None
self.reset_canvas(width, height)
self._dirty_render = True
self._dirty_draw = True
return True
def get_host_path(root, path, instance=None):
"""
Generates the host path for a container volume. If the given path is a dictionary, uses the entry of the instance
name.
:param root: Root path to prepend, if ``path`` does not already describe an absolute path.
:type root: unicode | str | AbstractLazyObject
:param path: Path string or dictionary of per-instance paths.
:type path: unicode | str | dict | AbstractLazyObject
:param instance: Optional instance name.
:type instance: unicode | str
:return: Path on the host that is mapped to the container volume.
:rtype: unicode | str
"""
r_val = resolve_value(path)
if isinstance(r_val, dict):
r_instance = instance or 'default'
r_path = resolve_value(r_val.get(r_instance))
if not r_path:
raise ValueError("No path defined for instance {0}.".format(r_instance))
else:
r_path = r_val
r_root = resolve_value(root)
if r_path and r_root and (r_path[0] != posixpath.sep):
return posixpath.join(r_root, r_path)
return r_path
def spawn_new(self, key):
"""Spawn a new task and save it to the queue."""
# Check if path exists
if not os.path.exists(self.queue[key]['path']):
self.queue[key]['status'] = 'failed'
error_msg = "The directory for this command doesn't exist anymore: {}".format(self.queue[key]['path'])
self.logger.error(error_msg)
self.queue[key]['stdout'] = ''
self.queue[key]['stderr'] = error_msg
else:
# Get file descriptors
stdout, stderr = self.get_descriptor(key)
if self.custom_shell != 'default':
# Create subprocess
self.processes[key] = subprocess.Popen(
[
self.custom_shell,
'-i',
'-c',
self.queue[key]['command'],
],
stdout=stdout,
stderr=stderr,
stdin=subprocess.PIPE,
universal_newlines=True,
preexec_fn=os.setsid,
cwd=self.queue[key]['path']
)
else:
# Create subprocess
self.processes[key] = subprocess.Popen(
self.queue[key]['command'],
shell=True,
stdout=stdout,
stderr=stderr,
stdin=subprocess.PIPE,
universal_newlines=True,
preexec_fn=os.setsid,
cwd=self.queue[key]['path']
)
self.queue[key]['status'] = 'running'
self.queue[key]['start'] = str(datetime.now().strftime("%H:%M"))
self.queue.write()
def RestrictFeedItemToGeoTarget(client, feed_item, location_id):
"""Restrict a feed item to a geo target location.
Args:
client: An AdWordsClient instance.
feed_item: A FeedItem.
location_id: The Id of the location to restrict to.
"""
# Retrieve the FeedItemTargetService
feed_item_target_service = client.GetService(
'FeedItemTargetService', version='v201809')
# Optional: Restrict the first feed item to only serve with ads for the
# specified geo target.
criterion_target = {
'xsi_type': 'FeedItemCriterionTarget',
'feedId': feed_item['feedId'],
'feedItemId': feed_item['feedItemId'],
# These IDs can be found in the documentation or retrieved with the
# LocationCriterionService.
'criterion': {
'xsi_type': 'Location',
'id': location_id
}
}
operation = {'operator': 'ADD', 'operand': criterion_target}
response = feed_item_target_service.mutate([operation])
new_location_target = response['value'][0]
print('Feed item target for feed ID %d and feed item ID %d was created to '
'restrict serving to location ID %d.' %
(new_location_target['feedId'],
new_location_target['feedItemId'],
new_location_target['criterion']['id']))
def native(s):
"""
Convert :py:class:`bytes` or :py:class:`unicode` to the native
:py:class:`str` type, using UTF-8 encoding if conversion is necessary.
:raise UnicodeError: The input string is not UTF-8 decodeable.
:raise TypeError: The input is neither :py:class:`bytes` nor
:py:class:`unicode`.
"""
if not isinstance(s, (binary_type, text_type)):
raise TypeError("%r is neither bytes nor unicode" % s)
if PY3:
if isinstance(s, binary_type):
return s.decode("utf-8")
else:
if isinstance(s, text_type):
return s.encode("utf-8")
return s
def sum_to_n(n, size, limit=None): #from http://stackoverflow.com/questions/2065553/python-get-all-numbers-that-add-up-to-a-number
"""Produce all lists of `size` positive integers in decreasing order
that add up to `n`."""
if size == 1:
yield [n]
return
if limit is None:
limit = n
start = (n + size - 1) // size
stop = min(limit, n - size + 1) + 1
for i in range(start, stop):
for tail in sum_to_n(n - i, size - 1, i):
yield [i] + tail
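# Hypothetical usage sketch (not from the original source), assuming sum_to_n
# above is in scope: all decreasing lists of 3 positive integers summing to 5.
print(list(sum_to_n(5, 3)))  # [[2, 2, 1], [3, 1, 1]]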
def random_draw(self, size=None):
"""Draw random samples of the hyperparameters.
Parameters
----------
size : None, int or array-like, optional
The number/shape of samples to draw. If None, only one sample is
returned. Default is None.
"""
if size is None:
size = 1
single_val = True
else:
single_val = False
out_shape = [len(self.bounds)]
try:
out_shape.extend(size)
except TypeError:
out_shape.append(size)
out = scipy.zeros(out_shape)
for j in xrange(0, len(self.bounds)):
if j != 2:
out[j, :] = numpy.random.uniform(low=self.bounds[j][0],
high=self.bounds[j][1],
size=size)
else:
out[j, :] = numpy.random.uniform(low=self.bounds[j][0],
high=out[j - 1, :],
size=size)
if not single_val:
return out
else:
return out.ravel()
async def add_ssh_key(self, user, key):
"""Add a public SSH key to this model.
:param str user: The username of the user
:param str key: The public ssh key
"""
key_facade = client.KeyManagerFacade.from_connection(self.connection())
return await key_facade.AddKeys([key], user)
def launch_ipython_shell(args): # pylint: disable=unused-argument
"""Open the SolveBio shell (IPython wrapper)"""
try:
import IPython # noqa
except ImportError:
_print("The SolveBio Python shell requires IPython.\n"
"To install, type: 'pip install ipython'")
return False
if hasattr(IPython, "version_info"):
if IPython.version_info > (5, 0, 0, ''):
return launch_ipython_5_shell(args)
_print("WARNING: Please upgrade IPython (you are running version: {})"
.format(IPython.__version__))
return launch_ipython_legacy_shell(args)
def transmogrify(l):
"""Fit a flat list into a treeable object."""
d = {l[0]: {}}
tmp = d
for c in l:
tmp[c] = {}
tmp = tmp[c]
return d
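# Hypothetical usage sketch (not from the original source), assuming transmogrify
# above is in scope: each list element becomes one level of nesting.
print(transmogrify(['a', 'b', 'c']))  # {'a': {'b': {'c': {}}}}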
def _show_traceback(method):
"""decorator for showing tracebacks in IPython"""
def m(self, *args, **kwargs):
try:
return(method(self, *args, **kwargs))
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warning("Exception in widget method %s: %s", method, e, exc_info=True)
else:
ip.showtraceback()
return m
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
def indel_at( self, position, check_insertions=True, check_deletions=True, one_based=True ):
"""Does the read contain an indel at the given position?
Return True if the read contains an insertion at the given position
(position must be the base before the insertion event) or if the read
contains a deletion where the base at position is deleted. Return False
otherwise."""
(insertions, deletions) = self.get_indels( one_based=one_based )
if check_insertions:
for insertion in insertions:
if insertion[0] == position:
return True
if check_deletions:
for deletion in deletions:
if deletion[0] < position < deletion[0] + deletion[1] + 1:
return True
return False
def client_for(service, service_module, thrift_service_name=None):
"""Build a synchronous client class for the given Thrift service.
The generated class accepts a TChannelSyncClient and an optional
hostport as initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered
with Hyperbahn under the name "comment", here's how this might be used:
.. code-block:: python
from tchannel.sync import TChannelSyncClient
from tchannel.sync.thrift import client_for
from comment import CommentService
CommentServiceClient = client_for('comment', CommentService)
tchannel_sync = TChannelSyncClient('my-service')
comment_client = CommentServiceClient(tchannel_sync)
future = comment_client.postComment(
articleId,
CommentService.Comment("hi")
)
result = future.result()
:param service:
Name of the Hyperbahn service being called.
:param service_module:
The Thrift-generated module for that service. This usually has
the same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use
this parameter to specify it.
:returns:
An Thrift-like class, ready to be instantiated and used
with TChannelSyncClient.
"""
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def init(
self,
tchannel,
hostport=None,
trace=False,
protocol_headers=None,
):
self.async_thrift = self.__async_client_class__(
tchannel=tchannel,
hostport=hostport,
trace=trace,
protocol_headers=protocol_headers,
)
self.threadloop = tchannel._threadloop
init.__name__ = '__init__'
methods = {
'__init__': init,
'__async_client_class__': async_client_for(
service=service,
service_module=service_module,
thrift_service_name=thrift_service_name,
)
}
methods.update({
method_name: generate_method(method_name)
for method_name in method_names
})
return type(thrift_service_name + 'Client', (object,), methods) | 0.000393 |
def add_blacklisted_directories(self,
directories,
remove_from_stored_directories=True):
"""
Adds `directories` to be blacklisted. Blacklisted directories will not
be returned or searched recursively when calling the
`collect_directories` method.
`directories` may be a single instance or an iterable. Recommend
passing in absolute paths, but method will try to convert to absolute
paths based on the current working directory.
If `remove_from_stored_directories` is true, all `directories`
will be removed from `self.plugin_directories`
"""
absolute_paths = util.to_absolute_paths(directories)
self.blacklisted_directories.update(absolute_paths)
if remove_from_stored_directories:
plug_dirs = self.plugin_directories
plug_dirs = util.remove_from_set(plug_dirs,
directories) | 0.003914 |
def interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2,
maxIter=1e5, copy=True):
    '''
    Same as interpolate2dStructuredIDW, but using the point-spread method.
    This is faster if there are bigger connected masked areas and the
    border length is smaller.
    Replaces all values in [grid] indicated by [mask] with the inverse
    distance weighted interpolation of all values within px +- kernel.
    [power] -> distance weighting factor: 1/distance**[power]
    [copy] -> False: a bit faster, but modifies 'grid' and 'mask'
    '''
assert grid.shape == mask.shape, 'grid and mask shape are different'
    border = np.zeros(shape=mask.shape, dtype=bool)  # np.bool was removed in NumPy 1.24
if copy:
        # copy mask as well because it will be modified later:
mask = mask.copy()
grid = grid.copy()
return _calc(grid, mask, border, kernel, power, maxIter) | 0.001063 |
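An illustrative call, assuming the snippet's `np` alias and a grid with one connected masked patch (shapes and values are made up for the sketch):
import numpy as np

grid = np.random.rand(50, 50)
mask = np.zeros((50, 50), dtype=bool)
mask[20:25, 20:25] = True  # a connected masked area to be filled in
filled = interpolate2dStructuredPointSpreadIDW(grid, mask, kernel=15, power=2)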
def tar_files(self, path: Path) -> bytes:
""" Returns a tar with the git repository.
"""
tarstream = BytesIO()
tar = tarfile.TarFile(fileobj=tarstream, mode='w')
tar.add(str(path), arcname="data", recursive=True)
tar.close()
return tarstream.getvalue() | 0.006494 |
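A hedged usage sketch; `repo` stands in for whatever object exposes tar_files, and the paths are illustrative:
from pathlib import Path

def save_repo_tar(repo, src='/tmp/my-repo', dst='repo.tar'):
    blob = repo.tar_files(Path(src))  # uncompressed tar archive as bytes
    with open(dst, 'wb') as fh:
        fh.write(blob)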
def load_store(cls, store, decoder=None):
"""Create a new dataset from the contents of a backends.*DataStore
object
"""
variables, attributes = store.load()
if decoder:
variables, attributes = decoder(variables, attributes)
obj = cls(variables, attrs=attributes)
obj._file_obj = store
return obj | 0.005391 |
def subscribe(object_type: str, subscriber: str,
callback_handler: Callable = None) -> EventQueue:
"""Subscribe to the specified object type.
Returns an EventQueue object which can be used to query events
associated with the object type for this subscriber.
Args:
object_type (str): Object type
subscriber (str): Subscriber name
callback_handler (function, optional): Callback handler function.
Returns:
EventQueue, event queue object.
"""
key = _keys.subscribers(object_type)
DB.remove_from_list(key, subscriber)
DB.append_to_list(key, subscriber)
return EventQueue(object_type, subscriber, callback_handler) | 0.001431 |
def delete(self, file_id):
"""Delete a file from GridFS by ``"_id"``.
Removes all data belonging to the file with ``"_id"``:
`file_id`.
.. warning:: Any processes/threads reading from the file while
this method is executing will likely see an invalid/corrupt
file. Care should be taken to avoid concurrent reads to a file
while it is being deleted.
:Parameters:
- `file_id`: ``"_id"`` of the file to delete
.. versionadded:: 1.6
"""
return defer.DeferredList([
self.__files.remove({"_id": file_id}, safe=True),
self.__chunks.remove({"files_id": file_id})
]) | 0.002857 |
def normalize_name(s):
"""Convert a string into a valid python attribute name.
    This function is called to convert ASCII strings to something that can
    pass as a python attribute name, to be used with namedtuples.
>>> str(normalize_name('class'))
'class_'
>>> str(normalize_name('a-name'))
'a_name'
>>> str(normalize_name('a n\u00e4me'))
'a_name'
>>> str(normalize_name('Name'))
'Name'
>>> str(normalize_name(''))
'_'
>>> str(normalize_name('1'))
'_1'
"""
s = s.replace('-', '_').replace('.', '_').replace(' ', '_')
if s in keyword.kwlist:
return s + '_'
s = '_'.join(slug(ss, lowercase=False) for ss in s.split('_'))
if not s:
s = '_'
if s[0] not in string.ascii_letters + '_':
s = '_' + s
return s | 0.002475 |
def _find_im_paths(self, subj_mp, obs_name, target_polarity,
max_paths=1, max_path_length=5):
"""Check for a source/target path in the influence map.
Parameters
----------
subj_mp : pysb.MonomerPattern
MonomerPattern corresponding to the subject of the Statement
being checked.
obs_name : str
Name of the PySB model Observable corresponding to the
object/target of the Statement being checked.
target_polarity : int
Whether the influence in the Statement is positive (1) or negative
(-1).
Returns
-------
PathResult
PathResult object indicating the results of the attempt to find
a path.
"""
logger.info(('Running path finding with max_paths=%d,'
' max_path_length=%d') % (max_paths, max_path_length))
# Find rules in the model corresponding to the input
if subj_mp is None:
input_rule_set = None
else:
input_rule_set = self._get_input_rules(subj_mp)
if not input_rule_set:
return PathResult(False, 'INPUT_RULES_NOT_FOUND',
max_paths, max_path_length)
logger.info('Checking path metrics between %s and %s with polarity %s' %
(subj_mp, obs_name, target_polarity))
# -- Route to the path sampling function --
if self.do_sampling:
if not has_pg:
raise Exception('The paths_graph package could not be '
'imported.')
return self._sample_paths(input_rule_set, obs_name, target_polarity,
max_paths, max_path_length)
# -- Do Breadth-First Enumeration --
# Generate the predecessors to our observable and count the paths
path_lengths = []
path_metrics = []
for source, polarity, path_length in \
_find_sources(self.get_im(), obs_name, input_rule_set,
target_polarity):
pm = PathMetric(source, obs_name, polarity, path_length)
path_metrics.append(pm)
path_lengths.append(path_length)
logger.info('Finding paths between %s and %s with polarity %s' %
(subj_mp, obs_name, target_polarity))
# Now, look for paths
paths = []
if path_metrics and max_paths == 0:
pr = PathResult(True, 'MAX_PATHS_ZERO',
max_paths, max_path_length)
pr.path_metrics = path_metrics
return pr
elif path_metrics:
if min(path_lengths) <= max_path_length:
pr = PathResult(True, 'PATHS_FOUND', max_paths, max_path_length)
pr.path_metrics = path_metrics
# Get the first path
path_iter = enumerate(_find_sources_with_paths(
self.get_im(), obs_name,
input_rule_set, target_polarity))
for path_ix, path in path_iter:
flipped = _flip(self.get_im(), path)
pr.add_path(flipped)
if len(pr.paths) >= max_paths:
break
return pr
# There are no paths shorter than the max path length, so we
# don't bother trying to get them
else:
pr = PathResult(True, 'MAX_PATH_LENGTH_EXCEEDED',
max_paths, max_path_length)
pr.path_metrics = path_metrics
return pr
else:
return PathResult(False, 'NO_PATHS_FOUND',
max_paths, max_path_length) | 0.002073 |
def toarray(self):
"""
Converts a ktensor into a dense multidimensional ndarray
Returns
-------
arr : np.ndarray
Fully computed multidimensional array whose shape matches
the original ktensor.
"""
A = dot(self.lmbda, khatrirao(tuple(self.U)).T)
return A.reshape(self.shape) | 0.005525 |
def cart_add(self, items, CartId=None, HMAC=None, **kwargs):
"""CartAdd.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
It is not possible to create an empty cart!
example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:param CartId: Id of Cart
:param HMAC: HMAC of Cart, see CartCreate for more info
:return:
An :class:`~.AmazonCart`.
"""
if not CartId or not HMAC:
raise CartException('CartId and HMAC required for CartAdd call')
if isinstance(items, dict):
items = [items]
if len(items) > 10:
raise CartException("You can't add more than 10 items at once")
offer_id_key_template = 'Item.{0}.OfferListingId'
quantity_key_template = 'Item.{0}.Quantity'
for i, item in enumerate(items):
kwargs[offer_id_key_template.format(i)] = item['offer_id']
kwargs[quantity_key_template.format(i)] = item['quantity']
response = self.api.CartAdd(CartId=CartId, HMAC=HMAC, **kwargs)
root = objectify.fromstring(response)
new_cart = AmazonCart(root)
self._check_for_cart_error(new_cart)
return new_cart | 0.001485 |
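A hedged example of adding two offers to an existing cart; the offer ids are fake, and the `cart.cart_id` / `cart.hmac` attribute names are assumptions about AmazonCart, not confirmed by the source:
def add_two_offers(amazon, cart):
    return amazon.cart_add(
        items=[{'offer_id': 'OFFER_A', 'quantity': 1},
               {'offer_id': 'OFFER_B', 'quantity': 2}],
        CartId=cart.cart_id,  # attribute name assumed
        HMAC=cart.hmac,       # attribute name assumed
    )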
def merge(self, revision=None):
"""
Merge a revision into the current branch (without committing the result).
:param revision: The revision to merge in (a string or :data:`None`,
defaults to :attr:`default_revision`).
:raises: The following exceptions can be raised:
- :exc:`~vcs_repo_mgr.exceptions.MergeConflictError` if the
merge command reports an error and merge conflicts are
detected that can't be (or haven't been) resolved
interactively.
- :exc:`~executor.ExternalCommandFailed` if the merge command
reports an error but no merge conflicts are detected.
Refer to the documentation of :attr:`merge_conflict_handler` if you
want to customize the handling of merge conflicts.
"""
# Make sure the local repository exists and supports a working tree.
self.create()
self.ensure_working_tree()
# Merge the specified revision into the current branch.
revision = revision or self.default_revision
logger.info("Merging revision '%s' in %s ..", revision, format_path(self.local))
try:
self.context.execute(*self.get_merge_command(revision))
except ExternalCommandFailed as e:
# Check for merge conflicts.
conflicts = self.merge_conflicts
if conflicts:
# Always warn about merge conflicts and log the relevant filenames.
explanation = format("Merge failed due to conflicts in %s! (%s)",
pluralize(len(conflicts), "file"),
concatenate(sorted(conflicts)))
logger.warning("%s", explanation)
if self.merge_conflict_handler(e):
# Trust the operator (or caller) and swallow the exception.
return
else:
# Raise a specific exception for merge conflicts.
raise MergeConflictError(explanation)
else:
# Don't swallow the exception or obscure the traceback
# in case we're not `allowed' to handle the exception.
raise | 0.002606 |
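Assumed usage against the repository API shown here (branch name is illustrative):
def merge_feature(repo):
    # Merge without committing; conflicts raise MergeConflictError unless
    # the configured merge_conflict_handler swallows them.
    repo.merge('feature-branch')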
def plot_factor_rank_auto_correlation(factor_autocorrelation,
period=1,
ax=None):
"""
Plots factor rank autocorrelation over time.
See factor_rank_autocorrelation for more details.
Parameters
----------
factor_autocorrelation : pd.Series
Rolling 1 period (defined by time_rule) autocorrelation
of factor values.
    period : int, optional
Period over which the autocorrelation is calculated
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
f, ax = plt.subplots(1, 1, figsize=(18, 6))
factor_autocorrelation.plot(title='{} Period Factor Rank Autocorrelation'
.format(period), ax=ax)
ax.set(ylabel='Autocorrelation Coefficient', xlabel='')
ax.axhline(0.0, linestyle='-', color='black', lw=1)
ax.text(.05, .95, "Mean %.3f" % factor_autocorrelation.mean(),
fontsize=16,
bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
transform=ax.transAxes,
verticalalignment='top')
return ax | 0.000804 |
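A self-contained sketch with synthetic data, assuming the function above is importable (the series values are made up purely to exercise the plot):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

idx = pd.date_range('2020-01-01', periods=120, freq='D')
fake_ac = pd.Series(np.random.uniform(-0.2, 0.8, len(idx)), index=idx)
ax = plot_factor_rank_auto_correlation(fake_ac, period=1)
plt.show()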
def get_driver_script(driver, script=None): # noqa: E501
"""Retrieve the contents of a script
Retrieve the contents of a script # noqa: E501
:param driver: The driver to use for the request. ie. github
:type driver: str
:param script: The script name.
:type script: str
:rtype: Response
"""
response = errorIfUnauthorized(role='user')
if response:
return response
else:
response = ApitaxResponse()
driver: Driver = LoadedDrivers.getDriver(driver)
response.body.add({'content': driver.getDriverScript(script)})
return Response(status=200, body=response.getResponseBody()) | 0.001538 |
def sample_colormap(cmap_name, n_samples):
"""
Sample a colormap from matplotlib
"""
colors = []
colormap = cm.cmap_d[cmap_name]
for i in np.linspace(0, 1, n_samples):
colors.append(colormap(i))
return colors | 0.004082 |
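Example call. Note that `cm.cmap_d`, used above, has been deprecated and removed in newer matplotlib releases; on modern versions `matplotlib.colormaps[cmap_name]` is the equivalent lookup:
colors = sample_colormap('viridis', 5)  # five evenly spaced RGBA tuples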
def schedule_enable(enable):
'''
Enable/disable automatic update scheduling.
:param enable: True/On/Yes/1 to turn on automatic updates. False/No/Off/0
to turn off automatic updates. If this value is empty, the current
status will be returned.
    :type enable: bool, str
:return: True if scheduling is enabled, False if disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.schedule_enable on|off
'''
status = salt.utils.mac_utils.validate_enabled(enable)
cmd = ['softwareupdate',
'--schedule',
salt.utils.mac_utils.validate_enabled(status)]
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.validate_enabled(schedule_enabled()) == status | 0.001276 |
def bootstrap_messages(context, *args, **kwargs):
"""
Show django.contrib.messages Messages in Bootstrap alert containers.
In order to make the alerts dismissable (with the close button),
we have to set the jquery parameter too when using the
bootstrap_javascript tag.
Uses the template ``bootstrap3/messages.html``.
**Tag name**::
bootstrap_messages
**Parameters**:
None.
**Usage**::
{% bootstrap_messages %}
**Example**::
{% bootstrap_javascript jquery=1 %}
{% bootstrap_messages %}
"""
# Force Django 1.8+ style, so dicts and not Context
# TODO: This may be due to a bug in Django 1.8/1.9+
if Context and isinstance(context, Context):
context = context.flatten()
context.update({"message_constants": message_constants})
return render_template_file("bootstrap3/messages.html", context=context) | 0.001088 |
def send_xapi_statements(self, lrs_configuration, days):
"""
Send xAPI analytics data of the enterprise learners to the given LRS.
Arguments:
lrs_configuration (XAPILRSConfiguration): Configuration object containing LRS configurations
of the LRS where to send xAPI learner analytics.
days (int): Include course enrollment of this number of days.
"""
for course_enrollment in self.get_course_enrollments(lrs_configuration.enterprise_customer, days):
try:
send_course_enrollment_statement(lrs_configuration, course_enrollment)
except ClientError:
LOGGER.exception(
'Client error while sending course enrollment to xAPI for'
' enterprise customer {enterprise_customer}.'.format(
enterprise_customer=lrs_configuration.enterprise_customer.name
)
) | 0.006122 |
def _format_envvar(param):
"""Format the envvars of a `click.Option` or `click.Argument`."""
yield '.. envvar:: {}'.format(param.envvar)
yield ' :noindex:'
yield ''
if isinstance(param, click.Argument):
param_ref = param.human_readable_name
else:
# if a user has defined an opt with multiple "aliases", always use the
# first. For example, if '--foo' or '-f' are possible, use '--foo'.
param_ref = param.opts[0]
yield _indent('Provide a default for :option:`{}`'.format(param_ref)) | 0.001838 |
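An illustrative rendering for a click option that reads from an environment variable; the option and env-var names are made up, the module's `_indent` helper is assumed to be in scope, and the exact indentation of the final line depends on that helper:
import click

opt = click.Option(['--token'], envvar='API_TOKEN')
print('\n'.join(_format_envvar(opt)))
# .. envvar:: API_TOKEN
#    :noindex:
#
#    Provide a default for :option:`--token`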
def is_focused(self):
"""
Checks that *at least one* matched element is focused. More
specifically, it checks whether the element is document.activeElement.
If no matching element is focused, this returns `False`.
Returns:
bool
"""
active_el = self.browser.execute_script("return document.activeElement")
query_results = self.map(lambda el: el == active_el, 'focused').results
if query_results:
return any(query_results)
return False | 0.005566 |
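A hedged usage sketch in a bok_choy-style page-object test; the `q(css=...)` query accessor is my assumption about the surrounding API, and the selector is illustrative:
def search_box_has_focus(page):
    return page.q(css='#search-input').is_focused()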
def post(request):
"""
method called on POST request on this view
:param django.http.HttpRequest request: The current request object
    :return: ``HttpResponse(u"yes\\n")`` if the POSTed tuple (username, password, service)
        is valid (i.e. (username, password) is valid and username is allowed on service).
``HttpResponse(u"no\\n…")`` otherwise, with possibly an error message on the second
line.
:rtype: django.http.HttpResponse
"""
username = request.POST.get('username')
password = request.POST.get('password')
service = request.POST.get('service')
secret = request.POST.get('secret')
if not settings.CAS_AUTH_SHARED_SECRET:
return HttpResponse(
"no\nplease set CAS_AUTH_SHARED_SECRET",
content_type="text/plain; charset=utf-8"
)
if secret != settings.CAS_AUTH_SHARED_SECRET:
return HttpResponse(u"no\n", content_type="text/plain; charset=utf-8")
if not username or not password or not service:
return HttpResponse(u"no\n", content_type="text/plain; charset=utf-8")
form = forms.UserCredential(
request.POST,
initial={
'service': service,
'method': 'POST',
'warn': False
}
)
if form.is_valid():
try:
user = models.User.objects.get_or_create(
username=form.cleaned_data['username'],
session_key=request.session.session_key
)[0]
user.save()
# is the service allowed
service_pattern = ServicePattern.validate(service)
# is the current user allowed on this service
service_pattern.check_user(user)
if not request.session.get("authenticated"):
user.delete()
return HttpResponse(u"yes\n", content_type="text/plain; charset=utf-8")
except (ServicePattern.DoesNotExist, models.ServicePatternException):
return HttpResponse(u"no\n", content_type="text/plain; charset=utf-8")
else:
return HttpResponse(u"no\n", content_type="text/plain; charset=utf-8") | 0.004655 |
def getURL(self, size='Medium', urlType='url'):
"""Retrieves a url for the photo. (flickr.photos.getSizes)
urlType - 'url' or 'source'
'url' - flickr page of photo
'source' - image file
"""
method = 'flickr.photos.getSizes'
data = _doget(method, photo_id=self.id)
for psize in data.rsp.sizes.size:
if psize.label == size:
return getattr(psize, urlType)
        raise FlickrError("No URL found") | 0.006148 |
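Assumed usage against this (legacy) flickr wrapper; the two urlType values map to the photo page and the image file respectively:
def medium_urls(photo):
    page_url = photo.getURL(size='Medium', urlType='url')      # flickr photo page
    image_url = photo.getURL(size='Medium', urlType='source')  # direct image file
    return page_url, image_url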
def capture_stdout():
"""Intercept standard output in a with-context
:return: cStringIO instance
    >>> with capture_stdout() as stdout:
    ...     ...  # run code whose stdout should be captured
    >>> print(stdout.getvalue())
"""
stdout = sys.stdout
sys.stdout = six.moves.cStringIO()
try:
yield sys.stdout
finally:
sys.stdout = stdout | 0.002941 |
def generate_control_field(self, revision=None):
"""
Generate a Debian control file field referring for this repository and revision.
:param revision: A reference to a revision, most likely the name of a
branch (a string, defaults to :attr:`default_revision`).
:returns: A tuple with two strings: The name of the field and the value.
This generates a `Vcs-Bzr` field for Bazaar repositories, a `Vcs-Git`
field for Git repositories and a `Vcs-Hg` field for Mercurial
repositories. Here's an example based on the public git repository of
the `vcs-repo-mgr` project:
>>> from vcs_repo_mgr import coerce_repository
>>> repository = coerce_repository('https://github.com/xolox/python-vcs-repo-mgr.git')
>>> repository.generate_control_field()
('Vcs-Git', 'https://github.com/xolox/python-vcs-repo-mgr.git#b617731b6c0ca746665f597d2f24b8814b137ebc')
"""
value = "%s#%s" % (self.remote or self.local, self.find_revision_id(revision))
return self.control_field, value | 0.007233 |
def _get(self, key, section=None, default=_onion_dict_guard):
"""Try to get the key from each dict in turn.
If you specify the optional section it looks there first.
"""
if section is not None:
section_dict = self.__sections.get(section, {})
if key in section_dict:
return section_dict[key]
for d in self.__dictionaries:
if key in d:
return d[key]
if default is _onion_dict_guard:
raise KeyError(key)
else:
return default | 0.003515 |
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, 'variable', other)
try:
return (self.dims == other.dims and
(self._data is other._data or
equiv(self.data, other.data)))
except (TypeError, AttributeError):
return False | 0.002899 |
def execute(self):
"""Run all child tasks concurrently in separate threads.
Return 0 after all child tasks have completed execution.
"""
self.count = 0
self.taskset = []
self.results = {}
self.totaltime = time.time()
# Register termination callbacks for all my child tasks.
for task in list(self.taskseq):
self.taskset.append(task)
task.add_callback('resolved', self.child_done, self.count)
self.count += 1
self.numtasks = self.count
# Now start each child task.
with self.regcond:
for task in list(self.taskset):
task.initialize(self)
task.start()
# Account for time needed to start subtasks
self.totaltime = time.time() - self.totaltime
# Now give up the critical section and wait for last child
# task to terminate.
while self.count > 0:
self.regcond.wait()
# Scan results for errors (exceptions) and raise the first one we find
for key in self.results.keys():
value = self.results[key]
if isinstance(value, Exception):
(count, task) = key
self.logger.error("Child task %s terminated with exception: %s" % (
task.tag, str(value)))
raise value
return 0 | 0.002098 |
def run_example(path):
""" Returns returncode of example """
cmd = "{0} {1}".format(sys.executable, path)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
res = proc.communicate()
if proc.returncode:
print(res[1].decode())
return proc.returncode | 0.006309 |
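Assumed usage; the script path is illustrative, and a non-zero return code signals failure:
if run_example('examples/demo.py') != 0:
    print('example failed')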
def get_object_or_none(model, *args, **kwargs):
"""
Like get_object_or_404, but doesn't throw an exception.
Allows querying for an object that might not exist without triggering
an exception.
"""
try:
return model._default_manager.get(*args, **kwargs)
except model.DoesNotExist:
return None | 0.002985 |
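A typical call site inside a configured Django project (the model and lookup field are illustrative):
from django.contrib.auth.models import User

def find_user(username):
    # Returns None instead of raising DoesNotExist when the row is missing.
    return get_object_or_none(User, username=username)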
def isoline_vmag(hemi, isolines=None, surface='midgray', min_length=2, **kw):
'''
isoline_vmag(hemi) calculates the visual magnification function f using the default set of
iso-lines (as returned by neuropythy.vision.visual_isolines()). The hemi argument may
alternately be a mesh object.
isoline_vmag(hemi, isolns) uses the given iso-lines rather than the default ones.
    The return value of this function is a dictionary whose keys are 'radial' and 'tangential'
    (matching the keys actually constructed below); each value is a dictionary of the estimated
    visual magnification data ('visual_coordinates', 'visual_magnification', 'visual_lengths',
    'surface_lengths') along the corresponding iso-lines.
'''
from neuropythy.util import (curry, zinv)
from neuropythy.mri import is_cortex
from neuropythy.vision import visual_isolines
from neuropythy.geometry import to_mesh
    # if there are no isolines, get them
if isolines is None: isolines = visual_isolines(hemi, **kw)
    # see if isolines is a lazy map of visual areas; if so, return a lazy map that recurses
if pimms.is_vector(isolines.keys(), 'int'):
f = lambda k: isoline_vmag(isolines[k], surface=surface, min_length=min_length)
return pimms.lazy_map({k:curry(f, k) for k in six.iterkeys(isolines)})
mesh = to_mesh((hemi, surface))
# filter by min length
if min_length is not None:
isolines = {k: {kk: {kkk: [vvv[ii] for ii in iis] for (kkk,vvv) in six.iteritems(vv)}
for (kk,vv) in six.iteritems(v)
for iis in [[ii for (ii,u) in enumerate(vv['polar_angles'])
if len(u) >= min_length]]
if len(iis) > 0}
for (k,v) in six.iteritems(isolines)}
(rlns,tlns) = [isolines[k] for k in ['eccentricity', 'polar_angle']]
if len(rlns) < 2: raise ValueError('fewer than 2 iso-eccentricity lines found')
if len(tlns) < 2: raise ValueError('fewer than 2 iso-angle lines found')
# grab the visual/surface lines
((rvlns,tvlns),(rslns,tslns)) = [[[u for lns in six.itervalues(xlns) for u in lns[k]]
for xlns in (rlns,tlns)]
for k in ('visual_coordinates','surface_coordinates')]
# calculate some distances
(rslen,tslen) = [[np.sqrt(np.sum((sx[:,:-1] - sx[:,1:])**2, 0)) for sx in slns]
for slns in (rslns,tslns)]
(rvlen,tvlen) = [[np.sqrt(np.sum((vx[:,:-1] - vx[:,1:])**2, 0)) for vx in vlns]
for vlns in (rvlns,tvlns)]
(rvxy, tvxy) = [[0.5*(vx[:,:-1] + vx[:,1:]) for vx in vlns] for vlns in (rvlns,tvlns)]
(rvlen,tvlen,rslen,tslen) = [np.concatenate(u) for u in (rvlen,tvlen,rslen,tslen)]
(rvxy,tvxy) = [np.hstack(vxy) for vxy in (rvxy,tvxy)]
(rvmag,tvmag) = [vlen * zinv(slen) for (vlen,slen) in zip([rvlen,tvlen],[rslen,tslen])]
return {k: {'visual_coordinates':vxy, 'visual_magnification': vmag,
'visual_lengths': vlen, 'surface_lengths': slen}
for (k,vxy,vmag,vlen,slen) in zip(['radial','tangential'], [rvxy,tvxy],
[rvmag,tvmag], [rvlen,tvlen], [rslen,tslen])} | 0.022643 |
def _parse_qcd_segment(self, fptr):
"""Parse the QCD segment.
Parameters
----------
fptr : file
Open file object.
Returns
-------
QCDSegment
The current QCD segment.
"""
offset = fptr.tell() - 2
read_buffer = fptr.read(3)
length, sqcd = struct.unpack('>HB', read_buffer)
spqcd = fptr.read(length - 3)
return QCDsegment(sqcd, spqcd, length, offset) | 0.004175 |
def get_proficiencies_by_query(self, proficiency_query):
"""Gets a list of ``Proficiencies`` matching the given proficiency query.
arg: proficiency_query (osid.learning.ProficiencyQuery): the
proficiency query
return: (osid.learning.ProficiencyList) - the returned
``ProficiencyList``
raise: NullArgument - ``proficiency_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``proficiency_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.get_resources_by_query
and_list = list()
or_list = list()
for term in proficiency_query._query_terms:
if '$in' in proficiency_query._query_terms[term] and '$nin' in proficiency_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': proficiency_query._query_terms[term]['$in']}},
{term: {'$nin': proficiency_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: proficiency_query._query_terms[term]})
for term in proficiency_query._keyword_terms:
or_list.append({term: proficiency_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('learning',
collection='Proficiency',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.ProficiencyList(result, runtime=self._runtime, proxy=self._proxy) | 0.003329 |
def period(self):
"""A Period tuple representing the daily start and end time."""
start_time = self.root.findtext('daily_start_time')
if start_time:
return Period(text_to_time(start_time), text_to_time(self.root.findtext('daily_end_time')))
return Period(datetime.time(0, 0), datetime.time(23, 59)) | 0.008772 |
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2013-09-08 - Written - Bovy (IAS)
"""
r2= R**2.+z**2.
rb= nu.sqrt(r2+self.b2)
dPhidrr= -1./rb/(self.b+rb)**2.
return dPhidrr*R | 0.016949 |
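A hedged consistency check on the returned expression: it matches the radial force of an amplitude-normalized isochrone-style potential, which this snippet appears to implement (the identification is my inference, not stated in the source):
\Phi(r) = -\frac{1}{b + r_b}, \qquad r_b \equiv \sqrt{R^2 + z^2 + b^2}
\frac{\partial \Phi}{\partial r} = \frac{r}{r_b\,(b + r_b)^2}, \qquad
F_R = -\frac{\partial \Phi}{\partial r}\,\frac{R}{r} = -\frac{R}{r_b\,(b + r_b)^2}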
async def send(self, pkt):
"""Send a packet to the client."""
if not await self.check_ping_timeout():
return
if self.upgrading:
self.packet_backlog.append(pkt)
else:
await self.queue.put(pkt)
self.server.logger.info('%s: Sending packet %s data %s',
self.sid, packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes)
else '<binary>') | 0.003781 |