text | score |
---|---|
def main():
"""
Main method.
"""
run_config = _parse_args(sys.argv[1:])
if run_config.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
gitlab_config = GitLabConfig(run_config.url, run_config.token)
project_updater_builder = FileBasedProjectVariablesUpdaterBuilder(
setting_repositories=run_config.setting_repositories,
default_setting_extensions=run_config.default_setting_extensions)
updater = FileBasedProjectsVariablesUpdater(config_location=run_config.config_location, gitlab_config=gitlab_config,
project_variables_updater_builder=project_updater_builder)
updater.update() | 0.004144 |
def get_gateway_info(self):
"""
Return the gateway info.
Returns a Command.
"""
def process_result(result):
return GatewayInfo(result)
return Command('get',
[ROOT_GATEWAY, ATTR_GATEWAY_INFO],
process_result=process_result) | 0.006061 |
def trap_ctrl_c_ctrl_break() -> None:
"""
Prevent ``CTRL-C``, ``CTRL-BREAK``, and similar signals from doing
anything.
See
- https://docs.python.org/3/library/signal.html#signal.SIG_IGN
- https://msdn.microsoft.com/en-us/library/xdkz3x12.aspx
- https://msdn.microsoft.com/en-us/library/windows/desktop/ms682541(v=vs.85).aspx
Under Windows, the only options are:
=========== ======================= =====================================
Signal Meaning Comment
=========== ======================= =====================================
SIGABRT abnormal termination
SIGFPE floating-point error
SIGILL illegal instruction
SIGINT CTRL+C signal -- trapped here
SIGSEGV illegal storage access
SIGTERM termination request -- trapped here
SIGBREAK CTRL+BREAK -- trapped here under Windows
=========== ======================= =====================================
In Linux, you also find:
=========== =============================
Signal Meaning
=========== =============================
SIGBUS bus error / unaligned access
=========== =============================
To ignore a signal, you can do:
.. code-block:: python
signal.signal(signal.SIGINT, signal.SIG_IGN) # SIG_IGN = "ignore me"
or pass a specified handler, as in the code here.
""" # noqa
signal.signal(signal.SIGINT, ctrl_c_trapper)
signal.signal(signal.SIGTERM, sigterm_trapper)
if platform.system() == 'Windows':
# SIGBREAK isn't in the Linux signal module
# noinspection PyUnresolvedReferences
signal.signal(signal.SIGBREAK, ctrl_break_trapper) | 0.000555 |
def fetch_projects(self, **kwargs):
"""
List projects owned
Fetch projects that the currently authenticated user has access to because he or she is the owner of the project.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.fetch_projects(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: PaginatedProjectResults
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.fetch_projects_with_http_info(**kwargs)
else:
(data) = self.fetch_projects_with_http_info(**kwargs)
return data | 0.002817 |
def get_xml_root(xml_file):
"""Returns XML root."""
try:
xml_root = etree.parse(os.path.expanduser(xml_file), NO_BLANKS_PARSER).getroot()
# pylint: disable=broad-except
except Exception as err:
raise Dump2PolarionException("Failed to parse XML file '{}': {}".format(xml_file, err))
return xml_root | 0.009009 |
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table | 0.0033 |
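For illustration, here is a minimal sketch (not part of the original module) of how the table above can drive a byte-at-a-time, reflected CRC-32 computation; the helper name `crc32` is an assumption.
def crc32(data, table=None):
    """Illustrative helper: standard reflected CRC-32 of a byte string."""
    table = table or _GenerateCRCTable()
    crc = 0xFFFFFFFF
    for byte in data:
        # Combine the next byte with the low byte of the running CRC, then
        # shift in the precomputed remainder for that byte.
        crc = table[(crc ^ byte) & 0xFF] ^ (crc >> 8)
    return crc ^ 0xFFFFFFFF
# Check value: crc32(b"123456789") == 0xCBF43926, matching
# binascii.crc32(b"123456789") & 0xFFFFFFFF.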
def appendSolution(self,new_solution):
'''
Appends one solution to another to create a ConsumerSolution whose
attributes are lists. Used in ConsMarkovModel, where we append solutions
*conditional* on a particular value of a Markov state to each other in
order to get the entire solution.
Parameters
----------
new_solution : ConsumerSolution
The solution to a consumption-saving problem; each attribute is a
list representing state-conditional values or functions.
Returns
-------
None
'''
if type(self.cFunc)!=list:
# Then we assume that self is an empty initialized solution instance.
# Begin by checking this is so.
assert NullFunc().distance(self.cFunc) == 0, 'appendSolution called incorrectly!'
# We will need the attributes of the solution instance to be lists. Do that here.
self.cFunc = [new_solution.cFunc]
self.vFunc = [new_solution.vFunc]
self.vPfunc = [new_solution.vPfunc]
self.vPPfunc = [new_solution.vPPfunc]
self.mNrmMin = [new_solution.mNrmMin]
else:
self.cFunc.append(new_solution.cFunc)
self.vFunc.append(new_solution.vFunc)
self.vPfunc.append(new_solution.vPfunc)
self.vPPfunc.append(new_solution.vPPfunc)
self.mNrmMin.append(new_solution.mNrmMin) | 0.008655 |
def write(self, session, data):
"""Writes data to device or interface synchronously.
Corresponds to viWrite function of the VISA library.
:param session: Unique logical identifier to a session.
:param data: data to be written.
:type data: str
:return: Number of bytes actually transferred, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
"""
try:
sess = self.sessions[session]
except KeyError:
return constants.StatusCode.error_invalid_object
try:
return sess.write(data)
except AttributeError:
return constants.StatusCode.error_nonsupported_operation | 0.004082 |
def get_all(self):
"""Gets all items in file."""
logger.debug('Fetching items. Path: {data_file}'.format(
data_file=self.data_file
))
return load_file(self.client, self.bucket_name, self.data_file) | 0.008264 |
def fi_iban_bank_info(v: str) -> (str, str):
"""
Returns BIC code and bank name from FI IBAN number.
:param v: IBAN account number
:return: (BIC code, bank name) or ('', '') if not found
"""
from jutil.bank_const_fi import FI_BIC_BY_ACCOUNT_NUMBER, FI_BANK_NAME_BY_BIC
v = iban_filter(v)
bic = FI_BIC_BY_ACCOUNT_NUMBER.get(v[4:7], None)
return (bic, FI_BANK_NAME_BY_BIC[bic]) if bic is not None else ('', '') | 0.004505 |
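A hedged usage sketch: the IBAN below is a placeholder (not a real account), and the returned BIC/name depend entirely on `jutil`'s FI bank-constant tables and on `iban_filter` normalising the spacing.
bic, bank_name = fi_iban_bank_info("FI21 1234 5600 0007 85")  # placeholder IBAN
if bic:
    print(bic, bank_name)       # whatever bank code "123" maps to in the tables
else:
    print("unknown bank code")  # ('', '') is returned when no match is found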
def storage_type(self):
"""Depending on input data type, the storage type is either
"field" (complex) or "phase" (real)."""
nf = np.load(str(self.path), mmap_mode="c", allow_pickle=False)
if np.iscomplexobj(nf):
st = "field"
else:
st = "phase"
return st | 0.006154 |
def main():
"""
Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil BWA pipeline
Alignment of fastq reads via BWA-kit
General usage:
1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-bwa run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment
Structure of the BWA pipeline (per sample)
0 --> 1
0 = Download sample
1 = Run BWA-kit
===================================================================
:Dependencies:
cURL: apt-get install curl
Toil: pip install toil
Docker: wget -qO- https://get.docker.com/ | sh
Optional:
S3AM: pip install --s3am (requires ~/.boto config file)
Boto: pip install boto
"""
# Define Parser object and add to Toil
parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
# Generate subparsers
subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
# Run subparser
parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')
group = parser_run.add_mutually_exclusive_group()
parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,
help='Path to the (filled in) config file, generated with "generate-config".')
group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,
help='Path to the (filled in) manifest file, generated with "generate-manifest". '
'\nDefault value: "%(default)s".')
group.add_argument('--sample', nargs='+', action=required_length(2, 3),
help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')
# Print docstring help if no arguments provided
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
Job.Runner.addToilOptions(parser_run)
args = parser.parse_args()
# Parse subparsers related to generation of config and manifest
cwd = os.getcwd()
if args.command == 'generate-config' or args.command == 'generate':
generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)
if args.command == 'generate-manifest' or args.command == 'generate':
generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)
# Pipeline execution
elif args.command == 'run':
require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))
if not args.sample:
args.sample = None
require(os.path.exists(args.manifest), '{} not found and no sample provided. '
'Please run "generate-manifest"'.format(args.manifest))
# Parse config
parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
config = argparse.Namespace(**parsed_config)
config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint
samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest)
# Sanity checks
require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))
require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))
# Launch Pipeline
Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args) | 0.005049 |
def get_block(self, block_identifier: BlockSpecification) -> Dict:
"""Given a block number, query the chain to get its corresponding block hash"""
return self.web3.eth.getBlock(block_identifier) | 0.014286 |
def does_not_mutate(func):
"""Prevents methods from mutating the receiver"""
def wrapper(self, *args, **kwargs):
new = self.copy()
return func(new, *args, **kwargs)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper | 0.003584 |
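A self-contained illustration of the decorator; the `Vector` class below is hypothetical and only exists to show that the receiver is left untouched while the returned copy carries the change.
class Vector:
    def __init__(self, values):
        self.values = list(values)
    def copy(self):
        return Vector(self.values)
    @does_not_mutate
    def scaled(self, factor):
        # Mutates `self`, but the decorator hands the method a copy.
        self.values = [v * factor for v in self.values]
        return self
v = Vector([1, 2, 3])
w = v.scaled(10)
print(v.values, w.values)   # [1, 2, 3] [10, 20, 30]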
def get_params_parser():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(usage=ARTHUR_USAGE_MSG,
description=ARTHUR_DESC_MSG,
epilog=ARTHUR_EPILOG_MSG,
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
ElasticOcean.add_params(parser)
parser.add_argument('-h', '--help', action='help',
help=argparse.SUPPRESS)
parser.add_argument('-g', '--debug', dest='debug',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument("--no_incremental", action='store_true',
help="don't use last state for data source")
parser.add_argument("--fetch_cache", action='store_true',
help="Use cache for item retrieval")
parser.add_argument("--enrich", action='store_true',
help="Enrich items after retrieving")
parser.add_argument("--enrich_only", action='store_true',
help="Only enrich items (DEPRECATED, use --only-enrich)")
parser.add_argument("--only-enrich", dest='enrich_only', action='store_true',
help="Only enrich items")
parser.add_argument("--filter-raw", dest='filter_raw',
help="Filter raw items. Format: field:value")
parser.add_argument("--filters-raw-prefix", nargs='*',
help="Filter raw items with prefix filter. Format: field:value field:value ...")
parser.add_argument("--events-enrich", dest='events_enrich', action='store_true',
help="Enrich events in items")
parser.add_argument('--index', help="Ocean index name")
parser.add_argument('--index-enrich', dest="index_enrich", help="Ocean enriched index name")
parser.add_argument('--db-user', help="User for db connection (default to root)",
default="root")
parser.add_argument('--db-password', help="Password for db connection (default empty)",
default="")
parser.add_argument('--db-host', help="Host for db connection (default to mariadb)",
default="mariadb")
parser.add_argument('--db-projects-map', help="Projects Mapping DB")
parser.add_argument('--json-projects-map', help="Projects Mapping JSON file")
parser.add_argument('--project', help="Project for the repository (origin)")
parser.add_argument('--refresh-projects', action='store_true', help="Refresh projects in enriched items")
parser.add_argument('--db-sortinghat', help="SortingHat DB")
parser.add_argument('--only-identities', action='store_true', help="Only add identities to SortingHat DB")
parser.add_argument('--refresh-identities', action='store_true', help="Refresh identities in enriched items")
parser.add_argument('--author_id', nargs='*', help="Field author_ids to be refreshed")
parser.add_argument('--author_uuid', nargs='*', help="Field author_uuids to be refreshed")
parser.add_argument('--github-token', help="If provided, github usernames will be retrieved in git enrich.")
parser.add_argument('--jenkins-rename-file', help="CSV mapping file with nodes renamed schema.")
parser.add_argument('--studies', action='store_true', help="Execute studies after enrichment.")
parser.add_argument('--only-studies', action='store_true', help="Execute only studies.")
parser.add_argument('--bulk-size', default=1000, type=int,
help="Number of items per bulk request to Elasticsearch.")
parser.add_argument('--scroll-size', default=100, type=int,
help="Number of items to get from Elasticsearch when scrolling.")
parser.add_argument('--arthur', action='store_true', help="Read items from arthur redis queue")
parser.add_argument('--pair-programming', action='store_true', help="Do pair programming in git enrich")
parser.add_argument('--studies-list', nargs='*', help="List of studies to be executed")
parser.add_argument('backend', help=argparse.SUPPRESS)
parser.add_argument('backend_args', nargs=argparse.REMAINDER,
help=argparse.SUPPRESS)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser | 0.00589 |
def model_to_pymatbridge(model, variable_name="model", matlab=None):
"""send the model to a MATLAB workspace through pymatbridge
This model can then be manipulated through the COBRA toolbox
Parameters
----------
variable_name : str
The variable name to which the model will be assigned in the
MATLAB workspace
matlab : None or pymatbridge.Matlab instance
The MATLAB workspace to which the variable will be sent. If
this is None, then this will be sent to the same environment
used in IPython magics.
"""
if scipy_sparse is None:
raise ImportError("`model_to_pymatbridge` requires scipy!")
if matlab is None: # assumed to be running an IPython magic
from IPython import get_ipython
matlab = get_ipython().magics_manager.registry["MatlabMagics"].Matlab
model_info = create_mat_dict(model)
S = model_info["S"].todok()
model_info["S"] = 0
temp_S_name = "cobra_pymatbridge_temp_" + uuid4().hex
_check(matlab.set_variable(variable_name, model_info))
_check(matlab.set_variable(temp_S_name, S))
_check(matlab.run_code("%s.S = %s;" % (variable_name, temp_S_name)))
# all vectors need to be transposed
for i in model_info.keys():
if i == "S":
continue
_check(matlab.run_code("{0}.{1} = {0}.{1}';".format(variable_name, i)))
_check(matlab.run_code("clear %s;" % temp_S_name)) | 0.000695 |
def prepend(self, bs):
"""Prepend a bitstring to the current bitstring.
bs -- The bitstring to prepend.
"""
bs = self._converttobitstring(bs)
self._prepend(bs)
self._pos += bs.len | 0.008734 |
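For context, a small sketch of the observable behaviour through the public `bitstring` API (assuming the `bitstring` package and the method above, which also shifts the read position by the prepended length):
from bitstring import BitStream
s = BitStream('0b1101')
s.read(2)            # advance the read position to 2
s.prepend('0b00')    # bits are inserted at the front...
print(s.bin)         # '001101'
print(s.pos)         # ...and the position shifts by the prepended length (now 4)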
def get_adjacent_index(I, shape, size):
"""
Find indices 2d-adjacent to those in I. Helper function for get_border*.
Parameters
----------
I : np.ndarray(dtype=int)
indices in the flattened region
shape : tuple(int, int)
region shape
size : int
region size (technically computable from shape)
Returns
-------
J : np.ndarray(dtype=int)
indices orthogonally and diagonally adjacent to I
"""
m, n = shape
In = I % n
bL = In != 0
bR = In != n-1
J = np.concatenate([
# orthogonally adjacent
I - n,
I[bL] - 1,
I[bR] + 1,
I + n,
# diagonally adjacent
I[bL] - n-1,
I[bR] - n+1,
I[bL] + n-1,
I[bR] + n+1])
# remove indices outside the array
J = J[(J>=0) & (J<size)]
return J | 0.005794 |
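A small worked example (an added illustration, not from the original module): for the centre cell of a 3x3 region, all eight orthogonal and diagonal neighbours are returned.
import numpy as np
# Index 4 is the centre of a 3x3 region flattened in row-major order:
#   0 1 2
#   3 4 5
#   6 7 8
I = np.array([4])
print(sorted(get_adjacent_index(I, shape=(3, 3), size=9)))
# -> [0, 1, 2, 3, 5, 6, 7, 8]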
def networkTwoMode(self, tag1, tag2, directed = False, recordType = True, nodeCount = True, edgeWeight = True, stemmerTag1 = None, stemmerTag2 = None, edgeAttribute = None):
"""Creates a network of the objects found by two WOS tags _tag1_ and _tag2_, each node marked by which tag spawned it making the resultant graph bipartite.
A **networkTwoMode()** looks at each Record in the `RecordCollection` and extracts its values for the tags given by _tag1_ and _tag2_, e.g. the `'WC'` and `'LA'` tags. Then, for each object returned by each tag, an edge is created between it and every other object of the other tag. So the WOS defined subject tag `'WC'` and language tag `'LA'` will give a two-mode network showing the connections between subjects and languages. Each node will have an attribute called `'type'` that gives the tag that created it, or both if both created it; e.g. the node `'English'` would have its type attribute be `'LA'`.
The number of times each object occurs is counted if _nodeCount_ is `True`, and the edges count the number of co-occurrences if _edgeWeight_ is `True`. Both are `True` by default.
If _directed_ is `True` the network will be directed, with the first tag as the source and the second as the destination.
# Parameters
_tag1_ : `str`
> A two character WOS tag or one of the full names for a tag, the source of edges on the graph
_tag2_ : `str`
> A two character WOS tag or one of the full names for a tag, the target of edges on the graph
_directed_ : `optional [bool]`
> Default `False`, if `True` the returned network is directed
_nodeCount_ : `optional [bool]`
> Default `True`, if `True` each node will have an attribute called "count" that contains an int giving the number of times the object occurred.
_edgeWeight_ : `optional [bool]`
> Default `True`, if `True` each edge will have an attribute called "weight" that contains an int giving the number of times the two objects co-occurred.
_stemmerTag1_ : `optional [func]`
> Default `None`, If _stemmerTag1_ is a callable object, basically a function or possibly a class, it will be called for the ID of every node given by _tag1_ in the graph, all IDs are strings.
> For example: the function `f = lambda x: x[0]` if given as the stemmer will cause all IDs to be the first character of their unstemmed IDs. e.g. the title `'Goos-Hanchen and Imbert-Fedorov shifts for leaky guided modes'` will create the node `'G'`.
_stemmerTag2_ : `optional [func]`
> Default `None`, see _stemmerTag1_ as it is the same but for _tag2_
# Returns
`networkx Graph or networkx DiGraph`
> A networkx Graph with the objects of the tags _tag1_ and _tag2_ as nodes and their co-occurrences as edges.
"""
if not isinstance(tag1, str):
raise TagError("{} is not a string it cannot be a tag.".format(tag1))
if not isinstance(tag2, str):
raise TagError("{} is not a string it cannot be a tag.".format(tag2))
if stemmerTag1 is not None:
if isinstance(stemmerTag1, collections.abc.Callable):
stemCheck = True
else:
raise TagError("stemmerTag1 must be callable, e.g. a function or class with a __call__ method.")
else:
stemmerTag1 = lambda x: x
if stemmerTag2 is not None:
if isinstance(stemmerTag2, collections.abc.Callable):
stemCheck = True
else:
raise TagError("stemmerTag2 must be callable, e.g. a function or class with a __call__ method.")
else:
stemmerTag2 = lambda x: x
count = 0
progArgs = (0, "Starting to make a two mode network of " + tag1 + " and " + tag2)
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if edgeAttribute is not None:
if directed:
grph = nx.MultiDiGraph()
else:
grph = nx.MultiGraph()
else:
if directed:
grph = nx.DiGraph()
else:
grph = nx.Graph()
for R in self:
if PBar:
count += 1
PBar.updateVal(count / len(self), "Analyzing: {}".format(R))
if edgeAttribute is not None:
edgeVals = R.get(edgeAttribute, [])
if not isinstance(edgeVals, list):
edgeVals = [edgeVals]
contents1 = R.get(tag1)
contents2 = R.get(tag2)
if isinstance(contents1, list):
contents1 = [stemmerTag1(str(v)) for v in contents1]
elif contents1 is None:
contents1 = []
else:
contents1 = [stemmerTag1(str(contents1))]
if isinstance(contents2, list):
contents2 = [stemmerTag2(str(v)) for v in contents2]
elif contents2 is None:
contents2 = []
else:
contents2 = [stemmerTag2(str(contents2))]
for node1 in contents1:
for node2 in contents2:
if edgeAttribute:
for edgeVal in edgeVals:
if grph.has_edge(node1, node2, key = edgeVal):
if edgeWeight:
grph.edges[node1, node2, edgeVal]['weight'] += 1
else:
if edgeWeight:
attrDict = {'key' : edgeVal, 'weight' : 1}
else:
attrDict = {'key' : edgeVal}
grph.add_edge(node1, node2, **attrDict)
elif edgeWeight:
try:
grph.edges[node1, node2]['weight'] += 1
except KeyError:
grph.add_edge(node1, node2, weight = 1)
else:
if not grph.has_edge(node1, node2):
grph.add_edge(node1, node2)
if nodeCount:
try:
grph.node[node1]['count'] += 1
except KeyError:
try:
grph.node[node1]['count'] = 1
if recordType:
grph.node[node1]['type'] = tag1
except KeyError:
if recordType:
grph.add_node(node1, type = tag1)
else:
grph.add_node(node1)
else:
if not grph.has_node(node1):
if recordType:
grph.add_node(node1, type = tag1)
else:
grph.add_node(node1)
elif recordType:
if 'type' not in grph.node[node1]:
grph.node[node1]['type'] = tag1
for node2 in contents2:
if nodeCount:
try:
grph.node[node2]['count'] += 1
except KeyError:
try:
grph.node[node2]['count'] = 1
if recordType:
grph.node[node2]['type'] = tag2
except KeyError:
grph.add_node(node2, count = 1)
if recordType:
grph.node[node2]['type'] = tag2
else:
if not grph.has_node(node2):
if recordType:
grph.add_node(node2, type = tag2)
else:
grph.add_node(node2)
elif recordType:
if 'type' not in grph.node[node2]:
grph.node[node2]['type'] = tag2
if PBar:
PBar.finish("Done making a two mode network of " + tag1 + " and " + tag2)
return grph | 0.006534 |
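For orientation, a hedged usage sketch: `RC` is assumed to be a metaknowledge `RecordCollection`, and the tags follow the subject/language example from the docstring above.
G = RC.networkTwoMode('WC', 'LA', nodeCount=True, edgeWeight=True)
for node, data in G.nodes(data=True):
    # Each node records which tag created it ('WC' or 'LA') and how often it occurred.
    print(node, data.get('type'), data.get('count'))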
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"' | 0.00678 |
def query_info(self, jid, *,
node=None, require_fresh=False, timeout=None,
no_cache=False):
"""
Query the features and identities of the specified entity.
:param jid: The entity to query.
:type jid: :class:`aioxmpp.JID`
:param node: The node to query.
:type node: :class:`str` or :data:`None`
:param require_fresh: Boolean flag to discard previous caches.
:type require_fresh: :class:`bool`
:param timeout: Optional timeout for the response.
:type timeout: :class:`float`
:param no_cache: Boolean flag to forbid caching of the request.
:type no_cache: :class:`bool`
:rtype: :class:`.xso.InfoQuery`
:return: Service discovery information of the `node` at `jid`.
The requests are cached. This means that only one request is ever fired
for a given target (identified by the `jid` and the `node`). The
request is re-used for all subsequent requests to that identity.
If `require_fresh` is set to true, the above does not hold and a fresh
request is always created. The new request is the request which will be
used as alias for subsequent requests to the same identity.
The visible effects of this are twofold:
* Caching: Results of requests are implicitly cached
* Aliasing: Two concurrent requests will be aliased to one request to
save computing resources
Both can be turned off by using `require_fresh`. In general, you should
not need to use `require_fresh`, as all requests are implicitly
cancelled whenever the underlying session gets destroyed.
`no_cache` can be set to true to prevent future requests to be aliased
to this request, i.e. the request is not stored in the internal request
cache. This does not affect `require_fresh`, i.e. if a cached result is
available, it is used.
The `timeout` can be used to restrict the time to wait for a
response. If the timeout triggers, :class:`TimeoutError` is raised.
If :meth:`~.Client.send` raises an
exception, all queries which were running simultaneously for the same
target re-raise that exception. The result is not cached though. If a
new query is sent at a later point for the same target, a new query is
actually sent, independent of the value chosen for `require_fresh`.
.. versionchanged:: 0.9
The `no_cache` argument was added.
"""
key = jid, node
if not require_fresh:
try:
request = self._info_pending[key]
except KeyError:
pass
else:
try:
return (yield from request)
except asyncio.CancelledError:
pass
request = asyncio.ensure_future(
self.send_and_decode_info_query(jid, node)
)
request.add_done_callback(
functools.partial(
self._handle_info_received,
jid,
node
)
)
if not no_cache:
self._info_pending[key] = request
try:
if timeout is not None:
try:
result = yield from asyncio.wait_for(
request,
timeout=timeout)
except asyncio.TimeoutError:
raise TimeoutError()
else:
result = yield from request
except: # NOQA
if request.done():
try:
pending = self._info_pending[key]
except KeyError:
pass
else:
if pending is request:
del self._info_pending[key]
raise
return result | 0.001011 |
def add_execution_profile(self, name, profile, pool_wait_timeout=5):
"""
Adds an :class:`.ExecutionProfile` to the cluster. This makes it available for use by ``name`` in :meth:`.Session.execute`
and :meth:`.Session.execute_async`. This method will raise if the profile already exists.
Normally profiles will be injected at cluster initialization via ``Cluster(execution_profiles)``. This method
provides a way of adding them dynamically.
Adding a new profile updates the connection pools according to the specified ``load_balancing_policy``. By default,
this method will wait up to five seconds for the pool creation to complete, so the profile can be used immediately
upon return. This behavior can be controlled using ``pool_wait_timeout`` (see
`concurrent.futures.wait <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.wait>`_
for timeout semantics).
"""
if not isinstance(profile, ExecutionProfile):
raise TypeError("profile must be an instance of ExecutionProfile")
if self._config_mode == _ConfigMode.LEGACY:
raise ValueError("Cannot add execution profiles when legacy parameters are set explicitly.")
if name in self.profile_manager.profiles:
raise ValueError("Profile {} already exists".format(name))
contact_points_but_no_lbp = (
self._contact_points_explicit and not
profile._load_balancing_policy_explicit)
if contact_points_but_no_lbp:
log.warning(
'Tried to add an ExecutionProfile with name {name}. '
'{self} was explicitly configured with contact_points, but '
'{ep} was not explicitly configured with a '
'load_balancing_policy. In the next major version, trying to '
'add an ExecutionProfile without an explicitly configured LBP '
'to a cluster with explicitly configured contact_points will '
'raise an exception; please specify a load-balancing policy '
'in the ExecutionProfile.'
''.format(name=repr(name), self=self, ep=profile))
self.profile_manager.profiles[name] = profile
profile.load_balancing_policy.populate(self, self.metadata.all_hosts())
# on_up after populate allows things like DCA LBP to choose default local dc
for host in filter(lambda h: h.is_up, self.metadata.all_hosts()):
profile.load_balancing_policy.on_up(host)
futures = set()
for session in tuple(self.sessions):
futures.update(session.update_created_pools())
_, not_done = wait_futures(futures, pool_wait_timeout)
if not_done:
raise OperationTimedOut("Failed to create all new connection pools in the %ss timeout." % pool_wait_timeout) | 0.004158 |
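A hedged usage sketch against the DataStax Python driver's public API; the profile name 'analytics' and its settings are arbitrary examples.
from cassandra.cluster import Cluster, ExecutionProfile
from cassandra.policies import RoundRobinPolicy
cluster = Cluster()                 # default contact points
session = cluster.connect()
# Register a new profile after startup; connection pools are created before this returns.
profile = ExecutionProfile(load_balancing_policy=RoundRobinPolicy(),
                           request_timeout=30.0)
cluster.add_execution_profile('analytics', profile)
session.execute("SELECT release_version FROM system.local",
                execution_profile='analytics')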
def load(self, template, parameters=None):
"""
'template'
Loads template text from a 'string' or 'file' type
Template text contains {{TOKEN}} symbols to be replaced
'parameters'
parameters contains environment-specific sections as discussed in the class documentation.
the 'parameters' arg can be None, a 'string', 'file', or 'dictionary'
Whether from a string or file, or already in a dictionary, parameters must follow the
logical format documented in the class docstring.
if 'parameters' is omitted, template resolution will proceed with AWS, credential, and
version lookups.
"""
# load template
if isinstance(template, str):
self.template = template
elif isinstance(template, file):
try:
self.template = template.read()
template.close()
except IOError as error:
fail("Exception loading template from file: ", error)
else:
fail("Unknown type loading template; expected string or file: " + type(template))
# load parameters, if any
if parameters:
if isinstance(parameters, str):
try:
self.parameters = yaml.safe_load(parameters)
except ValueError as error:
fail("Exception loading parameters from string: ", error)
elif isinstance(parameters, file):
try:
self.parameters = yaml.safe_load(parameters)
parameters.close()
except ValueError as error:
fail("Exception loading parameters from file: {}".format(error), sys.exc_info())
elif isinstance(parameters, dict):
self.parameters = parameters
else:
fail("Unknown type loading parameters; expected string, file, or dict: " + type(parameters))
# sanity check the loaded parameters
if "params" not in self.parameters:
fail("'params' field not found in parameters")
# just the params, please
self.parameters = self.parameters["params"]
# are all the keys valid (must have legal characters)
for k in set().union(*(self.parameters[d].keys() for d in self.parameters.keys())):
invalid_char = re.search(ILLEGAL_PARAMETER_CHARS, k)
if invalid_char:
fail("illegal character: '" + invalid_char.group(0) + "' in parameter key: " + k) | 0.01268 |
def _save_config(jira_url, username, password, error_reporting):
"""
Saves the username and password to the config
"""
# Delete what is there before we re-write. New user means new everything
os.path.exists(_config) and os.remove(_config)
config = ConfigParser.SafeConfigParser()
config.read(_config)
if not config.has_section('jira'):
config.add_section('jira')
if 'http' not in jira_url:
jira_url = 'http://' + jira_url
try:
resp = urllib.urlopen(jira_url)
url = urlparse.urlparse(resp.url)
jira_url = url.scheme + "://" + url.netloc
except IOError, e:
print "It doesn't appear that {0} is responding to a request.\
Please make sure that you typed the hostname, \
i.e. jira.atlassian.com.\n{1}".format(jira_url, e)
sys.exit(1)
config.set('jira', 'url', jira_url)
config.set('jira', 'username', username)
config.set('jira', 'password', base64.b64encode(password))
config.set('jira', 'error_reporting', str(error_reporting))
with open(_config, 'w') as ini:
os.chmod(_config, 0600)
config.write(ini) | 0.000855 |
def create_from_intermediate(cls, crypto, intermediate_point, seed, compressed=True, include_cfrm=True):
"""
Given an intermediate point, given to us by "owner", generate an address
and encrypted private key that can be decoded by the passphrase used to generate
the intermediate point.
"""
flagbyte = b'\x20' if compressed else b'\x00'
payload = b58decode_check(str(intermediate_point))
ownerentropy = payload[8:16]
passpoint = payload[16:-4]
x, y = uncompress(passpoint)
if not is_py2:
seed = bytes(seed, 'ascii')
seedb = hexlify(sha256(seed).digest())[:24]
factorb = int(hexlify(sha256(sha256(seedb).digest()).digest()), 16)
generatedaddress = pubtoaddr(fast_multiply((x, y), factorb))
wrap = lambda x: x
if not is_py2:
wrap = lambda x: bytes(x, 'ascii')
addresshash = sha256(sha256(wrap(generatedaddress)).digest()).digest()[:4]
encrypted_seedb = scrypt.hash(passpoint, addresshash + ownerentropy, 1024, 1, 1, 64)
derivedhalf1, derivedhalf2 = encrypted_seedb[:32], encrypted_seedb[32:]
aes = AES.new(derivedhalf2)
block1 = long(seedb[0:16], 16) ^ long(hexlify(derivedhalf1[0:16]), 16)
encryptedpart1 = aes.encrypt(unhexlify('%0.32x' % block1))
block2 = long(hexlify(encryptedpart1[8:16]) + seedb[16:24], 16) ^ long(hexlify(derivedhalf1[16:32]), 16)
encryptedpart2 = aes.encrypt(unhexlify('%0.32x' % block2))
# 39 bytes 2 1 4 8 8 16
payload = b"\x01\x43" + flagbyte + addresshash + ownerentropy + encryptedpart1[:8] + encryptedpart2
encrypted_pk = b58encode_check(payload)
if not include_cfrm:
return generatedaddress, encrypted_pk
confirmation_code = Bip38ConfirmationCode.create(flagbyte, ownerentropy, factorb, derivedhalf1, derivedhalf2, addresshash)
return generatedaddress, cls(crypto, encrypted_pk), confirmation_code | 0.006241 |
def get_by_provider_display_name(self, provider_display_name):
"""
Gets a SAN Manager by provider display name.
Args:
provider_display_name: The provider display name of the SAN Manager.
Returns:
dict: SAN Manager.
"""
san_managers = self._client.get_all()
result = [x for x in san_managers if x['providerDisplayName'] == provider_display_name]
return result[0] if result else None | 0.006536 |
def endpoints(self):
"""
Gets the Endpoints API client.
Returns:
Endpoints:
"""
if not self.__endpoints:
self.__endpoints = Endpoints(self.__connection)
return self.__endpoints | 0.008032 |
def get_authorization_form(self, *args, **kwargs):
"""Pass through to provider AuthorizationAdminSession.get_authorization_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'authorization_record_types' in kwargs:
return self.get_authorization_form_for_create(*args, **kwargs)
else:
return self.get_authorization_form_for_update(*args, **kwargs) | 0.006897 |
def multinomLog2(selectors):
"""
Calculates the base-2 logarithm of the multinomial coefficient defined by the counts in ``selectors``.
selectors: list of integers
"""
ln2 = 0.69314718055994528622
noAll = sum(selectors)
lgNf = math.lgamma(noAll + 1.0) / ln2 # log2(N!)
lgnFac = []
for selector in selectors:
if selector == 0 or selector == 1:
lgnFac.append(0.0)
elif selector == 2:
lgnFac.append(1.0)
elif selector == noAll:
lgnFac.append(lgNf)
else:
lgnFac.append(math.lgamma(selector + 1.0) / ln2)
return lgNf - sum(lgnFac) | 0.001664 |
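A quick worked check (an added illustration): for ``selectors = [2, 1]`` the multinomial coefficient is 3!/(2!*1!) = 3, so the result should be about log2(3).
import math
print(multinomLog2([2, 1]), math.log2(3))      # both ≈ 1.585, since 3!/(2!*1!) = 3
print(multinomLog2([1, 1, 1]), math.log2(6))   # both ≈ 2.585, since 3!/(1!*1!*1!) = 6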
def connect(self, *, db=None):
"""
Attempt to connect to device. If unable, attempt to connect to a controller database
(so the user can use previously saved data).
"""
if not self.properties.network:
self.new_state(DeviceFromDB)
else:
try:
name = self.properties.network.read(
"{} device {} objectName".format(
self.properties.address, self.properties.device_id
)
)
segmentation = self.properties.network.read(
"{} device {} segmentationSupported".format(
self.properties.address, self.properties.device_id
)
)
if not self.segmentation_supported or segmentation not in (
"segmentedTransmit",
"segmentedBoth",
):
segmentation_supported = False
self._log.debug("Segmentation not supported")
else:
segmentation_supported = True
if name:
if segmentation_supported:
self.new_state(RPMDeviceConnected)
else:
self.new_state(RPDeviceConnected)
except SegmentationNotSupported:
self.segmentation_supported = False
self._log.warning(
"Segmentation not supported.... expect slow responses."
)
self.new_state(RPDeviceConnected)
except (NoResponseFromController, AttributeError) as error:
if self.properties.db_name:
self.new_state(DeviceFromDB)
else:
self._log.warning(
"Offline: provide database name to load stored data."
)
self._log.warning("Ex. controller.connect(db = 'backup')") | 0.001913 |
def colocate(self, others, why):
"""
Colocate this operator with another.
"""
if isinstance(self, Marker):
return
colocate_tag = '__spl_' + why + '$' + str(self.index)
self._colocate_tag(colocate_tag)
for op in others:
op._colocate_tag(colocate_tag) | 0.006079 |
def datasets(self):
"""List of datasets in this mart."""
if self._datasets is None:
self._datasets = self._fetch_datasets()
return self._datasets | 0.01105 |
def to_dataframe(self, dtypes=None):
"""Create a :class:`pandas.DataFrame` of all rows in the stream.
This method requires the pandas library to create a data frame and the
fastavro library to parse row blocks.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names to pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream.
"""
if pandas is None:
raise ImportError(_PANDAS_REQUIRED)
frames = []
for page in self.pages:
frames.append(page.to_dataframe(dtypes=dtypes))
return pandas.concat(frames) | 0.001838 |
def preRun_(self):
"""Create the shared memory client immediately after fork
"""
self.report("preRun_")
super().preRun_()
self.client = ShmemRGBClient(
name=self.shmem_name,
n_ringbuffer=self.n_buffer, # size of ring buffer
width=self.image_dimensions[0],
height=self.image_dimensions[1],
# client timeouts if nothing has been received in 1000 milliseconds
mstimeout=1000,
verbose=False
) | 0.003831 |
def deleteByPk(self, pk):
'''
deleteByPk - Delete object associated with given primary key
'''
obj = self.mdl.objects.getOnlyIndexedFields(pk)
if not obj:
return 0
return self.deleteOne(obj) | 0.043689 |
def response_add(self, request, obj, post_url_continue=None, **kwargs):
"""Redirects to the appropriate items' 'continue' page on item add.
As we administer tree items within tree itself, we
should make some changes to redirection process.
"""
if post_url_continue is None:
post_url_continue = '../item_%s/' % obj.pk
return self._redirect(request, super(TreeItemAdmin, self).response_add(request, obj, post_url_continue)) | 0.006198 |
def positionMinError(G, vmini, extension=0.0):
"""
Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the
smallest astrometric errors.
NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR
SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The minimum error in alpha* and the error in delta, in that order, in micro-arcsecond.
"""
parallaxError = parallaxErrorSkyAvg(G, vmini, extension=extension)
return _astrometricErrorFactors['alphaStar'].min()*parallaxError, \
_astrometricErrorFactors['delta'].min()*parallaxError | 0.010216 |
def update_by_external_id(self, api_objects):
"""
Update (PUT) one or more API objects by external_id.
:param api_objects:
"""
if not isinstance(api_objects, collections.Iterable):
api_objects = [api_objects]
return CRUDRequest(self).put(api_objects, update_many_external=True) | 0.005917 |
def draw_graph(G: nx.DiGraph, filename: str):
""" Draw a networkx graph with Pygraphviz. """
A = to_agraph(G)
A.graph_attr["rankdir"] = "LR"
A.draw(filename, prog="dot") | 0.005405 |
def cache_dir() -> str:
"""Return the default cache directory where downloaded models are stored."""
if config.VENDOR is None:
raise RuntimeError("modelforge is not configured; look at modelforge.configuration. "
"Depending on your objective you may or may not want to create a "
"modelforgecfg.py file which sets VENDOR and the rest.")
return os.path.join("~", "." + config.VENDOR) | 0.0125 |
def _make_stack(im, include_diagonals=False):
r'''
Creates a stack of images with one extra dimension to the input image
with length equal to the number of borders to search + 1.
Image is rolled along the axial shifts so that the border pixel is
overlapping the original pixel. First image in stack is the original.
Stacking makes direct vectorized array comparisons possible.
'''
ndim = len(np.shape(im))
axial_shift = _get_axial_shifts(ndim, include_diagonals)
if ndim == 2:
stack = np.zeros([np.shape(im)[0],
np.shape(im)[1],
len(axial_shift)+1])
stack[:, :, 0] = im
for i in range(len(axial_shift)):
ax0, ax1 = axial_shift[i]
temp = np.roll(np.roll(im, ax0, 0), ax1, 1)
stack[:, :, i+1] = temp
return stack
elif ndim == 3:
stack = np.zeros([np.shape(im)[0],
np.shape(im)[1],
np.shape(im)[2],
len(axial_shift)+1])
stack[:, :, :, 0] = im
for i in range(len(axial_shift)):
ax0, ax1, ax2 = axial_shift[i]
temp = np.roll(np.roll(np.roll(im, ax0, 0), ax1, 1), ax2, 2)
stack[:, :, :, i+1] = temp
return stack | 0.00076 |
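A quick illustration of the output layout (an added sketch, assuming the 2-D case without diagonals uses the four orthogonal shifts): the stacked axis holds the original image followed by one slice per shift.
import numpy as np
im = np.random.rand(5, 5)
stack = _make_stack(im, include_diagonals=False)
print(stack.shape)                        # expected (5, 5, 5): original + 4 orthogonal shifts
print(np.allclose(stack[:, :, 0], im))    # True: slice 0 is the unshifted image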
def get_lll_frac_coords(self, frac_coords: Vector3Like) -> np.ndarray:
"""
Given fractional coordinates in the lattice basis, returns corresponding
fractional coordinates in the lll basis.
"""
return dot(frac_coords, self.lll_inverse) | 0.010949 |
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad | 0.001472 |
def update_security_group(self, security_group, body=None):
"""Updates a security group."""
return self.put(self.security_group_path %
security_group, body=body) | 0.00995 |
def print_config_values(self, prefix='- '):
"""a wrapper to print_config_value to print all configuration values
Parameters
==========
prefix: the character prefix to put before the printed config value
defaults to "- "
"""
print('Configuration values from ' + self.config_dir)
self.print_config_value(self.CONFIG_NAME_USER, prefix=prefix)
self.print_config_value(self.CONFIG_NAME_PATH, prefix=prefix)
self.print_config_value(self.CONFIG_NAME_PROXY, prefix=prefix)
self.print_config_value(self.CONFIG_NAME_COMPETITION, prefix=prefix) | 0.003115 |
def delete_local_variable(self, onnx_name):
'''
Remove the variable whose onnx_name is the input onnx_name
'''
if onnx_name not in self.onnx_variable_names or onnx_name not in self.variables:
raise RuntimeError('The variable to be removed not found')
self.onnx_variable_names.discard(onnx_name)
raw_name = self.variables[onnx_name].raw_name
self.variable_name_mapping[raw_name].remove(onnx_name)
del self.variables[onnx_name] | 0.005988 |
def _get_mechanism(self, rup, coeffs):
"""
Compute fifth term of equation (1) on p. 1200:
``b6 * H``
"""
is_strike_slip = self.get_fault_type_dummy_variables(rup)
return coeffs['b6']*is_strike_slip | 0.00813 |
def GetForwardedIps(self, interface, interface_ip=None):
"""Retrieve the list of configured forwarded IP addresses.
Args:
interface: string, the output device to query.
interface_ip: string, current interface ip address.
Returns:
list, the IP address strings.
"""
args = ['ls', 'table', 'local', 'type', 'local']
options = self._CreateRouteOptions(dev=interface)
result = self._RunIpRoute(args=args, options=options)
result = re.sub(r'local\s', r'', result)
return self.ParseForwardedIps(result.split()) | 0.001789 |
def MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, otherPartFilePath = None):
"""
Archive all parts of multi-part compressed file.
If file has been extracted (via part1) then move all subsequent parts directly to archive directory.
If file has not been extracted then if part >1 add to other part skipped list and only archive
when the first part is sent for archiving.
Parameters
----------
firstPartExtractList : list
Base file names whose first part has already been extracted.
otherPartSkippedList : list
File paths of parts (other than part 1) that have been skipped so far.
archiveDir : string
Directory to move archived parts into.
otherPartFilePath : string [optional : default = None]
Path of a single non-first part to process. If None, every entry in
otherPartSkippedList is processed instead.
"""
if otherPartFilePath is None:
for filePath in list(otherPartSkippedList):
MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, filePath)
else:
baseFileName = re.findall("(.+?)[.]part.+?rar", otherPartFilePath)[0]
if baseFileName in firstPartExtractList:
util.ArchiveProcessedFile(otherPartFilePath, archiveDir)
if otherPartFilePath in otherPartSkippedList:
otherPartSkippedList.remove(otherPartFilePath)
elif otherPartFilePath not in otherPartSkippedList:
otherPartSkippedList.append(otherPartFilePath) | 0.010167 |
def AssignGroupNodes(r, group, nodes, force=False, dry_run=False):
"""
Assigns nodes to a group.
@type group: string
@param group: Node group name
@type nodes: list of strings
@param nodes: List of nodes to assign to the group
@rtype: int
@return: job id
"""
query = {
"force": force,
"dry-run": dry_run,
}
body = {
"nodes": nodes,
}
return r.request("put", "/2/groups/%s/assign-nodes" % group, query=query,
content=body) | 0.001894 |
def vm_disk_snapshot_create(name, kwargs=None, call=None):
'''
Takes a new snapshot of the disk image.
.. versionadded:: 2016.3.0
name
The name of the VM of which to take the snapshot.
disk_id
The ID of the disk to save.
description
The description for the snapshot.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_disk_snapshot_create my-vm disk_id=0 description="My Snapshot Description"
'''
if call != 'action':
raise SaltCloudSystemExit(
'The vm_disk_snapshot_create action must be called with -a or --action.'
)
if kwargs is None:
kwargs = {}
disk_id = kwargs.get('disk_id', None)
description = kwargs.get('description', None)
if disk_id is None or description is None:
raise SaltCloudSystemExit(
'The vm_disk_snapshot_create function requires a \'disk_id\' and a \'description\' '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': name}))
response = server.one.vm.disksnapshotcreate(auth,
vm_id,
int(disk_id),
description)
data = {
'action': 'vm.disksnapshotcreate',
'created': response[0],
'snapshot_id': response[1],
'error_code': response[2],
}
return data | 0.002613 |
def GetNewEventId(self, event_time=None):
"""Return a unique Event ID string."""
if event_time is None:
event_time = int(time.time() * 1e6)
return "%s:%s:%s" % (event_time, socket.gethostname(), os.getpid()) | 0.00885 |
def set_mlimits(self, min=None, max=None):
"""Set limits for the point meta (colormap).
Point meta values outside this range will be clipped.
:param min: value corresponding to the start of the colormap.
If None, it will be calculated.
:param max: value corresponding to the end of the colormap.
If None, it will be calculated.
"""
self.limits['mmin'] = min
self.limits['mmax'] = max | 0.004292 |
def focus_next(self):
"""focus next message in depth first order"""
mid = self.get_selected_mid()
newpos = self._tree.next_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | 0.006969 |
def workbook_to_reader(xlwt_wb):
"""
convert xlwt Workbook instance to an xlrd instance for reading
"""
_xlrd_required()
fh = BytesIO()
xlwt_wb.save(fh)
# prep for reading
fh.seek(0)
return xlrd.open_workbook(file_contents=fh.read()) | 0.003663 |
def _GetShowID(self, stringSearch, origStringSearch = None):
"""
Search for given string as an existing entry in the database file name
table or, if no match is found, as a show name from the TV guide.
If an exact match is not found in the database the user can accept
or decline the best match from the TV guide or can provide an alternate
match to lookup.
Parameters
----------
stringSearch : string
String to look up in database or guide.
origStringSearch : string [optional: default = None]
Original search string, used by recursive function calls.
Returns
----------
tvfile.ShowInfo or None
If no show id could be found this returns None, otherwise
it returns a tvfile.ShowInfo object containing show name
and show id.
"""
showInfo = tvfile.ShowInfo()
if origStringSearch is None:
goodlogging.Log.Info("RENAMER", "Looking up show ID for: {0}".format(stringSearch))
origStringSearch = stringSearch
goodlogging.Log.IncreaseIndent()
showInfo.showID = self._db.SearchFileNameTable(stringSearch)
if showInfo.showID is None:
goodlogging.Log.Info("RENAMER", "No show ID match found for '{0}' in database".format(stringSearch))
showNameList = self._guide.ShowNameLookUp(stringSearch)
if self._skipUserInput is True:
if len(showNameList) == 1:
showName = showNameList[0]
goodlogging.Log.Info("RENAMER", "Automatic selection of showname: {0}".format(showName))
else:
showName = None
goodlogging.Log.Info("RENAMER", "Show skipped - could not make automatic selection of showname")
else:
showName = util.UserAcceptance(showNameList)
if showName in showNameList:
libEntry = self._db.SearchTVLibrary(showName = showName)
if libEntry is None:
if self._skipUserInput is True:
response = 'y'
else:
goodlogging.Log.Info("RENAMER", "No show by this name found in TV library database. Is this a new show for the database?")
response = goodlogging.Log.Input("RENAMER", "Enter 'y' (yes), 'n' (no) or 'ls' (list existing shows): ")
response = util.ValidUserResponse(response, ('y', 'n', 'ls'))
if response.lower() == 'ls':
dbLibList = self._db.SearchTVLibrary()
if dbLibList is None:
goodlogging.Log.Info("RENAMER", "TV library is empty")
response = 'y'
else:
dbShowNameList = [i[1] for i in dbLibList]
dbShowNameStr = ', '.join(dbShowNameList)
goodlogging.Log.Info("RENAMER", "Existing shows in database are: {0}".format(dbShowNameStr))
response = goodlogging.Log.Input("RENAMER", "Is this a new show? [y/n]: ")
response = util.ValidUserResponse(response, ('y', 'n'))
if response.lower() == 'y':
showInfo.showID = self._db.AddShowToTVLibrary(showName)
showInfo.showName = showName
else:
try:
dbShowNameList
except NameError:
dbLibList = self._db.SearchTVLibrary()
if dbLibList is None:
goodlogging.Log.Info("RENAMER", "No show ID found - TV library is empty")
return None
dbShowNameList = [i[1] for i in dbLibList]
while showInfo.showID is None:
matchShowList = util.GetBestMatch(showName, dbShowNameList)
showName = util.UserAcceptance(matchShowList)
if showName is None:
goodlogging.Log.Info("RENAMER", "No show ID found - could not match to existing show")
return None
elif showName in matchShowList:
showInfo.showID = self._db.SearchTVLibrary(showName = showName)[0][0]
showInfo.showName = showName
else:
showInfo.showID = libEntry[0][0]
self._db.AddToFileNameTable(origStringSearch, showInfo.showID)
goodlogging.Log.DecreaseIndent()
return showInfo
elif showName is None:
goodlogging.Log.DecreaseIndent()
return None
else:
goodlogging.Log.DecreaseIndent()
return self._GetShowID(showName, origStringSearch)
else:
goodlogging.Log.Info("RENAMER", "Match found: show ID = {0}".format(showInfo.showID))
if origStringSearch != stringSearch:
self._db.AddToFileNameTable(origStringSearch, showInfo.showID)
goodlogging.Log.DecreaseIndent()
return showInfo | 0.011212 |
def describe_function(FunctionName, region=None, key=None,
keyid=None, profile=None):
'''
Given a function name describe its properties.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.describe_function myfunction
'''
try:
func = _find_function(FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
if func:
keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
'CodeSize', 'Description', 'Timeout', 'MemorySize',
'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
return {'function': dict([(k, func.get(k)) for k in keys])}
else:
return {'function': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | 0.002144 |
def get_version(file, name='__version__'):
"""Get the version of the package from the given file by
executing it and extracting the given `name`.
"""
path = os.path.realpath(file)
version_ns = {}
with io.open(path, encoding="utf8") as f:
exec(f.read(), {}, version_ns)
return version_ns[name] | 0.003049 |
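Typical usage, with a hypothetical ``my_package/_version.py`` that contains ``__version__ = "1.2.3"``; any module-level file defining the requested name works.
version = get_version("my_package/_version.py")
print(version)   # "1.2.3"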
def replace_cr_with_newline(message: str):
"""
TQDM and requests use carriage returns to get the training line to update for each batch
without adding more lines to the terminal output. Displaying those in a file won't work
correctly, so we'll just make sure that each batch shows up on its own line.
:param message: the message to permute
:return: the message with carriage returns replaced with newlines
"""
if '\r' in message:
message = message.replace('\r', '')
if not message or message[-1] != '\n':
message += '\n'
return message | 0.006667 |
def nailgunned_stdio(cls, sock, env, handle_stdin=True):
"""Redirects stdio to the connected socket speaking the nailgun protocol."""
# Determine output tty capabilities from the environment.
stdin_isatty, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(env)
is_tty_capable = all((stdin_isatty, stdout_isatty, stderr_isatty))
if is_tty_capable:
with cls._tty_stdio(env) as finalizer:
yield finalizer
else:
with cls._pipe_stdio(
sock,
stdin_isatty,
stdout_isatty,
stderr_isatty,
handle_stdin
) as finalizer:
yield finalizer | 0.007862 |
def get(self, key):
'''Return timings for `key`. Returns 0 if not present.'''
if key in self and len(self[key]) > 0:
return min(self[key])
else:
return 0 | 0.00995 |
def query(self, model, **kwargs):
'''Create a new :class:`Query` for *model*.'''
sm = self.model(model)
query_class = sm.manager.query_class or Query
return query_class(sm._meta, self, **kwargs) | 0.008696 |
def include(url_prefix_or_module_name: str,
module_name: Optional[str] = None,
*,
attr: str = 'routes',
exclude: Optional[Endpoints] = None,
only: Optional[Endpoints] = None,
) -> RouteGenerator:
"""
Include the routes from another module at that point in the tree. For example::
# project-root/bundles/primes/routes.py
routes = lambda: [
controller('/two', TwoController),
controller('/three', ThreeController),
controller('/five', FiveController),
]
# project-root/bundles/blog/routes.py
routes = lambda: [
func('/', index),
controller('/authors', AuthorController),
controller('/posts', PostController),
]
# project-root/your_app_bundle/routes.py
routes = lambda: [
include('some_bundle.routes'),
# these last two are equivalent
include('/blog', 'bundles.blog.routes'),
prefix('/blog', [
include('bundles.blog.routes'),
]),
]
:param url_prefix_or_module_name: The module name, or a url prefix for all
of the included routes in the module name
passed as the second argument.
:param module_name: The module name of the routes to include if a url prefix
was given as the first argument.
:param attr: The attribute name in the module, if different from ``routes``.
:param exclude: An optional list of endpoints to exclude.
:param only: An optional list of endpoints to only include.
"""
url_prefix = None
if module_name is None:
module_name = url_prefix_or_module_name
else:
url_prefix = url_prefix_or_module_name
module = importlib.import_module(module_name)
try:
routes = getattr(module, attr)()
except AttributeError:
raise AttributeError(f'Could not find a variable named `{attr}` '
f'in the {module_name} module!')
routes = _reduce_routes(routes, exclude=exclude, only=only)
if url_prefix:
yield from prefix(url_prefix, routes)
else:
yield from routes | 0.001738 |
def run_single_eval(nlp, loading_time, print_name, text_path, gold_ud, tmp_output_path, out_file, print_header,
check_parse, print_freq_tasks):
"""" Run an evaluation of a model nlp on a certain specified treebank """
with text_path.open(mode='r', encoding='utf-8') as f:
flat_text = f.read()
# STEP 1: tokenize text
tokenization_start = time.time()
texts = split_text(flat_text)
docs = list(nlp.pipe(texts))
tokenization_end = time.time()
tokenization_time = tokenization_end - tokenization_start
# STEP 2: record stats and timings
tokens_per_s = int(len(gold_ud.tokens) / tokenization_time)
print_header_1 = ['date', 'text_path', 'gold_tokens', 'model', 'loading_time', 'tokenization_time', 'tokens_per_s']
print_string_1 = [str(datetime.date.today()), text_path.name, len(gold_ud.tokens),
print_name, "%.2f" % loading_time, "%.2f" % tokenization_time, tokens_per_s]
# STEP 3: evaluate predicted tokens and features
with tmp_output_path.open(mode="w", encoding="utf8") as tmp_out_file:
write_conllu(docs, tmp_out_file)
with tmp_output_path.open(mode="r", encoding="utf8") as sys_file:
sys_ud = conll17_ud_eval.load_conllu(sys_file, check_parse=check_parse)
tmp_output_path.unlink()
scores = conll17_ud_eval.evaluate(gold_ud, sys_ud, check_parse=check_parse)
# STEP 4: format the scoring results
eval_headers = EVAL_PARSE
if not check_parse:
eval_headers = EVAL_NO_PARSE
for score_name in eval_headers:
score = scores[score_name]
print_string_1.extend(["%.2f" % score.precision,
"%.2f" % score.recall,
"%.2f" % score.f1])
print_string_1.append("-" if score.aligned_accuracy is None else "%.2f" % score.aligned_accuracy)
print_string_1.append("-" if score.undersegmented is None else "%.4f" % score.under_perc)
print_string_1.append("-" if score.oversegmented is None else "%.4f" % score.over_perc)
print_header_1.extend([score_name + '_p', score_name + '_r', score_name + '_F', score_name + '_acc',
score_name + '_under', score_name + '_over'])
if score_name in print_freq_tasks:
print_header_1.extend([score_name + '_word_under_ex', score_name + '_shape_under_ex',
score_name + '_word_over_ex', score_name + '_shape_over_ex'])
d_under_words = get_freq_tuples(score.undersegmented, PRINT_TOTAL)
d_under_shapes = get_freq_tuples([word_shape(x) for x in score.undersegmented], PRINT_TOTAL)
d_over_words = get_freq_tuples(score.oversegmented, PRINT_TOTAL)
d_over_shapes = get_freq_tuples([word_shape(x) for x in score.oversegmented], PRINT_TOTAL)
            # saving to CSV with ';' separator, so mask any ';' in the example output
print_string_1.append(
str({k: v for k, v in d_under_words if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
print_string_1.append(
str({k: v for k, v in d_under_shapes if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
print_string_1.append(
str({k: v for k, v in d_over_words if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
print_string_1.append(
str({k: v for k, v in d_over_shapes if v > PRINT_FREQ}).replace(";", "*SEMICOLON*"))
# STEP 5: print the formatted results to CSV
if print_header:
out_file.write(';'.join(map(str, print_header_1)) + '\n')
out_file.write(';'.join(map(str, print_string_1)) + '\n') | 0.004891 |
def _fetch(
queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=True, return_untouched=False
):
"""
    Perform the upsert and do an optional sync operation
"""
model = queryset.model
if (return_untouched or sync) and returning is not True:
returning = set(returning) if returning else set()
returning.add(model._meta.pk.name)
upserted = []
deleted = []
# We must return untouched rows when doing a sync operation
return_untouched = True if sync else return_untouched
if model_objs:
sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
with connection.cursor() as cursor:
cursor.execute(sql, sql_args)
if cursor.description:
nt_result = namedtuple('Result', [col[0] for col in cursor.description])
upserted = [nt_result(*row) for row in cursor.fetchall()]
pk_field = model._meta.pk.name
if sync:
orig_ids = queryset.values_list(pk_field, flat=True)
deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}
model.objects.filter(pk__in=deleted).delete()
nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])
return UpsertResult(
upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]
) | 0.003788 |
def get_all_devices_in_portal(self):
"""
This loops through the get_multiple_devices method 10 rids at a time.
"""
rids = self.get_portal_by_name(
self.portal_name()
)[2][1]['info']['aliases']
# print("RIDS: {0}".format(rids))
device_rids = [ rid.strip() for rid in rids ]
blocks_of_ten = [ device_rids[x:x+10] for x in range(0, len(device_rids), 10) ]
devices = []
for block_of_ten in blocks_of_ten:
retval = self.get_multiple_devices(block_of_ten)
if retval is not None:
devices.extend( retval )
else:
print("Not adding to device list: {!r}".format(retval))
# Parse 'meta' key's raw string values for each device
for device in devices:
dictify_device_meta(device)
return devices | 0.011062 |
def key(self):
"""Embedded supports curies."""
if self.curie is None:
return self.name
return ":".join((self.curie.name, self.name)) | 0.011905 |
def rename_notes_folder(self, title, folderid):
"""Rename a folder
:param title: New title of the folder
:param folderid: The UUID of the folder to rename
"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/folders/rename/{}'.format(folderid), post_data={
'title' : title
})
return response | 0.009074 |
def list_device_data_sources(self, device_rid):
"""
List data sources of a portal device with rid 'device_rid'.
http://docs.exosite.com/portals/#list-device-data-source
"""
headers = {
'User-Agent': self.user_agent(),
}
headers.update(self.headers())
r = requests.get( self.portals_url()+'/devices/'+device_rid+'/data-sources',
headers=headers, auth=self.auth())
if HTTP_STATUS.OK == r.status_code:
return r.json()
else:
print("Something went wrong: <{0}>: {1}".format(
r.status_code, r.reason))
return None | 0.008621 |
def validate_week(year, week):
    """Validate an ISO week number for the given year."""
    # December 28 always falls in the last ISO week of the year, so its week
    # number is the maximum valid week (52 or 53).
    max_week = datetime.strptime("{}-{}-{}".format(12, 28, year), "%m-%d-%Y").isocalendar()[1]
    return 1 <= week <= max_week | 0.012097
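A few illustrative checks for the week validation above; the expected outcomes are assumptions based on the ISO calendar, not output recorded from the original project.

assert validate_week(2020, 53)      # 2020 is a 53-week ISO year
assert not validate_week(2021, 54)  # no ISO year has more than 53 weeks
assert not validate_week(2021, 0)   # weeks are 1-based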
def task_remove_user(self, *args, **kwargs):
"""Remove the selected user from the task
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_task:
return
i = self.task_user_tablev.currentIndex()
item = i.internalPointer()
if item:
user = item.internal_data()
self.cur_task.users.remove(user)
item.set_parent(None) | 0.004525 |
def distance_between(self, string, start, end):
"""Returns number of lines between start and end"""
count = 0
started = False
for line in string.split("\n"):
if self.scan_line(line, start) and not started:
started = True
if self.scan_line(line, end):
return count
if started:
count += 1
return count | 0.004673 |
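A minimal, self-contained sketch of the counting logic above. The `_Scanner` stub and its substring-based `scan_line` are assumptions made only for this illustration; the real class presumably scans lines differently.

class _Scanner:
    def scan_line(self, line, token):
        # Stand-in implementation: plain substring match.
        return token in line

text = "header\nSTART\nline 1\nline 2\nEND\nfooter"
# Call the unbound function with the stub as `self`; counts START, line 1, line 2 -> 3
print(distance_between(_Scanner(), text, "START", "END"))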
def update(self, step, T, E, acceptance, improvement):
"""Print progress."""
if acceptance is None:
acceptance = 0
if improvement is None:
improvement = 0
if step > 0:
elapsed = time.time() - self.start
remain = (self.steps - step) * (elapsed / step)
# print('Time {} ({} Remaing)'.format(time_string(elapsed), time_string(remain)))
else:
elapsed = 0
remain = 0
curr = self.cmd(self.state)
curr_score = float(E)
best = self.cmd(self.best_state)
best_score = self.best_energy
report = progress_report(
curr,
best,
curr_score,
best_score,
step,
self.steps,
acceptance * 100,
improvement * 100,
time_string(elapsed),
time_string(remain),
)
print(report)
if fig:
imgs[1].set_data(
reshape_as_image(self.apply_color(self.src.copy(), self.state))
)
imgs[2].set_data(
reshape_as_image(self.apply_color(self.src.copy(), self.best_state))
)
if txt:
txt.set_text(report)
fig.canvas.draw() | 0.003056 |
def create_vm_image(self, vm_image):
'''
Creates a VM Image in the image repository that is associated with the
specified subscription using a specified set of virtual hard disks.
vm_image:
An instance of VMImage class.
vm_image.name: Required. Specifies the name of the image.
vm_image.label: Required. Specifies an identifier for the image.
vm_image.description: Optional. Specifies the description of the image.
vm_image.os_disk_configuration:
Required. Specifies configuration information for the operating
system disk that is associated with the image.
vm_image.os_disk_configuration.host_caching:
Optional. Specifies the caching behavior of the operating system disk.
Possible values are: None, ReadOnly, ReadWrite
vm_image.os_disk_configuration.os_state:
Required. Specifies the state of the operating system in the image.
Possible values are: Generalized, Specialized
A Virtual Machine that is fully configured and running contains a
Specialized operating system. A Virtual Machine on which the
Sysprep command has been run with the generalize option contains a
Generalized operating system.
vm_image.os_disk_configuration.os:
Required. Specifies the operating system type of the image.
vm_image.os_disk_configuration.media_link:
Required. Specifies the location of the blob in Windows Azure
storage. The blob location belongs to a storage account in the
subscription specified by the <subscription-id> value in the
operation call.
vm_image.data_disk_configurations:
Optional. Specifies configuration information for the data disks
that are associated with the image. A VM Image might not have data
disks associated with it.
vm_image.data_disk_configurations[].host_caching:
Optional. Specifies the caching behavior of the data disk.
Possible values are: None, ReadOnly, ReadWrite
vm_image.data_disk_configurations[].lun:
Optional if the lun for the disk is 0. Specifies the Logical Unit
Number (LUN) for the data disk.
vm_image.data_disk_configurations[].media_link:
Required. Specifies the location of the blob in Windows Azure
storage. The blob location belongs to a storage account in the
subscription specified by the <subscription-id> value in the
operation call.
vm_image.data_disk_configurations[].logical_size_in_gb:
Required. Specifies the size, in GB, of the data disk.
vm_image.language: Optional. Specifies the language of the image.
vm_image.image_family:
Optional. Specifies a value that can be used to group VM Images.
vm_image.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
vm_image.eula:
Optional. Specifies the End User License Agreement that is
associated with the image. The value for this element is a string,
but it is recommended that the value be a URL that points to a EULA.
vm_image.icon_uri:
Optional. Specifies the URI to the icon that is displayed for the
image in the Management Portal.
vm_image.small_icon_uri:
Optional. Specifies the URI to the small icon that is displayed for
the image in the Management Portal.
vm_image.privacy_uri:
Optional. Specifies the URI that points to a document that contains
the privacy policy related to the image.
vm_image.published_date:
Optional. Specifies the date when the image was added to the image
repository.
vm_image.show_in_gui:
Optional. Indicates whether the VM Images should be listed in the
portal.
'''
_validate_not_none('vm_image', vm_image)
_validate_not_none('vm_image.name', vm_image.name)
_validate_not_none('vm_image.label', vm_image.label)
_validate_not_none('vm_image.os_disk_configuration.os_state',
vm_image.os_disk_configuration.os_state)
_validate_not_none('vm_image.os_disk_configuration.os',
vm_image.os_disk_configuration.os)
_validate_not_none('vm_image.os_disk_configuration.media_link',
vm_image.os_disk_configuration.media_link)
return self._perform_post(
self._get_vm_image_path(),
_XmlSerializer.create_vm_image_to_xml(vm_image),
as_async=True) | 0.001445 |
async def close_interface(self, conn_id, interface):
"""Close an interface on this IOTile device.
See :meth:`AbstractDeviceAdapter.close_interface`.
"""
adapter_id = self._get_property(conn_id, 'adapter')
await self.adapters[adapter_id].close_interface(conn_id, interface) | 0.006369 |
def collect(self):
"""
Collector GPU stats
"""
stats_config = self.config['stats']
if USE_PYTHON_BINDING:
collect_metrics = self.collect_via_pynvml
else:
collect_metrics = self.collect_via_nvidia_smi
collect_metrics(stats_config) | 0.006452 |
def _get_key_value(self, key, is_hll=False):
'''
Returns the proper key value for the stats
@param key: the redis key
@param is_hll: the key is a HyperLogLog, else is a sorted set
'''
if is_hll:
# get hll value
return self.redis_conn.execute_command("PFCOUNT", key)
else:
# get zcard value
return self.redis_conn.zcard(key) | 0.004673 |
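A hedged sketch of how the helper above might be called from another method of the same stats class; the surrounding method and the key names are hypothetical.

# Hypothetical companion method on the same stats class; key names are invented.
def get_crawl_stats(self):
    return {
        'queued_requests': self._get_key_value('stats:queue:requests'),       # ZCARD path
        'unique_domains': self._get_key_value('stats:domains', is_hll=True),  # PFCOUNT path
    }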
def plotTimeline(dataTask, filename):
"""Build a timeline"""
fig = plt.figure()
ax = fig.gca()
worker_names = [x for x in dataTask.keys() if "broker" not in x]
min_time = getMinimumTime(dataTask)
ystep = 1. / (len(worker_names) + 1)
y = 0
for worker, vals in dataTask.items():
if "broker" in worker:
continue
y += ystep
if hasattr(vals, 'values'):
for future in vals.values():
start_time = [future['start_time'][0] - min_time]
end_time = [future['end_time'][0] - min_time]
timelines(ax, y, start_time, end_time)
#ax.xaxis_date()
#myFmt = DateFormatter('%H:%M:%S')
#ax.xaxis.set_major_formatter(myFmt)
#ax.xaxis.set_major_locator(SecondLocator(0, interval=20))
#delta = (stop.max() - start.min())/10
ax.set_yticks(np.arange(ystep, 1, ystep))
ax.set_yticklabels(worker_names)
ax.set_ylim(0, 1)
#fig.xlim()
ax.set_xlabel('Time')
fig.savefig(filename) | 0.006829 |
def server_bind(self):
"""
Called by constructor to bind the socket.
"""
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address) | 0.007752 |
def call_from_executor(self, callback, _max_postpone_until=None):
"""
Call this function in the main event loop.
Similar to Twisted's ``callFromThread``.
        :param _max_postpone_until: `None` or `time.time` value. For internal
use. If the eventloop is saturated, consider this task to be low
priority and postpone maximum until this timestamp. (For instance,
repaint is done using low priority.)
"""
assert _max_postpone_until is None or isinstance(_max_postpone_until, float)
self._calls_from_executor.append((callback, _max_postpone_until))
if self._schedule_pipe:
try:
os.write(self._schedule_pipe[1], b'x')
except (AttributeError, IndexError, OSError):
# Handle race condition. We're in a different thread.
# - `_schedule_pipe` could have become None in the meantime.
# - We catch `OSError` (actually BrokenPipeError), because the
# main thread could have closed the pipe already.
pass | 0.002705 |
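A usage sketch for the scheduling call above, as it might look from a worker thread; `eventloop` and the callback are assumptions, and only `call_from_executor` comes from the method itself.

import time

def _notify():
    print('background job finished')

# Normal priority: run the callback in the main event loop as soon as possible.
eventloop.call_from_executor(_notify)

# Low priority: the loop may postpone the call for up to one second when busy.
eventloop.call_from_executor(_notify, _max_postpone_until=time.time() + 1.0)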
def set_vhost_permissions(self, vname, username, config, rd, wr):
"""
Set permissions for a given username on a given vhost. Both
must already exist.
:param string vname: Name of the vhost to set perms on.
:param string username: User to set permissions for.
:param string config: Permission pattern for configuration operations
for this user in this vhost.
:param string rd: Permission pattern for read operations for this user
in this vhost
:param string wr: Permission pattern for write operations for this user
in this vhost.
Permission patterns are regex strings. If you're unfamiliar with this,
you should definitely check out this section of the RabbitMQ docs:
http://www.rabbitmq.com/admin-guide.html#access-control
"""
vname = quote(vname, '')
body = json.dumps({"configure": config, "read": rd, "write": wr})
path = Client.urls['vhost_permissions'] % (vname, username)
return self._call(path, 'PUT', body,
headers=Client.json_headers) | 0.002622 |
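A hedged usage sketch for the permissions call above. The client construction, vhost, and user names are assumptions; the regex patterns simply illustrate the permission-pattern idea described in the docstring.

client = Client('localhost:15672', 'guest', 'guest')  # assumed constructor

# Give 'alice' unrestricted configure/read/write access on the default vhost.
client.set_vhost_permissions('/', 'alice', config='.*', rd='.*', wr='.*')

# Limit 'bob' to resources whose names start with "reports." on the same vhost.
pattern = r'^reports\..*'
client.set_vhost_permissions('/', 'bob', config=pattern, rd=pattern, wr=pattern)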
def validate(self):
"""
Error check the attributes of the ActivateRequestPayload object.
"""
if self.unique_identifier is not None:
if not isinstance(self.unique_identifier,
attributes.UniqueIdentifier):
msg = "invalid unique identifier"
raise TypeError(msg)
if self.compromise_occurrence_date is not None:
if not isinstance(self.compromise_occurrence_date,
primitives.DateTime):
msg = "invalid compromise time"
raise TypeError(msg)
if not isinstance(self.revocation_reason, objects.RevocationReason):
msg = "invalid revocation reason"
raise TypeError(msg) | 0.002577 |
def _filter_kwargs(self, keep_list, **kwargs):
''' Filters the dict of *kwargs*, keeping only arguments
whose keys are in *keep_list* and discarding all other
arguments.
        Based on the filtering, constructs and returns a new
dict.
'''
new_kwargs = {}
for argName, argVal in kwargs.items():
if argName.lower() in keep_list:
new_kwargs[argName.lower()] = argVal
return new_kwargs | 0.009901 |
def drawBernoulli(N,p=0.5,seed=0):
'''
Generates arrays of booleans drawn from a simple Bernoulli distribution.
The input p can be a float or a list-like of floats; its length T determines
the number of entries in the output. The t-th entry of the output is an
array of N booleans which are True with probability p[t] and False otherwise.
Arguments
---------
N : int
Number of draws in each row.
p : float or [float]
Probability or probabilities of the event occurring (True).
seed : int
Seed for random number generator.
Returns
-------
draws : np.array or [np.array]
T-length list of arrays of Bernoulli draws each of size N, or a single
        array of size N (if p is a scalar).
'''
# Set up the RNG
RNG = np.random.RandomState(seed)
if isinstance(p,float):# Return a single array of size N
draws = RNG.uniform(size=N) < p
else: # Set up empty list to populate, then loop and populate list with draws:
draws=[]
for t in range(len(p)):
draws.append(RNG.uniform(size=N) < p[t])
return draws | 0.008734 |
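Two quick calls showing the scalar versus list behaviour of p described in the docstring above; the numbers are arbitrary.

flips = drawBernoulli(5, p=0.3, seed=42)         # single boolean array of length 5
by_period = drawBernoulli(5, p=[0.1, 0.5, 0.9])  # list of 3 boolean arrays, one per p[t]
print(flips.mean(), [draws.mean() for draws in by_period])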
def _parse_player_position(self, player_info):
"""
Parse the player's position.
The player's position isn't contained within a unique tag and the
player's meta information should be iterated through until 'Position'
is found as it contains the desired text.
Parameters
----------
player_info : PyQuery object
A PyQuery object of the player's information on the HTML stats
page.
"""
for section in player_info('div#meta p').items():
if 'Position' in str(section):
position = section.text().replace('Position: ', '')
setattr(self, '_position', position)
break | 0.002766 |
def is_lazy(self, k):
'''
lmap.is_lazy(k) yields True if the given k is lazy and unmemoized in the given lazy map,
lmap, otherwise False.
'''
v = ps.PMap.__getitem__(self, k)
if not isinstance(v, (types.FunctionType, partial)) or \
id(v) in self._memoized or \
[] != getargspec_py27like(v)[0]:
return False
else:
return True | 0.007042 |
def _auth_with_refresh_token(session, refresh_token):
"""Authenticate using OAuth refresh token.
Raises GoogleAuthError if authentication fails.
Returns access token string.
"""
# Make a token request.
token_request_data = {
'client_id': OAUTH2_CLIENT_ID,
'client_secret': OAUTH2_CLIENT_SECRET,
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
}
res = _make_token_request(session, token_request_data)
return res['access_token'] | 0.001957 |
def collect_manifest_dependencies(manifest_data, lockfile_data):
"""Convert the manifest format to the dependencies schema"""
output = {}
for dependencyName, dependencyConstraint in manifest_data.items():
output[dependencyName] = {
# identifies where this dependency is installed from
'source': 'example-package-manager',
# the constraint that the user is using (i.e. "> 1.0.0")
'constraint': dependencyConstraint,
# all available versions above and outside of their constraint
# - usually you would need to use the package manager lib or API
# to get this information (we just fake it here)
'available': [
{'name': '2.0.0'},
],
}
return output | 0.001244 |
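An illustrative input/output pair for the schema conversion above; the package names and constraints are invented for the example.

manifest = {'left-pad': '>= 1.0.0', 'right-pad': '~2.1'}
deps = collect_manifest_dependencies(manifest, lockfile_data={})
# deps['left-pad'] == {
#     'source': 'example-package-manager',
#     'constraint': '>= 1.0.0',
#     'available': [{'name': '2.0.0'}],
# }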
def _addModuleInfo(self, moduleInfo):
""" Adds a line with module info to the editor
:param moduleInfo: can either be a string or a module info class.
In the first case, an object is instantiated as ImportedModuleInfo(moduleInfo).
"""
if is_a_string(moduleInfo):
moduleInfo = mi.ImportedModuleInfo(moduleInfo)
line = "{:15s}: {}".format(moduleInfo.name, moduleInfo.verboseVersion)
self.editor.appendPlainText(line)
QtWidgets.QApplication.instance().processEvents() | 0.005425 |
def request_ride(
self,
ride_type=None,
start_latitude=None,
start_longitude=None,
start_address=None,
end_latitude=None,
end_longitude=None,
end_address=None,
primetime_confirmation_token=None,
):
"""Request a ride on behalf of an Lyft user.
Parameters
ride_type (str)
Name of the type of ride you're requesting.
E.g., lyft, lyft_plus
start_latitude (float)
Latitude component of a start location.
start_longitude (float)
Longitude component of a start location.
start_address (str)
Optional pickup address.
end_latitude (float)
Optional latitude component of a end location.
Destination would be NULL in this case.
end_longitude (float)
Optional longitude component of a end location.
Destination would be NULL in this case.
end_address (str)
Optional destination address.
primetime_confirmation_token (str)
Optional string containing the Prime Time confirmation token
to book rides having Prime Time Pricing.
Returns
(Response)
A Response object containing the ride request ID and other
            details about the requested ride.
"""
args = {
'ride_type': ride_type,
'origin': {
'lat': start_latitude,
'lng': start_longitude,
'address': start_address,
},
'destination': {
'lat': end_latitude,
'lng': end_longitude,
'address': end_address,
},
'primetime_confirmation_token': primetime_confirmation_token,
}
return self._api_call('POST', 'v1/rides', args=args) | 0.001511 |
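A hedged usage sketch for the ride request above; the client construction and the way the response body is read are assumptions, and only the request_ride signature comes from the method itself.

response = client.request_ride(          # `client` construction is assumed
    ride_type='lyft',
    start_latitude=37.7833,
    start_longitude=-122.4167,
    start_address='123 Main St, San Francisco, CA',
    end_latitude=37.7922,
    end_longitude=-122.4012,
)
# Assumption: the Response object exposes the parsed body, e.g. as `response.json`.
ride_id = response.json.get('ride_id')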
def delete(self, docids):
"""Delete specified documents from the index."""
logger.info("asked to drop %i documents" % len(docids))
for index in [self.opt_index, self.fresh_index]:
if index is not None:
index.delete(docids)
self.flush(save_index=True) | 0.006452 |
def get_critical_original_kink_ratio(self):
"""
Returns a list of molar mixing ratio for each kink between ORIGINAL
(instead of processed) reactant compositions. This is the
same list as mixing ratio obtained from get_kinks method
if self.norm = False.
Returns:
A list of floats representing molar mixing ratios between
the original reactant compositions for each kink.
"""
ratios = []
if self.c1_original == self.c2_original:
return [0, 1]
reaction_kink = [k[3] for k in self.get_kinks()]
for rxt in reaction_kink:
ratios.append(abs(self._get_original_composition_ratio(rxt)))
return ratios | 0.002717 |
def xml_import(self,
filepath=None,
xml_content=None,
markings=None,
identifier_ns_uri=None,
initialize_importer=True,
**kwargs):
"""
Import an OpenIOC indicator xml (root element 'ioc') from file <filepath> or
from a string <xml_content>
You can provide:
- a list of markings with which all generated Information Objects
will be associated (e.g., in order to provide provenance function)
- The uri of a namespace of the identifiers for the generated information objects.
        This namespace identifies the 'owner' of the object. For example, if importing
        IOCs published by Mandiant (e.g., as part of the APT1 report), choose a namespace
such as 'mandiant.com' or similar (and be consistent about it, when importing
other stuff published by Mandiant).
The kwargs are not read -- they are present to allow the use of the
DingoImportCommand class for easy definition of commandline import commands
(the class passes all command line arguments to the xml_import function, so
        without the **kwargs parameter, an error would occur).
"""
if initialize_importer:
# Clear state in case xml_import is used several times, but keep namespace info
self.__init__()
# Initialize default arguments
# '[]' would be mutable, so we initialize here
if not markings:
markings = []
# Initializing here allows us to also get the default namespace when
# explicitly passing 'None' as parameter.
if identifier_ns_uri:
self.identifier_ns_uri = identifier_ns_uri
# Use the generic XML import customized for OpenIOC import
# to turn XML into DingoObjDicts
import_result = MantisImporter.xml_import(xml_fname=filepath,
xml_content=xml_content,
ns_mapping=self.namespace_dict,
embedded_predicate=self.openioc_embedding_pred,
id_and_revision_extractor=self.id_and_revision_extractor,
transformer=self.transformer,
keep_attrs_in_created_reference=False,
)
# The MANTIS/DINGOS xml importer returns then the following structure::
#
#
# {'id_and_rev_info': Id and revision info of top-level element of the form
# {'id': ... , 'timestamp': ...}
# 'elt_name': Element name of top-level element
# 'dict_repr': Dictionary representation of XML, minus the embedded objects -- for
# those, an 'idref' reference has been generated
# 'embedded_objects': List of embedded objects, as dictionary
# {"id_and_revision_info": id and revision info of extracted object,
# "elt_name": Element name,
# "dict_repr" : dictionary representation of XML of embedded object
# }
# 'unprocessed' : List of unprocessed embedded objects (as libxml2 Node object)
# (e.g. for handover to other importer
# 'file_content': Content of imported file (or, if content was passed instead of a file name,
# the original content)}
id_and_rev_info = import_result['id_and_rev_info']
elt_name = import_result['elt_name']
elt_dict = import_result['dict_repr']
embedded_objects = import_result['embedded_objects']
default_ns = self.namespace_dict.get(elt_dict.get('@@ns',None),'http://schemas.mandiant.com/unknown/ioc')
# Export family information.
family_info_dict = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL,default_ns)
if family_info_dict:
self.iobject_family_name="%s.mandiant.com" % family_info_dict['family']
self.iobject_family_revision_name=family_info_dict['revision']
# Initialize stack with import_results.
# First, the result from the top-level import
pending_stack = deque()
pending_stack.append((id_and_rev_info, elt_name,elt_dict))
# Then the embedded objects
while embedded_objects:
embedded_object = embedded_objects.pop()
id_and_rev_info = embedded_object['id_and_rev_info']
elt_name = embedded_object['elt_name']
elt_dict = embedded_object['dict_repr']
pending_stack.append((id_and_rev_info,elt_name,elt_dict))
if id_and_rev_info['timestamp']:
ts = id_and_rev_info['timestamp']
else:
ts = self.create_timestamp
while pending_stack:
(id_and_rev_info, elt_name, elt_dict) = pending_stack.pop()
# Call the importer that turns DingoObjDicts into Information Objects in the database
iobject_type_name = elt_name
iobject_type_namespace_uri = self.namespace_dict.get(elt_dict.get('@@ns',None),DINGOS_GENERIC_FAMILY_NAME)
MantisImporter.create_iobject(iobject_family_name = self.iobject_family_name,
iobject_family_revision_name= self.iobject_family_revision_name,
iobject_type_name=iobject_type_name,
iobject_type_namespace_uri=iobject_type_namespace_uri,
iobject_type_revision_name= '',
iobject_data=elt_dict,
uid=id_and_rev_info['id'],
identifier_ns_uri= self.identifier_ns_uri,
timestamp = ts,
create_timestamp = self.create_timestamp,
markings=markings,
config_hooks = {'special_ft_handler' : self.fact_handler_list(),
'datatype_extractor' : self.datatype_extractor,
'attr_ignore_predicate' : self.attr_ignore_predicate},
namespace_dict=self.namespace_dict,
) | 0.010274 |
def service_available(service_name):
"""Determine whether a system service is available"""
try:
subprocess.check_output(
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError as e:
return b'unrecognized service' not in e.output
else:
return True | 0.002695 |
def save_yamlf(data: Union[list, dict], fpath: str, encoding: str) -> str:
"""
:param data: list | dict data
:param fpath: write path
:param encoding: encoding
:rtype: written path
"""
with codecs.open(fpath, mode='w', encoding=encoding) as f:
f.write(dump_yaml(data))
return fpath | 0.003077 |
def get_formset(self):
"""Provide the formset corresponding to this DataTable.
Use this to validate the formset and to get the submitted data back.
"""
if self.folder:
queryset = self.folder.files.all()
else:
queryset = File.objects.none()
if self._formset is None:
self._formset = self.formset_class(
self.request.POST or None,
initial=self._get_formset_data(),
prefix=self._meta.name,
queryset=queryset)
return self._formset | 0.003431 |
def _app(self):
"""The application object to work with; this is either the app
that we have been bound to, or the current application.
"""
if self.app is not None:
return self.app
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app
try:
from flask import _app_ctx_stack
app_ctx = _app_ctx_stack.top
if app_ctx is not None:
return app_ctx.app
except ImportError:
pass
raise RuntimeError('assets instance not bound to an application, '+
'and no application in current context') | 0.0059 |
def list_extmods():
'''
.. versionadded:: 2017.7.0
List Salt modules which have been synced externally
CLI Examples:
.. code-block:: bash
salt '*' saltutil.list_extmods
'''
ret = {}
ext_dir = os.path.join(__opts__['cachedir'], 'extmods')
mod_types = os.listdir(ext_dir)
for mod_type in mod_types:
ret[mod_type] = set()
for _, _, files in salt.utils.path.os_walk(os.path.join(ext_dir, mod_type)):
for fh_ in files:
ret[mod_type].add(fh_.split('.')[0])
ret[mod_type] = list(ret[mod_type])
return ret | 0.003306 |
def ativar_sat(self, tipo_certificado, cnpj, codigo_uf):
"""Sobrepõe :meth:`~satcfe.base.FuncoesSAT.ativar_sat`.
:return: Uma resposta SAT especilizada em ``AtivarSAT``.
:rtype: satcfe.resposta.ativarsat.RespostaAtivarSAT
"""
retorno = super(ClienteSATLocal, self).ativar_sat(
tipo_certificado, cnpj, codigo_uf)
return RespostaAtivarSAT.analisar(retorno) | 0.004773 |
def setGroups(self, *args, **kwargs):
"""Adds the groups assigned to this user to a 'groups' field.
Returns the number of requests done to Mambu.
"""
try:
groups = self.mambugroupsclass(creditOfficerUsername=self['username'], *args, **kwargs)
except AttributeError as ae:
from .mambugroup import MambuGroups
self.mambugroupsclass = MambuGroups
groups = self.mambugroupsclass(creditOfficerUsername=self['username'], *args, **kwargs)
self['groups'] = groups
return 1 | 0.007018 |