text (string, lengths 78-104k) | score (float64, 0-0.18)
---|---|
def by_geopoint(self, lat, long, radius, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a geopoint and radius tuple.
Args:
lat - geopoint latitude
long - geopoint longitude
radius - search radius (in miles)
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-separated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
header, content = self._http_request(
self.BASE_URL,
lat = lat,
long = long,
radius = radius,
term = term,
num_biz_requested = num_biz_requested,
category = category
)
return json.loads(content) | 0.028451 |
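A minimal usage sketch of the search call above; `client` is a hypothetical instance of the class defining `by_geopoint`, and the coordinates and search term are illustrative values only:
# hypothetical client object exposing by_geopoint(); values are illustrative
results = client.by_geopoint(37.788, -122.407, radius=2,
                             term='coffee', num_biz_requested=5)
print(results)  # parsed JSON response from the Yelp endpoint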
def commit_or_abort(self, ctx, timeout=None, metadata=None,
credentials=None):
"""Runs commit or abort operation."""
return self.stub.CommitOrAbort(ctx, timeout=timeout, metadata=metadata,
credentials=credentials) | 0.010274 |
def regular_fragments(self):
"""
Iterates through the regular fragments in the list
(which are sorted).
:rtype: generator of (int, :class:`~aeneas.syncmap.SyncMapFragment`)
"""
for i, fragment in enumerate(self.__fragments):
if fragment.fragment_type == SyncMapFragment.REGULAR:
yield (i, fragment) | 0.005333 |
def human(self, size, base=1000, units=' kMGTZ'):
"""Convert the input ``size`` to human readable, short form."""
sign = '+' if size >= 0 else '-'
size = abs(size)
if size < base:
return '%s%d' % (sign, size)
for i, suffix in enumerate(units):
unit = base ** (i + 1)
if size < unit:
return ('%s%.01f%s' % (
sign,
size / float(unit) * base,
suffix,
)).strip()
raise OverflowError | 0.00361 |
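A short usage sketch of the formatter above, assuming `fmt` is an instance exposing this method; the expected outputs follow from the default ``base=1000`` and ``units=' kMGTZ'``:
# assuming `fmt` is an instance exposing human(); outputs shown as comments
fmt.human(950)        # '+950'
fmt.human(1500)       # '+1.5k'
fmt.human(-2500000)   # '-2.5M'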
def service_command(name, command):
"""Run an init.d/upstart command."""
service_command_template = getattr(env, 'ARGYLE_SERVICE_COMMAND_TEMPLATE',
u'/etc/init.d/%(name)s %(command)s')
sudo(service_command_template % {'name': name,
'command': command}, pty=False) | 0.002841 |
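For example, with Fabric's `env` and `sudo` available as in the snippet, the call below runs ``/etc/init.d/nginx restart`` via sudo (unless ``ARGYLE_SERVICE_COMMAND_TEMPLATE`` overrides the template):
service_command('nginx', 'restart')   # -> sudo /etc/init.d/nginx restart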
def whoami(self):
"""
Return a Deferred which fires with a 2-tuple of (dotted quad ip, port
number).
"""
def cbWhoAmI(result):
return result['address']
return self.callRemote(WhoAmI).addCallback(cbWhoAmI) | 0.007576 |
def get_config(ini_path=None, rootdir=None):
""" Load configuration from INI.
:return Namespace:
"""
config = Namespace()
config.default_section = 'pylama'
if not ini_path:
path = get_default_config_file(rootdir)
if path:
config.read(path)
else:
config.read(ini_path)
return config | 0.002833 |
def mark_alert_as_read(self, alert_id):
"""
Mark an alert as read.
:param alert_id: The ID of the alert to mark as read.
:return: The response from the markAsRead API call.
"""
req = self.url + "/api/alert/{}/markAsRead".format(alert_id)
try:
return requests.post(req, headers={'Content-Type': 'application/json'}, proxies=self.proxies, auth=self.auth, verify=self.cert)
except requests.exceptions.RequestException as e:
raise AlertException("Mark alert as read error: {}".format(e)) | 0.005725 |
def remove_api_gateway_logs(self, project_name):
"""
Removes all logs that are assigned to a given rest api id.
"""
for rest_api in self.get_rest_apis(project_name):
for stage in self.apigateway_client.get_stages(restApiId=rest_api['id'])['item']:
self.remove_log_group('API-Gateway-Execution-Logs_{}/{}'.format(rest_api['id'], stage['stageName'])) | 0.009804 |
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link, self.gallery_dir)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link | 0.000954 |
def list_folder(self, folder_id=None):
"""Request a list of files and folders in specified folder.
Note:
if folder_id is not provided, ``Home`` folder will be listed
Args:
folder_id (:obj:`str`, optional): id of the folder to be listed.
Returns:
dict: dictionary containing only two keys ("folders", "files"), \
each key represents a list of dictionaries. ::
{
"folders": [
{
"id": "5144",
"name": ".videothumb"
},
{
"id": "5792",
"name": ".subtitles"
},
...
],
"files": [
{
"name": "big_buck_bunny.mp4.mp4",
"sha1": "c6531f5ce9669d6547023d92aea4805b7c45d133",
"folderid": "4258",
"upload_at": "1419791256",
"status": "active",
"size": "5114011",
"content_type": "video/mp4",
"download_count": "48",
"cstatus": "ok",
"link": "https://openload.co/f/UPPjeAk--30/big_buck_bunny.mp4.mp4",
"linkextid": "UPPjeAk--30"
},
...
]
}
"""
params = {'folder': folder_id} if folder_id else {}
return self._get('file/listfolder', params=params) | 0.001655 |
def set_nodes_aggregation_flag(self, peak_current_branch_max):
""" Set Load Areas with too high demand to aggregated type.
Args
----
peak_current_branch_max: float
Max. allowed current for line/cable
"""
for lv_load_area in self.grid_district.lv_load_areas():
peak_current_node = (lv_load_area.peak_load / (3**0.5) / self.v_level) # units: kVA / kV = A
if peak_current_node > peak_current_branch_max:
lv_load_area.is_aggregated = True
# add peak demand for all Load Areas of aggregation type
self.grid_district.add_aggregated_peak_demand() | 0.004545 |
def _send_to_address(self, address, data, timeout=10):
"""send data to *address* and *port* without verification of response.
"""
# Socket to talk to server
socket = get_context().socket(REQ)
try:
socket.setsockopt(LINGER, timeout * 1000)
if address.find(":") == -1:
socket.connect("tcp://%s:%d" % (address, self.default_port))
else:
socket.connect("tcp://%s" % address)
socket.send_string(data)
message = socket.recv_string()
if message != "ok":
LOGGER.warn("invalid acknowledge received: %s" % message)
finally:
socket.close() | 0.002821 |
def _normal_model(self, beta):
""" Creates the structure of the model (model matrices, etc) for
a Normal family ARIMAX model.
Parameters
----------
beta : np.ndarray
Contains untransformed starting values for the latent variables
Returns
----------
mu : np.ndarray
Contains the predicted values (location) for the time series
Y : np.ndarray
Contains the length-adjusted time series (accounting for lags)
"""
Y = self.y[self.max_lag:]
# Transform latent variables
z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
# Constant and AR terms
if self.ar == 0:
mu = np.transpose(self.ar_matrix)
elif self.ar == 1:
mu = np.transpose(self.ar_matrix)*z[:-self.family_z_no-self.ma-len(self.X_names)][0]
else:
mu = np.matmul(np.transpose(self.ar_matrix),z[:-self.family_z_no-self.ma-len(self.X_names)])
# X terms
mu = mu + np.matmul(self.X[self.integ+self.max_lag:],z[self.ma+self.ar:(self.ma+self.ar+len(self.X_names))])
# MA terms
if self.ma != 0:
mu = arimax_recursion(z, mu, Y, self.max_lag, Y.shape[0], self.ar, self.ma)
return mu, Y | 0.007391 |
def wrap(self, message):
"""
[MS-NLMP] v28.0 2016-07-14
3.4.6 GSS_WrapEx()
Emulates the GSS_Wrap() implementation to sign and seal messages if the correct flags
are set.
@param message: The message data that will be wrapped
@return message: The message that has been sealed if flags are set
@return signature: The signature of the message, None if flags are not set
"""
if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL:
encrypted_message = self._seal_message(message)
signature = self._get_signature(message)
message = encrypted_message
elif self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN:
signature = self._get_signature(message)
else:
signature = None
return message, signature | 0.004566 |
def aes_key(self, data):
"""
AES128 key to program into YubiKey.
Supply data as either a raw string, or a hexlified string prefixed by 'h:'.
The result, after any hex decoding, must be 16 bytes.
"""
old = self.key
if data:
new = self._decode_input_string(data)
if len(new) == 16:
self.key = new
else:
raise yubico_exception.InputError('AES128 key must be exactly 16 bytes')
return old | 0.007722 |
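A brief usage sketch; `cfg` is assumed to be a configuration object exposing this method, and the key below is a placeholder 16-byte value in the documented 'h:' hex form:
# `cfg` is a hypothetical config instance; the key is a placeholder value
previous_key = cfg.aes_key('h:000102030405060708090a0b0c0d0e0f')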
def ledger_effects(self, ledger_id, cursor=None, order='asc', limit=10):
"""This endpoint represents all effects that occurred in the given
ledger.
`GET /ledgers/{id}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-ledger.html>`_
:param int ledger_id: The id of the ledger to look up.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:return: The effects for a single ledger.
:rtype: dict
"""
endpoint = '/ledgers/{ledger_id}/effects'.format(ledger_id=ledger_id)
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params) | 0.00333 |
def community_names(name, communities=None):
'''
Manage the SNMP accepted community names and their permissions.
:param dict communities: A dictionary of SNMP communities and permissions.
Example of usage:
.. code-block:: yaml
snmp-community-names:
win_snmp.community_names:
- communities:
TestCommunity: Read Only
OtherCommunity: Read Write
'''
ret = {'name': name,
'changes': dict(),
'comment': six.text_type(),
'result': None}
ret_communities = {'changes': dict(),
'failures': dict()}
if not communities:
communities = dict()
current_communities = __salt__['win_snmp.get_community_names']()
# Note any existing communities that should be removed.
for current_vname in current_communities:
if current_vname not in communities:
ret_communities['changes'][current_vname] = {'old': current_communities[current_vname],
'new': None}
# Note any new communities or existing communities that should be changed.
for vname in communities:
current_vdata = None
if vname in current_communities:
current_vdata = current_communities[vname]
if communities[vname] != current_vdata:
ret_communities['changes'][vname] = {'old': current_vdata,
'new': communities[vname]}
if not ret_communities['changes']:
ret['comment'] = 'Communities already contain the provided values.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Communities will be changed.'
ret['changes'] = ret_communities
return ret
__salt__['win_snmp.set_community_names'](communities=communities)
new_communities = __salt__['win_snmp.get_community_names']()
# Verify that any communities that needed to be removed were removed.
for new_vname in new_communities:
if new_vname not in communities:
ret_communities['failures'][new_vname] = {'old': current_communities[new_vname],
'new': new_communities[new_vname]}
ret_communities['changes'].pop(new_vname, None)
# Verify that any new communities or existing communities that
# needed to be changed were changed.
for vname in communities:
new_vdata = None
if vname in new_communities:
new_vdata = new_communities[vname]
if communities[vname] != new_vdata:
ret_communities['failures'][vname] = {'old': current_communities[vname],
'new': new_vdata}
ret_communities['changes'].pop(vname, None)
if ret_communities['failures']:
ret['comment'] = 'Some communities failed to change.'
ret['changes'] = ret_communities
ret['result'] = False
else:
ret['comment'] = 'Set communities to contain the provided values.'
ret['changes'] = ret_communities['changes']
ret['result'] = True
return ret | 0.001552 |
def _api_scrape(json_inp, ndx):
"""
Internal method to streamline the getting of data from the json
Args:
json_inp (json): json input from our caller
ndx (int): index where the data is located in the api
Returns:
If pandas is present:
DataFrame (pandas.DataFrame): data set from ndx within the
API's json
else:
A dictionary of both headers and values from the page
"""
try:
headers = json_inp['resultSets'][ndx]['headers']
values = json_inp['resultSets'][ndx]['rowSet']
except KeyError:
# This is so ugly but this is what you get when your data doesn't
# come out in a standard format
try:
headers = json_inp['resultSet'][ndx]['headers']
values = json_inp['resultSet'][ndx]['rowSet']
except KeyError:
# Added for results that only include one set (ex. LeagueLeaders)
headers = json_inp['resultSet']['headers']
values = json_inp['resultSet']['rowSet']
if HAS_PANDAS:
return DataFrame(values, columns=headers)
else:
# Taken from www.github.com/bradleyfay/py-goldsberry
return [dict(zip(headers, value)) for value in values] | 0.000796 |
def resolve_redirects_if_needed(self, uri):
"""
substitute with final uri after 303 redirects (if it's a www location!)
:param uri:
:return:
"""
if type(uri) == type("string") or type(uri) == type(u"unicode"):
if uri.startswith("www."): # support for lazy people
uri = "http://%s" % str(uri)
if uri.startswith("http://"):
# headers = "Accept: application/rdf+xml" # old way
headers = {'Accept': "application/rdf+xml"}
req = urllib2.Request(uri, headers=headers)
res = urllib2.urlopen(req)
uri = res.geturl()
else:
raise Exception("A URI must be in string format.")
return uri | 0.003866 |
def build_engine_session(connection: str,
echo: bool = False,
autoflush: Optional[bool] = None,
autocommit: Optional[bool] = None,
expire_on_commit: Optional[bool] = None,
scopefunc=None) -> Tuple:
"""Build an engine and a session.
:param connection: An RFC-1738 database connection string
:param echo: Turn on echoing SQL
:param autoflush: Defaults to True if not specified in kwargs or configuration.
:param autocommit: Defaults to False if not specified in kwargs or configuration.
:param expire_on_commit: Defaults to False if not specified in kwargs or configuration.
:param scopefunc: Scoped function to pass to :func:`sqlalchemy.orm.scoped_session`
:rtype: tuple[Engine,Session]
From the Flask-SQLAlchemy documentation:
An extra key ``'scopefunc'`` can be set on the ``options`` dict to
specify a custom scope function. If it's not provided, Flask's app
context stack identity is used. This will ensure that sessions are
created and removed with the request/response cycle, and should be fine
in most cases.
"""
if connection is None:
raise ValueError('can not build engine when connection is None')
engine = create_engine(connection, echo=echo)
if autoflush is None:
autoflush = config.get('PYBEL_MANAGER_AUTOFLUSH', False)
if autocommit is None:
autocommit = config.get('PYBEL_MANAGER_AUTOCOMMIT', False)
if expire_on_commit is None:
expire_on_commit = config.get('PYBEL_MANAGER_AUTOEXPIRE', True)
log.debug('auto flush: %s, auto commit: %s, expire on commit: %s', autoflush, autocommit, expire_on_commit)
#: A SQLAlchemy session maker
session_maker = sessionmaker(
bind=engine,
autoflush=autoflush,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
)
#: A SQLAlchemy session object
session = scoped_session(
session_maker,
scopefunc=scopefunc,
)
return engine, session | 0.002836 |
def things_near(self, location, radius=None):
"Return all things within radius of location."
if radius is None: radius = self.perceptible_distance
radius2 = radius * radius
return [thing for thing in self.things
if distance2(location, thing.location) <= radius2] | 0.009677 |
def get_label(self, name):
"""Find the label by name."""
label_tag = self._find_label(name)
return _Label(label_tag.get('id'), label_tag.get('color'), label_tag.text) | 0.015789 |
def sendIq(self, entity):
"""
:type entity: IqProtocolEntity
"""
if entity.getType() == IqProtocolEntity.TYPE_SET and entity.getXmlns() == "w:m":
#media upload!
self._sendIq(entity, self.onRequestUploadSuccess, self.onRequestUploadError) | 0.017065 |
def percentage_of_reoccurring_datapoints_to_all_datapoints(x):
"""
Returns the percentage of unique values, that are present in the time series
more than once.
len(different values occurring more than once) / len(different values)
This means the percentage is normalized to the number of unique values,
in contrast to the percentage_of_reoccurring_values_to_all_values.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
if len(x) == 0:
return np.nan
unique, counts = np.unique(x, return_counts=True)
if counts.shape[0] == 0:
return 0
return np.sum(counts > 1) / float(counts.shape[0]) | 0.002646 |
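A small worked example of the ratio described in the docstring (unique values occurring more than once, divided by all unique values):
import numpy as np

x = np.array([1, 1, 2, 3, 3, 3, 4])
# unique values: {1, 2, 3, 4}; values occurring more than once: {1, 3}
percentage_of_reoccurring_datapoints_to_all_datapoints(x)  # 2 / 4 = 0.5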
def start(st_reg_number):
"""Checks the number valiaty for the Paraiba state"""
#st_reg_number = str(st_reg_number)
weights = [9, 8, 7, 6, 5, 4, 3, 2]
digit_state_registration = st_reg_number[-1]
if len(st_reg_number) != 9:
return False
sum_total = 0
for i in range(0, 8):
sum_total = sum_total + weights[i] * int(st_reg_number[i])
if sum_total % 11 == 0:
return digit_state_registration[-1] == '0'
digit_check = 11 - sum_total % 11
return str(digit_check) == digit_state_registration | 0.00361 |
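A worked example of the check; the registration number below is synthetic, constructed only to satisfy the weighted checksum, and is not a real registration:
start('160000017')   # weighted sum = 9*1 + 8*6 + 2*1 = 59, check digit = 11 - 59 % 11 = 7 -> True
start('160000018')   # last digit does not match the computed check digit -> False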
def import_obj(cls, i_datasource, import_time=None):
"""Imports the datasource from the object to the database.
Metrics, columns and the datasource will be overridden if they exist.
This function can be used to import/export dashboards between multiple
superset instances. Audit metadata isn't copied over.
"""
def lookup_sqlatable(table):
return db.session.query(SqlaTable).join(Database).filter(
SqlaTable.table_name == table.table_name,
SqlaTable.schema == table.schema,
Database.id == table.database_id,
).first()
def lookup_database(table):
return db.session.query(Database).filter_by(
database_name=table.params_dict['database_name']).one()
return import_datasource.import_datasource(
db.session, i_datasource, lookup_database, lookup_sqlatable,
import_time) | 0.002103 |
def _refresh_html_home(self):
"""
Function to refresh the self._parent.html['home'] object
which provides the status if zones are scheduled to
start automatically (program_toggle).
"""
req = self._parent.client.get(HOME_ENDPOINT)
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self._parent.html['home'] = generate_soup_html(req.text)
else:
req.raise_for_status() | 0.003802 |
def track_event(self, name: str, properties: Dict[str, object] = None,
measurements: Dict[str, object] = None) -> None:
"""
Send information about a single event that has occurred in the context of the application.
:param name: the name of the event.
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
"""
pass | 0.013746 |
def ref_file_from_bam(bam_file, data):
"""Subset a fasta input file to only a fraction of input contigs.
"""
new_ref = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "inputs", "ref")),
"%s-subset.fa" % dd.get_genome_build(data))
if not utils.file_exists(new_ref):
with file_transaction(data, new_ref) as tx_out_file:
contig_file = "%s-contigs.txt" % utils.splitext_plus(new_ref)[0]
with open(contig_file, "w") as out_handle:
for contig in [x.contig for x in idxstats(bam_file, data) if x.contig != "*"]:
out_handle.write("%s\n" % contig)
cmd = "seqtk subseq -l 100 %s %s > %s" % (dd.get_ref_file(data), contig_file, tx_out_file)
do.run(cmd, "Subset %s to BAM file contigs" % dd.get_genome_build(data))
ref.fasta_idx(new_ref, data["config"])
runner = broad.runner_from_path("picard", data["config"])
runner.run_fn("picard_index_ref", new_ref)
return {"base": new_ref} | 0.004817 |
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Validate the payment
self.check_payment(payment)
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount'])
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = payment['type']
PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['MndtIdNode'].text = payment['mandate_id']
TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
if bic:
TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Dbtr_Node'].text = payment['name']
TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if not payment.get('endtoend_id', ''):
payment['endtoend_id'] = make_id(self._config['name'])
TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes) | 0.00072 |
def addtoindex(self,norecurse=None):
"""Makes sure this element (and all subelements), are properly added to the index"""
if not norecurse: norecurse = (Word, Morpheme, Phoneme)
if self.id:
self.doc.index[self.id] = self
for e in self.data:
if all([not isinstance(e, C) for C in norecurse]):
try:
e.addtoindex(norecurse)
except AttributeError:
pass | 0.010504 |
def nodes(self, data=False, native=True):
"""
Returns a list of all nodes in the :class:`.GraphCollection`\.
Parameters
----------
data : bool
(default: False) If True, returns a list of 2-tuples containing
node labels and attributes.
native : bool
(default: True) If True, node labels are looked up in ``self.node_index``
and returned in their original (native) form.
Returns
-------
nodes : list
"""
nodes = self.master_graph.nodes(data=data)
if native:
if data:
nodes = [(self.node_index[n], attrs) for n, attrs in nodes]
else:
nodes = [self.node_index[n] for n in nodes]
return nodes | 0.004747 |
def clear(self):
"""Convenience method to reset all the block values to 0
"""
if self._bit_count == 0:
return
block = self._qpart.document().begin()
while block.isValid():
if self.getBlockValue(block):
self.setBlockValue(block, 0)
block = block.next() | 0.005814 |
def bces(x1, x2, x1err=[], x2err=[], cerr=[], logify=True, model='yx', \
bootstrap=5000, verbose='normal', full_output=True):
"""
Bivariate, Correlated Errors and intrinsic Scatter (BCES)
translated from the FORTRAN code by Christina Bird and Matthew Bershady
(Akritas & Bershady, 1996)
Linear regression in the presence of heteroscedastic errors on both
variables and intrinsic scatter
Parameters
----------
x1 : array of floats
Independent variable, or observable
x2 : array of floats
Dependent variable
x1err : array of floats (optional)
Uncertainties on the independent variable
x2err : array of floats (optional)
Uncertainties on the dependent variable
cerr : array of floats (optional)
Covariances of the uncertainties in the dependent and
independent variables
logify : bool (default True)
Whether to take the log of the measurements in order to
estimate the best-fit power law instead of linear relation
model : {'yx', 'xy', 'bi', 'orth'}
BCES model with which to calculate regression. See Notes
below for details.
bootstrap : False or int (default 5000)
get the errors from bootstrap resampling instead of the
analytical prescription? if bootstrap is an int, it is the
number of bootstrap resamplings
verbose : str (default 'normal')
Verbose level. Options are {'quiet', 'normal', 'debug'}
full_output : bool (default True)
If True, return also the covariance between the
normalization and slope of the regression.
Returns
-------
a : tuple of length 2
Best-fit normalization and its uncertainty (a, da)
b : tuple of length 2
Best-fit slope and its uncertainty (b, db)
Optional outputs
----------------
cov_ab : 2x2 array of floats
covariance between a and b. Returned if full_output is set to
True.
Notes
-----
If verbose is normal or debug, the results from all the BCES models will
be printed (still, only the one selected in *model* will be returned).
the *model* parameter:
-'yx' stands for BCES(Y|X)
-'xy' stands for BCES(X|Y)
-'bi' stands for BCES Bisector
-'orth' stands for BCES Orthogonal
"""
def _bess_bootstrap(npts, x1, x2, x1err, x2err, cerr,nsim):
##added by Gerrit, July 2014
##Unfortunately I needed a copy of the _bess function for bootstrapping.
#Would be nicer if those two could be combined
"""
Do the entire regression calculation for 4 slopes:
OLS(Y|X), OLS(X|Y), bisector, orthogonal
"""
#calculate sigma's for datapoints using length of confidence intervals
sig11var = numpy.sum(x1err ** 2,axis=1,keepdims=True) / npts
sig22var = numpy.sum(x2err ** 2,axis=1,keepdims=True) / npts
sig12var = numpy.sum(cerr,axis=1,keepdims=True) / npts
# calculate means and variances
x1av = numpy.mean(x1,axis=1,keepdims=True)
x1var = x1.var(axis=1,keepdims=True)
x2av = numpy.mean(x2,axis=1,keepdims=True)
x2var = x2.var(axis=1,keepdims=True)
covar_x1x2 = numpy.mean((x1-numpy.mean(x1,axis=1,keepdims=True)) * \
(x2-numpy.mean(x2,axis=1,keepdims=True)),
axis=1,keepdims=True)
# compute the regression slopes for OLS(X2|X1), OLS(X1|X2),
# bisector and orthogonal
if model == 'yx':
modelint = 1
else:
modelint = 4
b = numpy.zeros((modelint,nsim))
b[0] = ((covar_x1x2 - sig12var) / (x1var - sig11var)).flatten()
if model != 'yx':
b[1] = ((x2var - sig22var) / (covar_x1x2 - sig12var)).flatten()
b[2] = ((b[0] * b[1] - 1 + numpy.sqrt((1 + b[0] ** 2) * \
(1 + b[1] ** 2))) / (b[0] + b[1])).flatten()
b[3] = 0.5 * ((b[1] - 1 / b[0]) + numpy.sign(covar_x1x2).flatten()* \
numpy.sqrt(4 + (b[1] - 1 / b[0]) ** 2))
# compute intercepts for above 4 cases:
a = x2av.flatten() - b * x1av.flatten()
# set up variables to calculate standard deviations of slope and
# intercept
xi = []
xi.append(((x1 - x1av) * (x2 - b[0].reshape(nsim,1) * x1 - \
a[0].reshape(nsim,1)) + \
b[0].reshape(nsim,1) * x1err ** 2) / \
(x1var - sig11var))
if model != 'yx':
xi.append(((x2 - x2av) * (x2 - b[1].reshape(nsim,1) * x1 - \
a[1].reshape(nsim,1)) + x2err ** 2) / \
covar_x1x2)
xi.append((xi[0] * (1 + b[1].reshape(nsim,1) ** 2) + \
xi[1] * (1 + b[0].reshape(nsim,1) ** 2)) / \
((b[0].reshape(nsim,1) + \
b[1].reshape(nsim,1)) * \
numpy.sqrt((1 + b[0].reshape(nsim,1) ** 2) * \
(1 + b[1].reshape(nsim,1) ** 2))))
xi.append((xi[0] / b[0].reshape(nsim,1) ** 2 + xi[1]) * \
b[3].reshape(nsim,1) / \
numpy.sqrt(4 + (b[1].reshape(nsim,1) - \
1 / b[0].reshape(nsim,1)) ** 2))
zeta = []
for i in xrange(modelint):
zeta.append(x2 - b[i].reshape(nsim,1) * x1 - x1av * xi[i])
# calculate variance for all a and b
bvar = numpy.zeros((4,nsim))
avar = numpy.zeros((4,nsim))
for i in xrange(modelint):
bvar[i] = xi[i].var(axis=1,keepdims=False)/ npts
avar[i] = zeta[i].var(axis=1,keepdims=False) / npts
return a, b, avar, bvar, xi, zeta
def _bess(npts, x1, x2, x1err, x2err, cerr):
"""
Do the entire regression calculation for 4 slopes:
OLS(Y|X), OLS(X|Y), bisector, orthogonal
"""
# calculate sigma's for datapoints using length of confidence
# intervals
sig11var = sum(x1err ** 2) / npts
sig22var = sum(x2err ** 2) / npts
sig12var = sum(cerr) / npts
# calculate means and variances
x1av = numpy.average(x1)
x1var = numpy.std(x1) ** 2
x2av = numpy.average(x2)
x2var = numpy.std(x2) ** 2
covar_x1x2 = sum((x1 - x1av) * (x2 - x2av)) / npts
# compute the regression slopes for OLS(X2|X1), OLS(X1|X2),
# bisector and orthogonal
b = numpy.zeros(4)
b[0] = (covar_x1x2 - sig12var) / (x1var - sig11var)
b[1] = (x2var - sig22var) / (covar_x1x2 - sig12var)
b[2] = (b[0] * b[1] - 1 + numpy.sqrt((1 + b[0] ** 2) * \
(1 + b[1] ** 2))) / (b[0] + b[1])
b[3] = 0.5 * ((b[1] - 1 / b[0]) + numpy.sign(covar_x1x2) * \
numpy.sqrt(4 + (b[1] - 1 / b[0]) ** 2))
# compute intercepts for above 4 cases:
a = x2av - b * x1av
# set up variables to calculate standard deviations of slope
# and intercept
xi = []
xi.append(((x1 - x1av) * \
(x2 - b[0] * x1 - a[0]) + b[0] * x1err ** 2) / \
(x1var - sig11var))
xi.append(((x2 - x2av) * (x2 - b[1] * x1 - a[1]) + x2err ** 2) / \
covar_x1x2)
xi.append((xi[0] * (1 + b[1] ** 2) + xi[1] * (1 + b[0] ** 2)) / \
((b[0] + b[1]) * \
numpy.sqrt((1 + b[0] ** 2) * (1 + b[1] ** 2))))
xi.append((xi[0] / b[0] ** 2 + xi[1]) * b[3] / \
numpy.sqrt(4 + (b[1] - 1 / b[0]) ** 2))
zeta = []
for i in xrange(4):
zeta.append(x2 - b[i]*x1 - x1av*xi[i])
# calculate variance for all a and b
bvar = numpy.zeros(4)
avar = numpy.zeros(4)
for i in xrange(4):
bvar[i] = numpy.std(xi[i]) ** 2 / npts
avar[i] = numpy.std(zeta[i]) ** 2 / npts
return a, b, avar, bvar, xi, zeta
def _bootspbec(npts, x, y, xerr, yerr, cerr):
"""
Bootstrap samples
"""
j = numpy.random.randint(npts, size = npts)
xboot = x[j]
xerrboot = xerr[j]
yboot = y[j]
yerrboot = yerr[j]
cerrboot = cerr[j]
return xboot, yboot, xerrboot, yerrboot, cerrboot
# ---- Main routine starts here ---- #
# convert to numpy arrays just in case
x1 = numpy.array(x1)
x2 = numpy.array(x2)
x1err = numpy.array(x1err)
x2err = numpy.array(x2err)
if logify:
x1, x2, x1err, x2err = to_log(x1, x2, x1err, x2err)
cerr = numpy.array(cerr)
models = [['yx', 'xy', 'bi', 'orth'],
['BCES(Y|X)', 'BCES(X|Y)', 'BCES Bisector', 'BCES Orthogonal']]
# which to return?
j = models[0].index(model)
npts = len(x1)
# are the errors defined?
if len(x1err) == 0:
x1err = numpy.zeros(npts)
if len(x2err) == 0:
x2err = numpy.zeros(npts)
if len(cerr) == 0:
cerr = numpy.zeros(npts)
if verbose == 'debug':
print 'x1 =', x1
print 'x1err =', x1err
print 'x2 =', x2
print 'x2err =', x2err
print 'cerr =', cerr
print '\n ** Returning values for', models[1][j], '**'
if bootstrap is not False:
print ' with errors from %d bootstrap resamplings' %bootstrap
print ''
# calculate nominal fits
bessresults = _bess(npts, x1, x2, x1err, x2err, cerr)
(a, b, avar, bvar, xi, zeta) = bessresults
# covariance between normalization and slope
if full_output:
covar_ab = numpy.cov(xi[j], zeta[j])
if bootstrap is not False:
# make bootstrap simulated datasets, and compute averages and
# standard deviations of regression coefficients
asum = numpy.zeros(4)
assum = numpy.zeros(4)
bsum = numpy.zeros(4)
bssum = numpy.zeros(4)
sda = numpy.zeros(4)
sdb = numpy.zeros(4)
for i in xrange(bootstrap):
samples = _bootspbec(npts, x1, x2, x1err, x2err, cerr)
(x1sim, x2sim, x1errsim, x2errsim, cerrsim) = samples
besssim = _bess(npts, x1sim, x2sim, x1errsim, x2errsim, cerrsim)
(asim, bsim, avarsim, bvarsim, xi, zeta) = besssim
asum += asim
assum += asim ** 2
bsum += bsim
bssum += bsim ** 2
aavg = asum / bootstrap
bavg = bsum / bootstrap
for i in range(4):
sdtest = assum[i] - bootstrap * aavg[i] ** 2
if sdtest > 0:
sda[i] = numpy.sqrt(sdtest / (bootstrap - 1))
sdtest = bssum[i] - bootstrap * bavg[i] ** 2
if sdtest > 0:
sdb[i] = numpy.sqrt(sdtest / (bootstrap - 1))
if verbose in ('normal', 'debug'):
print '%s B err(B)' %('Fit'.ljust(19)),
print ' A err(A)'
for i in range(4):
print '%s %9.2e +/- %8.2e %10.3e +/- %9.3e' \
%(models[1][i].ljust(16), b[i],
numpy.sqrt(bvar[i]), a[i], numpy.sqrt(avar[i]))
if bootstrap is not False:
print '%s %9.2e +/- %8.2e %10.3e +/- %9.3e' \
%('bootstrap'.ljust(16), bavg[i],
sdb[i], aavg[i], sda[i])
print ''
if verbose == 'debug':
print 'cov[%s] =' %models[1][j]
print covar_ab
if bootstrap is not False:
if full_output:
return (a[j], sda[j]), (b[j], sdb[j]), covar_ab
else:
return (a[j], sda[j]), (b[j], sdb[j])
if full_output:
out = ((a[j], numpy.sqrt(avar[j])),
(b[j], numpy.sqrt(bvar[j])),
covar_ab)
else:
out = ((a[j], numpy.sqrt(avar[j])),
(b[j], numpy.sqrt(bvar[j])))
return out | 0.007834 |
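A minimal usage sketch with synthetic data (note the snippet is Python 2: it relies on ``xrange`` and ``print`` statements, and assumes ``numpy`` is imported under that name); ``logify=False`` avoids the ``to_log`` helper and ``verbose='quiet'`` suppresses the printed model comparison:
# synthetic linear data with known slope 2 and intercept 1
x = numpy.linspace(1.0, 10.0, 50)
y = 2.0 * x + 1.0 + numpy.random.normal(0.0, 0.5, 50)
xerr = numpy.full(50, 0.1)
yerr = numpy.full(50, 0.5)
(a, da), (b, db), cov_ab = bces(x, y, x1err=xerr, x2err=yerr, logify=False,
                                model='yx', bootstrap=1000, verbose='quiet')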
def inherit_dict(base, namespace, attr_name,
inherit=lambda k, v: True):
"""
Perform inheritance of dictionaries. Returns a list of key
and value pairs for values that were inherited, for
post-processing.
:param base: The base class being considered; see
``iter_bases()``.
:param namespace: The dictionary of the new class being built.
:param attr_name: The name of the attribute containing the
dictionary to be inherited.
:param inherit: Filtering function to determine if a given
item should be inherited. If ``False`` or
``None``, item will not be added, but will be
included in the returned items. If a
function, the function will be called with the
key and value, and the item will be added and
included in the items list only if the
function returns ``True``. By default, all
items are added and included in the items
list.
"""
items = []
# Get the dicts to compare
base_dict = getattr(base, attr_name, {})
new_dict = namespace.setdefault(attr_name, {})
for key, value in base_dict.items():
# Skip keys that have been overridden or that we shouldn't
# inherit
if key in new_dict or (inherit and not inherit(key, value)):
continue
# Inherit the key
if inherit:
new_dict[key] = value
# Save the item for post-processing
items.append((key, value))
return items | 0.001673 |
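A minimal sketch of the inheritance helper with the default ``inherit`` filter; keys already present in the new namespace are skipped, everything else is copied into it and reported:
class Base(object):
    options = {'color': 'red', 'size': 10}

namespace = {'options': {'size': 12}}
items = inherit_dict(Base, namespace, 'options')
# namespace['options'] is now {'size': 12, 'color': 'red'}
# items == [('color', 'red')]  -- 'size' was overridden, so it is not reported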
def _SID_call_prep(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
"""Preparation work for SomaticIndelDetector.
"""
base_config = items[0]["config"]
for x in align_bams:
bam.index(x, base_config)
params = ["-R", ref_file, "-T", "SomaticIndelDetector", "-U", "ALLOW_N_CIGAR_READS"]
# Limit per base read start count to between 200-10000, i.e. from any base
# no more than 10000 new reads may begin.
# Further, limit maxNumberOfReads accordingly, otherwise SID discards
# windows for high coverage panels.
paired = vcfutils.get_paired_bams(align_bams, items)
params += ["--read_filter", "NotPrimaryAlignment"]
params += ["-I:tumor", paired.tumor_bam]
min_af = float(get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
if paired.normal_bam is not None:
params += ["-I:normal", paired.normal_bam]
# notice there must be at least 4 reads of coverage in normal
params += ["--filter_expressions", "T_COV<6||N_COV<4||T_INDEL_F<%s||T_INDEL_CF<0.7" % min_af]
else:
params += ["--unpaired"]
params += ["--filter_expressions", "COV<6||INDEL_F<%s||INDEL_CF<0.7" % min_af]
if region:
params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule",
"INTERSECTION"]
return params | 0.004409 |
def data_request(self, vehicle_id, name, wake_if_asleep=False):
"""Get requested data from vehicle_id.
Parameters
----------
vehicle_id : string
Identifier for the car on the owner-api endpoint. Confusingly it
is not the vehicle_id field for identifying the car across
different endpoints.
https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
name: string
Name of data to be requested from the data_request endpoint which
rolls ups all data plus vehicle configuration.
https://tesla-api.timdorr.com/vehicle/state/data
wake_if_asleep : bool
Function for underlying api call for whether a failed response
should wake up the vehicle or retry.
Returns
-------
dict
Tesla json object.
"""
return self.get(vehicle_id, 'vehicle_data/%s' % name,
wake_if_asleep=wake_if_asleep)['response'] | 0.001944 |
def numpart_qaoa(asset_list, A=1.0, minimizer_kwargs=None, steps=1):
"""
generate number partition driver and cost functions
:param asset_list: list to binary partition
:param A: (float) optional constant for level separation. Default=1.
:param minimizer_kwargs: Arguments for the QAOA minimizer
:param steps: (int) number of steps approximating the solution.
"""
cost_operators = []
ref_operators = []
for ii in range(len(asset_list)):
for jj in range(ii + 1, len(asset_list)):
cost_operators.append(PauliSum([PauliTerm("Z", ii, 2*asset_list[ii]) *
PauliTerm("Z", jj, A*asset_list[jj])]))
ref_operators.append(PauliSum([PauliTerm("X", ii, -1.0)]))
cost_operators.append(PauliSum([PauliTerm("I", 0, len(asset_list))]))
if minimizer_kwargs is None:
minimizer_kwargs = {'method': 'Nelder-Mead',
'options': {'ftol': 1.0e-2,
'xtol': 1.0e-2,
'disp': True}}
qc = get_qc(f"{len(asset_list)}q-qvm")
qaoa_inst = QAOA(qc, list(range(len(asset_list))), steps=steps, cost_ham=cost_operators,
ref_ham=ref_operators, store_basis=True,
minimizer=minimize, minimizer_kwargs=minimizer_kwargs,
vqe_options={'disp': print})
return qaoa_inst | 0.002782 |
def split(self, split_dimension, cache_points):
"""!
@brief Split BANG-block into two new blocks in specified dimension.
@param[in] split_dimension (uint): Dimension where block should be split.
@param[in] cache_points (bool): If True then covered points are cached. Used for leaf blocks.
@return (tuple) Pair of BANG-blocks that were formed from the current block.
"""
left_region_number = self.__region_number
right_region_number = self.__region_number + 2 ** self.__level
first_spatial_block, second_spatial_block = self.__spatial_block.split(split_dimension)
left = bang_block(self.__data, left_region_number, self.__level + 1, first_spatial_block, cache_points)
right = bang_block(self.__data, right_region_number, self.__level + 1, second_spatial_block, cache_points)
return left, right | 0.007735 |
def _connect(cls, url, token, timeout, results, i, job_is_done_event=None):
""" Connects to the specified cls with url and token. Stores the connection
information to results[i] in a threadsafe way.
Arguments:
cls: the class which is responsible for establishing connection, basically it's
:class:`~plexapi.client.PlexClient` or :class:`~plexapi.server.PlexServer`
url (str): url which should be passed as `baseurl` argument to cls.__init__()
token (str): authentication token which should be passed as `token` argument to cls.__init__()
timeout (int): timeout which should be passed as `timeout` argument to cls.__init__()
results (list): pre-filled list for results
i (int): index of current job, should be less than len(results)
job_is_done_event (:class:`~threading.Event`): if X_PLEX_ENABLE_FAST_CONNECT is True then the
event would be set as soon as the connection is established
"""
starttime = time.time()
try:
device = cls(baseurl=url, token=token, timeout=timeout)
runtime = int(time.time() - starttime)
results[i] = (url, token, device, runtime)
if X_PLEX_ENABLE_FAST_CONNECT and job_is_done_event:
job_is_done_event.set()
except Exception as err:
runtime = int(time.time() - starttime)
log.error('%s: %s', url, err)
results[i] = (url, token, None, runtime) | 0.004695 |
def get_update(self, z=None):
"""
Computes the new estimate based on measurement `z` and returns it
without altering the state of the filter.
Parameters
----------
z : (dim_z, 1): array_like
measurement for this update. z can be a scalar if dim_z is 1,
otherwise it must be convertible to a column vector.
Returns
-------
(x, P) : tuple
State vector and covariance array of the update.
"""
if z is None:
return self.x, self.P
z = reshape_z(z, self.dim_z, self.x.ndim)
R = self.R
H = self.H
P = self.P
x = self.x
# error (residual) between measurement and prediction
y = z - dot(H, x)
# common subexpression for speed
PHT = dot(P, H.T)
# project system uncertainty into measurement space
S = dot(H, PHT) + R
# map system uncertainty into kalman gain
K = dot(PHT, self.inv(S))
# predict new x with residual scaled by the kalman gain
x = x + dot(K, y)
# P = (I-KH)P(I-KH)' + KRK'
I_KH = self._I - dot(K, H)
P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)
return x, P | 0.001579 |
def configure(self, options, conf):
"""Configure plugin.
"""
self.conf = conf
# Disable if explicitly disabled, or if logging is
# configured via logging config file
if not options.logcapture or conf.loggingConfig:
self.enabled = False
self.logformat = options.logcapture_format
self.logdatefmt = options.logcapture_datefmt
self.clear = options.logcapture_clear
self.loglevel = options.logcapture_level
if options.logcapture_filters:
self.filters = options.logcapture_filters.split(',') | 0.003344 |
def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
instantiate the class, such as 'BOS201806070'.
"""
if self._away_runs is None and self._home_runs is None:
return None
fields_to_include = {
'date': self.date,
'time': self.time,
'venue': self.venue,
'attendance': self.attendance,
'duration': self.duration,
'time_of_day': self.time_of_day,
'winner': self.winner,
'winning_name': self.winning_name,
'winning_abbr': self.winning_abbr,
'losing_name': self.losing_name,
'losing_abbr': self.losing_abbr,
'away_at_bats': self.away_at_bats,
'away_runs': self.away_runs,
'away_hits': self.away_hits,
'away_rbi': self.away_rbi,
'away_earned_runs': self.away_earned_runs,
'away_bases_on_balls': self.away_bases_on_balls,
'away_strikeouts': self.away_strikeouts,
'away_plate_appearances': self.away_plate_appearances,
'away_batting_average': self.away_batting_average,
'away_on_base_percentage': self.away_on_base_percentage,
'away_slugging_percentage': self.away_slugging_percentage,
'away_on_base_plus': self.away_on_base_plus,
'away_pitches': self.away_pitches,
'away_strikes': self.away_strikes,
'away_win_probability_for_offensive_player':
self.away_win_probability_for_offensive_player,
'away_average_leverage_index': self.away_average_leverage_index,
'away_win_probability_added': self.away_win_probability_added,
'away_win_probability_subtracted':
self.away_win_probability_subtracted,
'away_base_out_runs_added': self.away_base_out_runs_added,
'away_putouts': self.away_putouts,
'away_assists': self.away_assists,
'away_innings_pitched': self.away_innings_pitched,
'away_home_runs': self.away_home_runs,
'away_strikes_by_contact': self.away_strikes_by_contact,
'away_strikes_swinging': self.away_strikes_swinging,
'away_strikes_looking': self.away_strikes_looking,
'away_grounded_balls': self.away_grounded_balls,
'away_fly_balls': self.away_fly_balls,
'away_line_drives': self.away_line_drives,
'away_unknown_bat_type': self.away_unknown_bat_type,
'away_game_score': self.away_game_score,
'away_inherited_runners': self.away_inherited_runners,
'away_inherited_score': self.away_inherited_score,
'away_win_probability_by_pitcher':
self.away_win_probability_by_pitcher,
'away_base_out_runs_saved': self.away_base_out_runs_saved,
'home_at_bats': self.home_at_bats,
'home_runs': self.home_runs,
'home_hits': self.home_hits,
'home_rbi': self.home_rbi,
'home_earned_runs': self.home_earned_runs,
'home_bases_on_balls': self.home_bases_on_balls,
'home_strikeouts': self.home_strikeouts,
'home_plate_appearances': self.home_plate_appearances,
'home_batting_average': self.home_batting_average,
'home_on_base_percentage': self.home_on_base_percentage,
'home_slugging_percentage': self.home_slugging_percentage,
'home_on_base_plus': self.home_on_base_plus,
'home_pitches': self.home_pitches,
'home_strikes': self.home_strikes,
'home_win_probability_for_offensive_player':
self.home_win_probability_for_offensive_player,
'home_average_leverage_index': self.home_average_leverage_index,
'home_win_probability_added': self.home_win_probability_added,
'home_win_probability_subtracted':
self.home_win_probability_subtracted,
'home_base_out_runs_added': self.home_base_out_runs_added,
'home_putouts': self.home_putouts,
'home_assists': self.home_assists,
'home_innings_pitched': self.home_innings_pitched,
'home_home_runs': self.home_home_runs,
'home_strikes_by_contact': self.home_strikes_by_contact,
'home_strikes_swinging': self.home_strikes_swinging,
'home_strikes_looking': self.home_strikes_looking,
'home_grounded_balls': self.home_grounded_balls,
'home_fly_balls': self.home_fly_balls,
'home_line_drives': self.home_line_drives,
'home_unknown_bat_type': self.home_unknown_bat_type,
'home_game_score': self.home_game_score,
'home_inherited_runners': self.home_inherited_runners,
'home_inherited_score': self.home_inherited_score,
'home_win_probability_by_pitcher':
self.home_win_probability_by_pitcher,
'home_base_out_runs_saved': self.home_base_out_runs_saved
}
return pd.DataFrame([fields_to_include], index=[self._uri]) | 0.000381 |
def set_plugin_filepaths(self, filepaths, except_blacklisted=True):
"""
Sets internal state to `filepaths`. Recommend passing
in absolute filepaths. Method will attempt to convert to
absolute paths if they are not already.
`filepaths` can be a single object or an iterable.
If `except_blacklisted` is `True`, all `filepaths` that
have been blacklisted will not be set.
"""
self.file_manager.set_plugin_filepaths(filepaths,
except_blacklisted) | 0.003552 |
def _merge_command(run, full_result, results):
"""Merge a group of results from write commands into the full result.
"""
for offset, result in results:
affected = result.get("n", 0)
if run.op_type == _INSERT:
full_result["nInserted"] += affected
elif run.op_type == _DELETE:
full_result["nRemoved"] += affected
elif run.op_type == _UPDATE:
upserted = result.get("upserted")
if upserted:
if isinstance(upserted, list):
n_upserted = len(upserted)
for doc in upserted:
doc["index"] = run.index(doc["index"] + offset)
full_result["upserted"].extend(upserted)
else:
n_upserted = 1
index = run.index(offset)
doc = {_UINDEX: index, _UID: upserted}
full_result["upserted"].append(doc)
full_result["nUpserted"] += n_upserted
full_result["nMatched"] += (affected - n_upserted)
else:
full_result["nMatched"] += affected
n_modified = result.get("nModified")
# SERVER-13001 - in a mixed sharded cluster a call to
# update could return nModified (>= 2.6) or not (<= 2.4).
# If any call does not return nModified we can't report
# a valid final count so omit the field completely.
if n_modified is not None and "nModified" in full_result:
full_result["nModified"] += n_modified
else:
full_result.pop("nModified", None)
write_errors = result.get("writeErrors")
if write_errors:
for doc in write_errors:
# Leave the server response intact for APM.
replacement = doc.copy()
idx = doc["index"] + offset
replacement["index"] = run.index(idx)
# Add the failed operation to the error document.
replacement[_UOP] = run.ops[idx]
full_result["writeErrors"].append(replacement)
wc_error = result.get("writeConcernError")
if wc_error:
full_result["writeConcernErrors"].append(wc_error) | 0.000436 |
def query_db_reference():
"""
Returns list of cross references by query parameters
---
tags:
- Query functions
parameters:
- name: type_
in: query
type: string
required: false
description: Reference type
default: EMBL
- name: identifier
in: query
type: string
required: false
description: reference identifier
default: Y00264
- name: entry_name
in: query
type: string
required: false
description: UniProt entry name
default: A4_HUMAN
- name: limit
in: query
type: integer
required: false
description: limit of results numbers
default: 10
"""
args = get_args(
request_args=request.args,
allowed_str_args=['type_', 'identifier', 'entry_name'],
allowed_int_args=['limit']
)
return jsonify(query.db_reference(**args)) | 0.00103 |
def show_order_parameter(sync_output_dynamic, start_iteration = None, stop_iteration = None):
"""!
@brief Shows evolution of order parameter (level of global synchronization in the network).
@param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network whose evolution of global synchronization should be visualized.
@param[in] start_iteration (uint): The first iteration that is used for calculation, if 'None' then the first is used
@param[in] stop_iteration (uint): The last iteration that is used for calculation, if 'None' then the last is used.
"""
(start_iteration, stop_iteration) = sync_visualizer.__get_start_stop_iterations(sync_output_dynamic, start_iteration, stop_iteration)
order_parameter = sync_output_dynamic.calculate_order_parameter(start_iteration, stop_iteration)
axis = plt.subplot(111)
plt.plot(sync_output_dynamic.time[start_iteration:stop_iteration], order_parameter, 'b-', linewidth = 2.0)
set_ax_param(axis, "t", "R (order parameter)", None, [0.0, 1.05])
plt.show() | 0.02294 |
def getRaw(self, instance, **kwargs):
"""Returns raw field value (possible wrapped in BaseUnit)
"""
value = ObjectField.get(self, instance, **kwargs)
# getattr(instance, "Remarks") returns a BaseUnit
if callable(value):
value = value()
return value | 0.006494 |
def users_lookupByEmail(self, *, email: str, **kwargs) -> SlackResponse:
"""Find a user with an email address.
Args:
email (str): An email address belonging to a user in the workspace.
e.g. '[email protected]'
"""
kwargs.update({"email": email})
return self.api_call("users.lookupByEmail", http_verb="GET", params=kwargs) | 0.007389 |
def determine_hooks(self, controller=None):
'''
Determines the hooks to be run, in which order.
:param controller: If specified, includes hooks for a specific
controller.
'''
controller_hooks = []
if controller:
controller_hooks = _cfg(controller).get('hooks', [])
if controller_hooks:
return list(
sorted(
chain(controller_hooks, self.hooks),
key=operator.attrgetter('priority')
)
)
return self.hooks | 0.003175 |
def ExpireRules(self):
"""Removes any rules with an expiration date in the past."""
rules = self.Get(self.Schema.RULES)
new_rules = self.Schema.RULES()
now = time.time() * 1e6
expired_session_ids = set()
for rule in rules:
if rule.expires > now:
new_rules.Append(rule)
else:
for action in rule.actions:
if action.hunt_id:
expired_session_ids.add(action.hunt_id)
if expired_session_ids:
with data_store.DB.GetMutationPool() as pool:
# Notify the worker to mark this hunt as terminated.
manager = queue_manager.QueueManager(token=self.token)
manager.MultiNotifyQueue([
rdf_flows.GrrNotification(session_id=session_id)
for session_id in expired_session_ids
],
mutation_pool=pool)
if len(new_rules) < len(rules):
self.Set(self.Schema.RULES, new_rules)
self.Flush() | 0.007353 |
def _get_synset_offsets(synset_idxes):
"""Returs pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file.
"""
offsets = {}
current_seeked_offset_idx = 0
ordered_synset_idxes = sorted(synset_idxes)
with codecs.open(_SOI,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]):
# Looping on single line entries in case synset_indexes contains duplicates.
offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1])
current_seeked_offset_idx += 1
if current_seeked_offset_idx >= len(synset_idxes):
break
return [offsets[synset_idx] for synset_idx in synset_idxes] | 0.00578 |
def _set_i2c_speed(self, i2c_speed):
""" Set I2C speed to one of '400kHz', '100kHz', 50kHz', '5kHz'
"""
lower_bits_mapping = {
'400kHz': 3,
'100kHz': 2,
'50kHz': 1,
'5kHz': 0,
}
if i2c_speed not in lower_bits_mapping:
raise ValueError('Invalid i2c_speed')
speed_byte = 0b01100000 | lower_bits_mapping[i2c_speed]
self.device.write(bytearray([speed_byte]))
response = self.device.read(1)
if response != b"\x01":
raise Exception("Changing I2C speed failed. Received: {}".format(repr(response))) | 0.004724 |
def _load_json_file(json_file):
""" load json file and check file content format
"""
with io.open(json_file, encoding='utf-8') as data_file:
try:
json_content = json.load(data_file)
except p_exception.JSONDecodeError:
err_msg = u"JSONDecodeError: JSON file format error: {}".format(json_file)
raise p_exception.FileFormatError(err_msg)
FileUtils._check_format(json_file, json_content)
return json_content | 0.005597 |
def extended_cigar(aligned_template, aligned_query):
''' Convert mutation annotations to extended cigar format
https://github.com/lh3/minimap2#the-cs-optional-tag
USAGE:
>>> template = 'CGATCGATAAATAGAGTAG---GAATAGCA'
>>> query = 'CGATCG---AATAGAGTAGGTCGAATtGCA'
>>> extended_cigar(template, query) == ':6-ata:10+gtc:4*at:3'
True
'''
# - Go through each position in the alignment
insertion = []
deletion = []
matches = []
cigar = []
for r_aa, q_aa in zip(aligned_template.lower(), aligned_query.lower()):
gap_ref = r_aa == '-'
gap_que = q_aa == '-'
match = r_aa == q_aa
if matches and not match:
# End match block
cigar.append(":%s"%len(matches))
matches = []
if insertion and not gap_ref:
# End insertion
cigar.append("+%s"%''.join(insertion))
insertion = []
elif deletion and not gap_que:
# End deletion
cigar.append("-%s"%''.join(deletion))
deletion = []
if gap_ref:
if insertion:
# Extend insertion
insertion.append(q_aa)
else:
# Start insertion
insertion = [q_aa]
elif gap_que:
if deletion:
# Extend deletion
deletion.append(r_aa)
else:
# Start deletion
deletion = [r_aa]
elif match:
if matches:
# Extend match block
matches.append(r_aa)
else:
# Start match block
matches = [r_aa]
else:
# Add SNP annotation
cigar.append("*%s%s"%(r_aa, q_aa))
if matches:
cigar.append(":%s"%len(matches))
del matches
if insertion:
# End insertion
cigar.append("+%s"%''.join(insertion))
del insertion
elif deletion:
# End deletion
cigar.append("-%s"%''.join(deletion))
del deletion
return ''.join(cigar) | 0.029308 |
def _summarize_pscore(self, pscore_c, pscore_t):
"""
Called by Strata class during initialization.
"""
self._dict['p_min'] = min(pscore_c.min(), pscore_t.min())
self._dict['p_max'] = max(pscore_c.max(), pscore_t.max())
self._dict['p_c_mean'] = pscore_c.mean()
self._dict['p_t_mean'] = pscore_t.mean() | 0.028481 |
def _bias_add(x, b, data_format):
"""Alternative implementation of tf.nn.bias_add which is compatiable with tensorRT."""
if data_format == 'NHWC':
return tf.add(x, b)
elif data_format == 'NCHW':
return tf.add(x, _to_channel_first_bias(b))
else:
raise ValueError('invalid data_format: %s' % data_format) | 0.005848 |
def enumerateAll(self, subsectionTitle, lst, openFile):
'''
Helper function for :func:`~exhale.graph.ExhaleRoot.generateUnabridgedAPI`.
Simply writes a subsection to ``openFile`` (a ``toctree`` to the ``file_name``)
of each ExhaleNode in ``sorted(lst)`` if ``len(lst) > 0``. Otherwise, nothing
is written to the file.
:Parameters:
``subsectionTitle`` (str)
The title of this subsection, e.g. ``"Namespaces"`` or ``"Files"``.
``lst`` (list)
The list of ExhaleNodes to be enumerated in this subsection.
``openFile`` (File)
The **already open** file object to write to directly. No safety checks
are performed, make sure this is a real file object that has not been
closed already.
'''
if len(lst) > 0:
openFile.write(textwrap.dedent('''
{heading}
{heading_mark}
'''.format(
heading=subsectionTitle,
heading_mark=utils.heading_mark(
subsectionTitle,
configs.SUB_SUB_SECTION_HEADING_CHAR
)
)))
for l in sorted(lst):
openFile.write(textwrap.dedent('''
.. toctree::
:maxdepth: {depth}
{file}
'''.format(
depth=configs.fullToctreeMaxDepth,
file=l.file_name
))) | 0.005762 |
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the ExtensionInformation object and decode it
into its constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(ExtensionInformation, self).read(
istream,
kmip_version=kmip_version
)
tstream = BytearrayStream(istream.read(self.length))
self.extension_name.read(tstream, kmip_version=kmip_version)
if self.is_tag_next(Tags.EXTENSION_TAG, tstream):
self.extension_tag = ExtensionTag()
self.extension_tag.read(tstream, kmip_version=kmip_version)
if self.is_tag_next(Tags.EXTENSION_TYPE, tstream):
self.extension_type = ExtensionType()
self.extension_type.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate() | 0.001637 |
def fn_x(i, dfs_data):
"""The minimum vertex (DFS-number) in a frond contained in Ri."""
try:
return R(i, dfs_data)['x']
except Exception as e:
# Page 17 states that if Ri is empty, then we take xi to be n
return dfs_data['graph'].num_nodes() | 0.003597 |
def port_profile_qos_profile_qos_trust_trust_cos(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
qos_profile = ET.SubElement(port_profile, "qos-profile")
qos = ET.SubElement(qos_profile, "qos")
trust = ET.SubElement(qos, "trust")
trust_cos = ET.SubElement(trust, "trust-cos")
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004559 |
def build_y(self):
"""Build transmission line admittance matrix into self.Y"""
if not self.n:
return
self.y1 = mul(self.u, self.g1 + self.b1 * 1j)
self.y2 = mul(self.u, self.g2 + self.b2 * 1j)
self.y12 = div(self.u, self.r + self.x * 1j)
self.m = polar(self.tap, self.phi * deg2rad)
self.m2 = abs(self.m)**2
self.mconj = conj(self.m)
# build self and mutual admittances into Y
self.Y = spmatrix(
div(self.y12 + self.y1, self.m2), self.a1, self.a1,
(self.nb, self.nb), 'z')
self.Y -= spmatrix(
div(self.y12, self.mconj), self.a1, self.a2, (self.nb, self.nb),
'z')
self.Y -= spmatrix(
div(self.y12, self.m), self.a2, self.a1, (self.nb, self.nb), 'z')
self.Y += spmatrix(self.y12 + self.y2, self.a2, self.a2,
(self.nb, self.nb), 'z') | 0.002141 |
def block_view(self, mri):
# type: (str) -> Block
"""Get a view of a block
Args:
mri: The mri of the controller hosting the block
Returns:
Block: The block we control
"""
controller = self.get_controller(mri)
block = controller.block_view(weakref.proxy(self))
return block | 0.008287 |
def _openFile(self):
""" Open the HDF5 file for reading/writing.
        This method is called during the initialization of this class.
"""
if self.filename is not None:
self.h5 = h5py.File(self.filename)
else:
return
if 'bp' not in self.h5:
self.h5.create_group('bp')
if 'bps' not in self.h5:
self.h5.create_group('bps')
for bp_type in ['bp', 'bps']:
for bp_num in self.data[bp_type]:
# Create group in case if new hdf5 file is opened
if bp_num not in self.h5[bp_type]:
self.h5[bp_type].create_group(bp_num)
# Sync data if old file is opened
for parameters in self.h5[bp_type][bp_num]:
self.data[bp_type][bp_num][parameters] = self.h5[bp_type][bp_num][parameters]
if 'mask' in self.h5:
self.mask = self.h5['mask'][:]
if 'time' in self.h5:
self.time = self.h5['time'][:]
if 'num_bp' in self.h5.attrs:
self.num_bp = self.h5.attrs['num_bp']
else:
self.h5.attrs['num_bp'] = self.num_bp
if 'num_step' in self.h5.attrs:
self.num_step = self.h5.attrs['num_step']
else:
self.h5.attrs['num_step'] = self.num_step
if 'startBP' in self.h5.attrs:
self.startBP = self.h5.attrs['startBP']
else:
self.h5.attrs['startBP'] = self.startBP | 0.001984 |
def sort_projects(
self,
workflowTags):
"""*order the projects within this taskpaper object via a list of tags*
        The order of the tags in the list dictates the sort order - tags listed first are sorted first.
**Key Arguments:**
            - ``workflowTags`` -- a string of space/comma separated tags.
**Return:**
- ``None``
**Usage:**
To recursively sort the projects within a taskpaper document with the following order:
1. *@due*
2. *@flag*
3. *@hold*
4. *@next*
5. *@someday*
6. *@wait*
use the following:
.. code-block:: python
doc.sort_projects("@due, @flag, @hold, @next, @someday, @wait")
"""
self.refresh
if not isinstance(workflowTags, list):
workflowTagsLists = workflowTags.strip().replace(",", "").replace("@", "")
workflowTagsLists = workflowTagsLists.split(" ")
else:
workflowTagsLists = []
workflowTagsLists[:] = [l.replace("@", "") for l in workflowTags]
matchedProjects = collections.OrderedDict()
unmatchedProjects = []
for wt in workflowTagsLists:
matchedProjects[wt.lower()] = []
for p in self.projects:
matched = False
for pt in p.tags:
if matched:
break
for wt in workflowTagsLists:
thisTag = pt.lower()
if "(" not in wt:
thisTag = pt.split("(")[0].lower()
if thisTag == wt.lower() and matched == False:
matchedProjects[wt.lower()].append(p)
matched = True
break
if matched == False:
unmatchedProjects.append(p)
sortedProjects = []
for k, v in matchedProjects.iteritems():
sortedProjects += v
sortedProjects += unmatchedProjects
self.projects = sortedProjects
self.content = self.to_string(
title=False, projects=sortedProjects, indentLevel=0)
for p in self.projects:
p.projects = p.sort_projects(workflowTags)
# ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO
# THIS OBJECT
oldContent = self.to_string(indentLevel=1)
newContent = self.to_string(
indentLevel=1, projects=sortedProjects)
if self.parent:
self.parent._update_document_tree(
oldContent=oldContent,
newContent=newContent
)
self.content = self.content.replace(self.to_string(indentLevel=0, title=False), self.to_string(
indentLevel=0, title=False, projects=sortedProjects))
self.refresh
return sortedProjects | 0.003067 |
def element_data_from_name(name):
'''Obtain elemental data given an elemental name
The given name is not case sensitive
An exception is thrown if the name is not found
'''
name_lower = name.lower()
if name_lower not in _element_name_map:
raise KeyError('No element data for name \'{}\''.format(name))
return _element_name_map[name_lower] | 0.00266 |
def register(self,obj,cls=type(None),hist={}):
"""
Takes a FortranObject and adds it to the appropriate list, if
not already present.
"""
#~ ident = getattr(obj,'ident',obj)
if is_submodule(obj,cls):
if obj not in self.submodules: self.submodules[obj] = SubmodNode(obj,self)
elif is_module(obj,cls):
if obj not in self.modules: self.modules[obj] = ModNode(obj,self)
elif is_type(obj,cls):
if obj not in self.types: self.types[obj] = TypeNode(obj,self,hist)
elif is_proc(obj,cls):
if obj not in self.procedures: self.procedures[obj] = ProcNode(obj,self,hist)
elif is_program(obj,cls):
if obj not in self.programs: self.programs[obj] = ProgNode(obj,self)
elif is_sourcefile(obj,cls):
if obj not in self.sourcefiles: self.sourcefiles[obj] = FileNode(obj,self)
elif is_blockdata(obj,cls):
if obj not in self.blockdata: self.blockdata[obj] = BlockNode(obj,self)
else:
raise BadType("Object type {} not recognized by GraphData".format(type(obj).__name__)) | 0.030461 |
def _analyze_function_features(self, all_funcs_completed=False):
"""
For each function in the function_manager, try to determine if it returns or not. A function does not return if
it calls another function that is known to be not returning, and this function does not have other exits.
We might as well analyze other features of functions in the future.
:param bool all_funcs_completed: Ignore _completed_functions set and treat all functions as completed. This
can be set to True after the entire CFG is built and _post_analysis() is
called (at which point analysis on all functions must be completed).
"""
changes = {
'functions_return': [],
'functions_do_not_return': []
}
if self._updated_nonreturning_functions is not None:
all_func_addrs = self._updated_nonreturning_functions
# Convert addresses to objects
all_functions = [ self.kb.functions.get_by_addr(f) for f in all_func_addrs
if self.kb.functions.contains_addr(f) ]
else:
all_functions = list(self.kb.functions.values())
analyzed_functions = set()
# short-hand
functions = self.kb.functions # type: angr.knowledge.FunctionManager
while all_functions:
func = all_functions.pop(-1) # type: angr.knowledge.Function
analyzed_functions.add(func.addr)
if func.returning is not None:
# It has been determined before. Skip it
continue
returning = self._determine_function_returning(func, all_funcs_completed=all_funcs_completed)
if returning:
func.returning = True
changes['functions_return'].append(func)
elif returning is False:
func.returning = False
changes['functions_do_not_return'].append(func)
if returning is not None:
# Add all callers of this function to all_functions list
if func.addr in functions.callgraph:
callers = functions.callgraph.predecessors(func.addr)
for caller in callers:
if caller in analyzed_functions:
continue
if functions.contains_addr(caller):
all_functions.append(functions.get_by_addr(caller))
return changes | 0.004259 |
def get_table_idcb_field(endianess, data):
"""
Return data from a packed TABLE_IDC_BFLD bit-field.
:param str endianess: The endianess to use when packing values ('>' or '<')
:param str data: The packed and machine-formatted data to parse
:rtype: tuple
:return: Tuple of (proc_nbr, std_vs_mfg, proc_flag, flag1, flag2, flag3)
"""
bfld = struct.unpack(endianess + 'H', data[:2])[0]
proc_nbr = bfld & 2047
std_vs_mfg = bool(bfld & 2048)
proc_flag = bool(bfld & 4096)
flag1 = bool(bfld & 8192)
flag2 = bool(bfld & 16384)
flag3 = bool(bfld & 32768)
return (proc_nbr, std_vs_mfg, proc_flag, flag1, flag2, flag3) | 0.025723 |
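A quick round-trip check of the bit layout (the masks 2048, 4096, 8192, 16384 and 32768 correspond to bits 11 through 15); the packed value below is made up for illustration:
import struct

bfld = 42 | 2048 | 8192                   # proc_nbr 42, std_vs_mfg and flag1 set
data = struct.pack('<H', bfld)
print(get_table_idcb_field('<', data))    # (42, True, False, True, False, False)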
async def _check_resolver_ans(
self, dns_answer_list, record_name,
record_data_list, record_ttl, record_type_code):
"""Check if resolver answer is equal to record data.
Args:
dns_answer_list (list): DNS answer list contains record objects.
record_name (str): Record name.
record_data_list (list): List of data values for the record.
record_ttl (int): Record time-to-live info.
record_type_code (int): Record type code.
Returns:
boolean indicating if DNS answer data is equal to record data.
"""
type_filtered_list = [
ans for ans in dns_answer_list if ans.qtype == record_type_code
]
# check to see that type_filtered_lst has
# the same number of records as record_data_list
if len(type_filtered_list) != len(record_data_list):
return False
# check each record data is equal to the given data
for rec in type_filtered_list:
conditions = [rec.name == record_name,
rec.ttl == record_ttl,
rec.data in record_data_list]
# if ans record data is not equal
# to the given data return False
if not all(conditions):
return False
return True | 0.001461 |
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
"""
self.Y = np.asarray(sp.prox_l1(self.S - self.AX - self.U,
self.lmbda/self.rho), dtype=self.dtype) | 0.007692 |
def emit( self, record ):
"""
        Displays the formatted record as a message on the splash screen,
        using the splash screen's text alignment and color.
:param record | <logging.LogRecord>
"""
msg = self._formatter.format(record)
align = self._splash.textAlignment()
fg = self._splash.textColor()
self._splash.showMessage(msg, align, fg) | 0.022113 |
def _tr_magic(line_info):
"Translate lines escaped with: %"
tpl = '%sget_ipython().magic(%r)'
cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
return tpl % (line_info.pre, cmd) | 0.009091 |
def to_timezone(dt, timezone):
"""
Return an aware datetime which is ``dt`` converted to ``timezone``.
If ``dt`` is naive, it is assumed to be UTC.
For example, if ``dt`` is "06:00 UTC+0000" and ``timezone`` is "EDT-0400",
then the result will be "02:00 EDT-0400".
This method follows the guidelines in http://pytz.sourceforge.net/
"""
if dt.tzinfo is None:
dt = dt.replace(tzinfo=_UTC)
return timezone.normalize(dt.astimezone(timezone)) | 0.002066 |
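A small usage sketch with pytz, assuming the module-level `_UTC` constant is `pytz.utc`:
import datetime
import pytz

eastern = pytz.timezone('US/Eastern')
naive_utc = datetime.datetime(2020, 6, 1, 6, 0)   # naive, so treated as UTC
print(to_timezone(naive_utc, eastern))            # 2020-06-01 02:00:00-04:00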
def get_rsa_pub_key(path):
'''
Read a public key off the disk.
'''
log.debug('salt.crypt.get_rsa_pub_key: Loading public key')
if HAS_M2:
with salt.utils.files.fopen(path, 'rb') as f:
data = f.read().replace(b'RSA ', b'')
bio = BIO.MemoryBuffer(data)
key = RSA.load_pub_key_bio(bio)
else:
with salt.utils.files.fopen(path) as f:
key = RSA.importKey(f.read())
return key | 0.002208 |
def identify(mw_uri, consumer_token, access_token, leeway=10.0,
user_agent=defaults.USER_AGENT):
"""
Gather identifying information about a user via an authorized token.
:Parameters:
mw_uri : `str`
The base URI of the MediaWiki installation. Note that the URI
should end in ``"index.php"``.
consumer_token : :class:`~mwoauth.ConsumerToken`
A token representing you, the consumer.
access_token : :class:`~mwoauth.AccessToken`
A token representing an authorized user. Obtained from
`complete()`
leeway : `int` | `float`
The number of seconds of leeway to account for when examining a
tokens "issued at" timestamp.
:Returns:
A dictionary containing identity information.
"""
# Construct an OAuth auth
auth = OAuth1(consumer_token.key,
client_secret=consumer_token.secret,
resource_owner_key=access_token.key,
resource_owner_secret=access_token.secret)
# Request the identity using auth
r = requests.post(url=mw_uri,
params={'title': "Special:OAuth/identify"},
auth=auth,
headers={'User-Agent': user_agent})
# Special:OAuth/identify unhelpfully returns 200 status even when there is
# an error in the API call. Check for error messages manually.
if r.content.startswith(b'{'):
try:
resp = r.json()
if 'error' in resp:
raise OAuthException(
"A MediaWiki API error occurred: {0}".format(resp['message']))
        except ValueError as e:
raise OAuthException(
"An error occurred while trying to read json " +
"content: {0}".format(e))
# Decode json & stuff
try:
identity = jwt.decode(r.content, consumer_token.secret,
audience=consumer_token.key,
algorithms=["HS256"],
leeway=leeway)
except jwt.InvalidTokenError as e:
raise OAuthException(
"An error occurred while trying to read json " +
"content: {0}".format(e))
# Verify the issuer is who we expect (server sends $wgCanonicalServer)
issuer = urlparse(identity['iss']).netloc
expected_domain = urlparse(mw_uri).netloc
if not issuer == expected_domain:
raise OAuthException(
"Unexpected issuer " +
"{0}, expected {1}".format(issuer, expected_domain))
# Check that the identity was issued in the past.
now = time.time()
issued_at = float(identity['iat'])
if not now >= (issued_at - leeway):
raise OAuthException(
"Identity issued {0} ".format(issued_at - now) +
"seconds in the future!")
# Verify that the nonce matches our request one,
# to avoid a replay attack
authorization_header = force_unicode(r.request.headers['Authorization'])
request_nonce = re.search(r'oauth_nonce="(.*?)"',
authorization_header).group(1)
if identity['nonce'] != request_nonce:
raise OAuthException(
'Replay attack detected: {0} != {1}'.format(
identity['nonce'], request_nonce))
return identity | 0.00089 |
def get_rank(self, entity, criteria, condition = True):
"""
Get the rank of a person within an entity according to a criteria.
The person with rank 0 has the minimum value of criteria.
If condition is specified, then the persons who don't respect it are not taken into account and their rank is -1.
Example:
>>> age = person('age', period) # e.g [32, 34, 2, 8, 1]
>>> person.get_rank(household, age)
>>> [3, 4, 0, 2, 1]
>>> is_child = person.has_role(Household.CHILD) # [False, False, True, True, True]
>>> person.get_rank(household, - age, condition = is_child) # Sort in reverse order so that the eldest child gets the rank 0.
>>> [-1, -1, 1, 0, 2]
"""
# If entity is for instance 'person.household', we get the reference entity 'household' behind the projector
entity = entity if not isinstance(entity, Projector) else entity.reference_entity
positions = entity.members_position
biggest_entity_size = np.max(positions) + 1
filtered_criteria = np.where(condition, criteria, np.inf)
ids = entity.members_entity_id
# Matrix: the value in line i and column j is the value of criteria for the jth person of the ith entity
matrix = np.asarray([
entity.value_nth_person(k, filtered_criteria, default = np.inf)
for k in range(biggest_entity_size)
]).transpose()
# We double-argsort all lines of the matrix.
# Double-argsorting gets the rank of each value once sorted
# For instance, if x = [3,1,6,4,0], y = np.argsort(x) is [4, 1, 0, 3, 2] (because the value with index 4 is the smallest one, the value with index 1 the second smallest, etc.) and z = np.argsort(y) is [2, 1, 4, 3, 0], the rank of each value.
sorted_matrix = np.argsort(np.argsort(matrix))
# Build the result vector by taking for each person the value in the right line (corresponding to its household id) and the right column (corresponding to its position)
result = sorted_matrix[ids, positions]
# Return -1 for the persons who don't respect the condition
return np.where(condition, result, -1) | 0.006281 |
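The double-argsort trick described in the comment above, shown in isolation:
import numpy as np

x = np.array([3, 1, 6, 4, 0])
print(np.argsort(x))                # [4 1 0 3 2]
print(np.argsort(np.argsort(x)))    # [2 1 4 3 0]  -> rank of each value in x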
def nvmlDeviceGetRetiredPagesPendingStatus(device):
r"""
/**
* Check if any pages are pending retirement and need a reboot to fully retire.
*
* For Kepler &tm; or newer fully supported devices.
*
* @param device The identifier of the target device
* @param isPending Reference in which to return the pending status
*
* @return
* - \ref NVML_SUCCESS if \a isPending was populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isPending is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus
"""
c_pending = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetRetiredPagesPendingStatus")
ret = fn(device, byref(c_pending))
_nvmlCheckReturn(ret)
return int(c_pending.value) | 0.006988 |
def prepare_response_header(origin_header, segment):
"""
Prepare a trace header to be inserted into response
based on original header and the request segment.
"""
if origin_header and origin_header.sampled == '?':
new_header = TraceHeader(root=segment.trace_id,
sampled=segment.sampled)
else:
new_header = TraceHeader(root=segment.trace_id)
return new_header.to_header_str() | 0.002212 |
def present(profile='pagerduty', subdomain=None, api_key=None, **kwargs):
'''
Ensure that a pagerduty escalation policy exists. Will create or update as needed.
This method accepts as args everything defined in
https://developer.pagerduty.com/documentation/rest/escalation_policies/create.
In addition, user and schedule id's will be translated from name (or email address)
into PagerDuty unique ids. For example:
.. code-block:: yaml
pagerduty_escalation_policy.present:
- name: bruce test escalation policy
- escalation_rules:
- targets:
- type: schedule
id: 'bruce test schedule level1'
- type: user
id: 'Bruce Sherrod'
In this example, 'Bruce Sherrod' will be looked up and replaced with the
PagerDuty id (usually a 7 digit all-caps string, e.g. PX6GQL7)
'''
# for convenience, we accept id, name, or email for users
# and we accept the id or name for schedules
for escalation_rule in kwargs['escalation_rules']:
for target in escalation_rule['targets']:
target_id = None
if target['type'] == 'user':
user = __salt__['pagerduty_util.get_resource']('users',
target['id'],
['email', 'name', 'id'],
profile=profile,
subdomain=subdomain,
api_key=api_key)
if user:
target_id = user['id']
elif target['type'] == 'schedule':
schedule = __salt__['pagerduty_util.get_resource']('schedules',
target['id'],
['name', 'id'],
profile=profile,
subdomain=subdomain,
api_key=api_key)
if schedule:
target_id = schedule['schedule']['id']
if target_id is None:
raise Exception('unidentified target: {0}'.format(target))
target['id'] = target_id
r = __salt__['pagerduty_util.resource_present']('escalation_policies',
['name', 'id'],
_diff,
profile,
subdomain,
api_key,
**kwargs)
return r | 0.003347 |
def move_in_stack(move_up):
'''Move up or down the stack (for the py-up/py-down command)'''
frame = Frame.get_selected_python_frame()
while frame:
if move_up:
iter_frame = frame.older()
else:
iter_frame = frame.newer()
if not iter_frame:
break
if iter_frame.is_evalframeex():
# Result:
if iter_frame.select():
iter_frame.print_summary()
return
frame = iter_frame
if move_up:
print 'Unable to find an older python frame'
else:
print 'Unable to find a newer python frame' | 0.00157 |
def load_dataset_items(test_file, predict_file_lst, nonfeature_file):
"""
    Read three kinds of data into lists; each kind is stored in a file given by the corresponding parameter.
    :param test_file: path string, the testing set used for SVM rank
    :param predict_file_lst: list of filenames, all prediction files output by SVM rank
    :param nonfeature_file: path string, contains all the score data not used as features (aligned with test_file)
:return: None
"""
print 'Reading baseline feature & bleu...'
with open(test_file, 'r') as reader:
for line in reader:
items = line.split(' ')
label = float(items[0])
id_list.append(items[1])
bleu_list.append(label)
word_count_list.append(float(items[2].split(':')[1]))
attri_count_list.append(float(items[10].split(':')[1]))
print 'Reading svm rankscore...'
global prediction_dict
for predict_file in predict_file_lst:
mark = predict_file.replace('predictions', '')
prediction_dict[mark] = []
with open(result_file_path + predict_file, 'r') as reader:
for line in reader:
rankscore = float(line)
prediction_dict[mark].append(rankscore)
print 'Reading NonFeature score...'
with open(nonfeature_file, 'r') as reader:
for line in reader:
nonfeature_items = line.split()
w_score = float(nonfeature_items[2].split(':')[1])
m_score = float(nonfeature_items[3].split(':')[1])
weighted_attri_list.append(w_score)
meteor_score_list.append(m_score) | 0.002371 |
def _add_column(self, type, name, **parameters):
"""
Add a new column to the blueprint.
:param type: The column type
:type type: str
:param name: The column name
:type name: str
:param parameters: The column parameters
:type parameters: dict
:rtype: Fluent
"""
parameters.update({
'type': type,
'name': name
})
column = Fluent(**parameters)
self._columns.append(column)
return column | 0.003752 |
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
      'Plots the number of tasks from profiling data.'))
argument_parser.add_argument(
'--output', dest='output_file', type=str, help=(
'path of the output file to write the graph to instead of using '
'interactive mode. The output format deduced from the extension '
'of the filename.'))
argument_parser.add_argument(
'profile_path', type=str, help=(
'path to the directory containing the profiling data.'))
options = argument_parser.parse_args()
if not os.path.isdir(options.profile_path):
print('No such directory: {0:s}'.format(options.profile_path))
return False
names = ['time', 'queued', 'processing', 'to_merge', 'abandoned', 'total']
glob_expression = os.path.join(options.profile_path, 'task_queue-*.csv.gz')
for csv_file_name in glob.glob(glob_expression):
data = numpy.genfromtxt(
csv_file_name, delimiter='\t', dtype=None, encoding='utf-8',
names=names, skip_header=1)
pyplot.plot(data['time'], data['queued'], label='queued')
pyplot.plot(data['time'], data['processing'], label='processing')
pyplot.plot(data['time'], data['to_merge'], label='to merge')
pyplot.plot(data['time'], data['abandoned'], label='abandoned')
pyplot.title('Number of tasks over time')
pyplot.xlabel('Time')
pyplot.xscale('linear')
pyplot.ylabel('Number of tasks')
pyplot.yscale('linear')
pyplot.legend()
if options.output_file:
pyplot.savefig(options.output_file)
else:
pyplot.show()
return True | 0.011229 |
def so4_to_magic_su2s(
mat: np.ndarray,
*,
rtol: float = 1e-5,
atol: float = 1e-8,
check_preconditions: bool = True
) -> Tuple[np.ndarray, np.ndarray]:
"""Finds 2x2 special-unitaries A, B where mat = Mag.H @ kron(A, B) @ Mag.
Mag is the magic basis matrix:
1 0 0 i
0 i 1 0
0 i -1 0 (times sqrt(0.5) to normalize)
1 0 0 -i
Args:
mat: A real 4x4 orthogonal matrix.
rtol: Per-matrix-entry relative tolerance on equality.
atol: Per-matrix-entry absolute tolerance on equality.
check_preconditions: When set, the code verifies that the given
matrix is from SO(4). Defaults to set.
Returns:
A pair (A, B) of matrices in SU(2) such that Mag.H @ kron(A, B) @ Mag
is approximately equal to the given matrix.
Raises:
ValueError: Bad matrix.
"""
if check_preconditions:
if mat.shape != (4, 4) or not predicates.is_special_orthogonal(
mat, atol=atol, rtol=rtol):
raise ValueError('mat must be 4x4 special orthogonal.')
ab = combinators.dot(MAGIC, mat, MAGIC_CONJ_T)
_, a, b = kron_factor_4x4_to_2x2s(ab)
return a, b | 0.000804 |
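The magic basis matrix written out in the docstring can be checked for unitarity directly; a minimal sketch:
import numpy as np

MAGIC = np.sqrt(0.5) * np.array([
    [1,  0,  0,  1j],
    [0,  1j, 1,  0],
    [0,  1j, -1, 0],
    [1,  0,  0, -1j],
])
MAGIC_CONJ_T = MAGIC.conj().T
assert np.allclose(MAGIC @ MAGIC_CONJ_T, np.eye(4))   # the magic basis is unitary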
def OR(*fns):
""" Validate with any of the chainable valdator functions """
if len(fns) < 2:
raise TypeError('At least two functions must be passed')
@chainable
def validator(v):
for fn in fns:
last = None
try:
return fn(v)
except ValueError as err:
last = err
if last:
raise last
return validator | 0.00237 |
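A usage sketch with two hypothetical leaf validators (ignoring whatever extra behaviour the undefined `chainable` decorator adds):
def is_int(v):
    if not isinstance(v, int):
        raise ValueError('not an int')
    return v

def is_str(v):
    if not isinstance(v, str):
        raise ValueError('not a str')
    return v

int_or_str = OR(is_int, is_str)
int_or_str(5)        # accepted by is_int
int_or_str('hi')     # rejected by is_int, accepted by is_str
int_or_str(1.5)      # both fail -> the last ValueError is raised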
def get_by_params(self, params: [Scope]) -> (Scope, [[Scope]]):
""" Retrieve a Set of all signature that match the parameter list.
Return a pair:
pair[0] the overloads for the functions
pair[1] the overloads for the parameters
            (a list of candidate lists of parameters)
"""
lst = []
scopep = []
# for each of our signatures
for s in self.values():
# for each params of this signature
if hasattr(s, 'tparams'):
# number of matched params
mcnt = 0
# temporary collect
nbparam_sig = (0 if s.tparams is None else len(s.tparams))
nbparam_candidates = len(params)
# don't treat signature too short
if nbparam_sig > nbparam_candidates:
continue
# don't treat call signature too long if not variadic
if nbparam_candidates > nbparam_sig and not s.variadic:
continue
tmp = [None] * nbparam_candidates
variadic_types = []
for i in range(nbparam_candidates):
tmp[i] = Scope(state=StateScope.LINKED)
tmp[i].set_parent(self)
# match param of the expr
if i < nbparam_sig:
if params[i].state == StateScope.EMBEDDED:
raise ValueError(
("params[%d] of get_by_params is a StateScope."
+ "EMBEDDED scope... "
+ "read the doc and try a StateScope.FREE"
+ " or StateScope.LINKED.") % i
)
m = params[i].get_by_return_type(s.tparams[i])
if len(m) > 0:
mcnt += 1
tmp[i].update(m)
else:
# co/contra-variance
# we just need to search a t1->t2
# and add it into the tree (with/without warnings)
t1 = params[i]
t2 = s.tparams[i]
# if exist a fun (t1) -> t2
(is_convertible,
signature,
translator
) = t1.findTranslationTo(t2)
if is_convertible:
# add a translator in the EvalCtx
signature.use_translator(translator)
mcnt += 1
nscope = Scope(
sig=[signature],
state=StateScope.LINKED,
is_namespace=False
)
nscope.set_parent(self)
tmp[i].update(nscope)
elif s.tparams[i].is_polymorphic:
# handle polymorphic parameter
mcnt += 1
if not isinstance(params[i], Scope):
raise Exception(
"params[%d] must be a Scope" % i
)
tmp[i].update(params[i])
else:
# handle polymorphic return type
m = params[i].get_all_polymorphic_return()
if len(m) > 0:
mcnt += 1
tmp[i].update(m)
# for variadic extra parameters
else:
mcnt += 1
if not isinstance(params[i], Scope):
raise Exception("params[%d] must be a Scope" % i)
variadic_types.append(params[i].first().tret)
tmp[i].update(params[i])
# we have match all candidates
if mcnt == len(params):
# select this signature but
# box it (with EvalCtx) for type resolution
lst.append(EvalCtx.from_sig(s))
lastentry = lst[-1]
if lastentry.variadic:
lastentry.use_variadic_types(variadic_types)
scopep.append(tmp)
rscope = Scope(sig=lst, state=StateScope.LINKED, is_namespace=False)
# inherit type/translation from parent
rscope.set_parent(self)
return (rscope, scopep) | 0.000407 |
def get_joined_filters(self, filters):
"""
Creates a new filters class with active filters joined
"""
retfilters = Filters(self.filter_converter, self.datamodel)
retfilters.filters = self.filters + filters.filters
retfilters.values = self.values + filters.values
return retfilters | 0.005882 |
def define_haystack_units():
"""
Missing units found in project-haystack
Added to the registry
"""
ureg = UnitRegistry()
ureg.define('% = [] = percent')
ureg.define('pixel = [] = px = dot = picture_element = pel')
ureg.define('decibel = [] = dB')
ureg.define('ppu = [] = parts_per_unit')
ureg.define('ppm = [] = parts_per_million')
ureg.define('ppb = [] = parts_per_billion')
ureg.define('%RH = [] = percent_relative_humidity = percentRH')
ureg.define('cubic_feet = ft ** 3 = cu_ft')
ureg.define('cfm = cu_ft * minute = liter_per_second / 0.4719475')
ureg.define('cfh = cu_ft * hour')
ureg.define('cfs = cu_ft * second')
ureg.define('VAR = volt * ampere')
ureg.define('kVAR = 1000 * volt * ampere')
ureg.define('MVAR = 1000000 * volt * ampere')
ureg.define('inH2O = in_H2O')
ureg.define('dry_air = []')
ureg.define('gas = []')
ureg.define('energy_efficiency_ratio = [] = EER')
ureg.define('coefficient_of_performance = [] = COP')
ureg.define('data_center_infrastructure_efficiency = [] = DCIE')
ureg.define('power_usage_effectiveness = [] = PUE')
ureg.define('formazin_nephelometric_unit = [] = fnu')
ureg.define('nephelometric_turbidity_units = [] = ntu')
ureg.define('power_factor = [] = PF')
ureg.define('degree_day_celsius = [] = degdaysC')
ureg.define('degree_day_farenheit = degree_day_celsius * 9 / 5 = degdaysF')
ureg.define('footcandle = lumen / sq_ft = ftcd')
ureg.define('Nm = newton * meter')
ureg.define('%obsc = [] = percent_obscuration = percentobsc')
ureg.define('cycle = []')
ureg.define('cph = cycle / hour')
ureg.define('cpm = cycle / minute')
ureg.define('cps = cycle / second')
ureg.define('hecto_cubic_foot = 100 * cubic_foot')
ureg.define('tenths_second = second / 10')
ureg.define('hundredths_second = second / 100')
#ureg.define('irradiance = W / sq_meter = irr')
# In the definition of project haystack, there's a redundancy as irr = W/m^2
# no need to use : watts_per_square_meter_irradiance
# CURRENCY
    # I know...we won't be able to convert right now!
ureg.define('australian_dollar = [] = AUD')
ureg.define('british_pound = [] = GBP = £')
ureg.define('canadian_dollar = [] = CAD')
ureg.define('chinese_yuan = [] = CNY = 元')
ureg.define('emerati_dirham = [] = AED')
ureg.define('euro = [] = EUR = €')
ureg.define('indian_rupee = [] = INR = ₹')
ureg.define('japanese_yen = [] = JPY = ¥')
ureg.define('russian_ruble = [] = RUB = руб')
ureg.define('south_korean_won = [] = KRW = ₩')
ureg.define('swedish_krona = [] = SEK = kr')
ureg.define('swiss_franc = [] = CHF = Fr')
ureg.define('taiwan_dollar = [] = TWD')
ureg.define('us_dollar = [] = USD = $')
ureg.define('new_israeli_shekel = [] = NIS')
return ureg | 0.001385 |
def _zbufcountlines(filename, gzipped):
""" faster line counter """
if gzipped:
cmd1 = ["gunzip", "-c", filename]
else:
cmd1 = ["cat", filename]
cmd2 = ["wc"]
proc1 = sps.Popen(cmd1, stdout=sps.PIPE, stderr=sps.PIPE)
proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=sps.PIPE, stderr=sps.PIPE)
res = proc2.communicate()[0]
if proc2.returncode:
raise IPyradWarningExit("error zbufcountlines {}:".format(res))
LOGGER.info(res)
nlines = int(res.split()[0])
return nlines | 0.003724 |
def matlab_formatter(level, vertices, codes=None):
"""`MATLAB`_ style contour formatter.
Contours are returned as a single Nx2, `MATLAB`_ style, contour array.
There are two types of rows in this format:
* Header: The first element of a header row is the level of the contour
(the lower level for filled contours) and the second element is the
number of vertices (to follow) belonging to this contour line.
* Vertex: x,y coordinate pairs of the vertex.
    A header row is always followed by the corresponding number of vertices.
Another header row may follow if there are more contour lines.
For filled contours the direction of vertices matters:
* CCW (ACW): The vertices give the exterior of a contour polygon.
* CW: The vertices give a hole of a contour polygon. This hole will
always be inside the exterior of the last contour exterior.
For further explanation of this format see the `Mathworks documentation
<https://www.mathworks.com/help/matlab/ref/contour-properties.html#prop_ContourMatrix>`_
noting that the MATLAB format used in the `contours` package is the
transpose of that used by `MATLAB`_ (since `MATLAB`_ is column-major
and `NumPy`_ is row-major by default).
.. _NumPy: http://www.numpy.org
.. _MATLAB: https://www.mathworks.com/products/matlab.html
"""
vertices = numpy_formatter(level, vertices, codes)
if codes is not None:
level = level[0]
headers = np.vstack((
[v.shape[0] for v in vertices],
[level]*len(vertices))).T
vertices = np.vstack(
list(it.__next__() for it in
itertools.cycle((iter(headers), iter(vertices)))))
return vertices | 0.00058 |
def _ParseRedirected(
self, parser_mediator, msiecf_item, recovered=False):
"""Extract data from a MSIE Cache Files (MSIECF) redirected item.
Every item is stored as an event object, one for each timestamp.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
msiecf_item (pymsiecf.redirected): MSIECF redirected item.
recovered (Optional[bool]): True if the item was recovered.
"""
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event_data = MSIECFRedirectedEventData()
event_data.offset = msiecf_item.offset
event_data.recovered = recovered
event_data.url = msiecf_item.location
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data) | 0.001096 |
def get_query_tokens(query):
"""
:type query str
:rtype: list[sqlparse.sql.Token]
"""
query = preprocess_query(query)
parsed = sqlparse.parse(query)
# handle empty queries (#12)
if not parsed:
return []
tokens = TokenList(parsed[0].tokens).flatten()
# print([(token.value, token.ttype) for token in tokens])
return [token for token in tokens if token.ttype is not Whitespace] | 0.002326 |
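The core of the helper is sqlparse's token flattening plus a whitespace filter; a stand-alone sketch that skips the internal `preprocess_query` step:
import sqlparse
from sqlparse.tokens import Whitespace

parsed = sqlparse.parse("SELECT id FROM users WHERE id = 1")
tokens = [t for t in parsed[0].flatten() if t.ttype is not Whitespace]
print([t.value for t in tokens])   # e.g. ['SELECT', 'id', 'FROM', 'users', 'WHERE', 'id', '=', '1']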
async def send_data(self, data, addr):
"""
Send data to a remote host via the TURN server.
"""
channel = self.peer_to_channel.get(addr)
if channel is None:
channel = self.channel_number
self.channel_number += 1
self.channel_to_peer[channel] = addr
self.peer_to_channel[addr] = channel
# bind channel
await self.channel_bind(channel, addr)
header = struct.pack('!HH', channel, len(data))
self._send(header + data) | 0.00369 |
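The 4-byte header packed above is the TURN ChannelData framing (RFC 5766): 2-byte channel number, 2-byte payload length, then the raw payload. For example:
import struct

channel = 0x4000                    # lowest value in the valid channel-number range
payload = b'hello'
frame = struct.pack('!HH', channel, len(payload)) + payload
print(frame.hex())                  # 4000000568656c6c6f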
def video_top(body_output, targets, model_hparams, vocab_size):
"""Top transformation for video."""
del targets # unused arg
num_channels = model_hparams.problem.num_channels
shape = common_layers.shape_list(body_output)
reshape_shape = shape[:-1] + [num_channels, vocab_size]
res = tf.reshape(body_output, reshape_shape)
# Calculate argmax so as to have a summary with the produced images.
x = tf.argmax(tf.reshape(res, [-1, vocab_size]), axis=-1)
x = tf.reshape(x, shape[:-1] + [num_channels])
common_video.gif_summary("results", x, max_outputs=1)
return res | 0.020583 |
def p40baro(msg):
"""Barometric pressure setting
Args:
        msg (String): 28-character hexadecimal message string (BDS40)
Returns:
float: pressure in millibar
"""
d = hex2bin(data(msg))
if d[26] == '0':
return None
p = bin2int(d[27:39]) * 0.1 + 800 # millibar
return p | 0.003106 |
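Worked arithmetic for the decode above: the 12-bit field has a 0.1 mb resolution and an 800 mb offset, so its range is 800.0 to 1209.5 mb.
# Hypothetical field value: 2132 -> 2132 * 0.1 + 800 ≈ 1013.2 mb (standard sea-level pressure).
print(2132 * 0.1 + 800)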
def add(self, coro, args=(), kwargs={}, first=True):
"""Add a coroutine in the scheduler. You can add arguments
(_args_, _kwargs_) to init the coroutine with."""
assert callable(coro), "'%s' not a callable object" % coro
coro = coro(*args, **kwargs)
if first:
self.active.append( (None, coro) )
else:
self.active.appendleft( (None, coro) )
return coro | 0.013636 |
def identify(self, text, **kwargs):
"""
Identify language.
Identifies the language of the input text.
:param str text: Input text in UTF-8 format.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('language_translator', 'V3', 'identify')
headers.update(sdk_headers)
params = {'version': self.version}
data = text
headers['content-type'] = 'text/plain'
url = '/v3/identify'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
data=data,
accept_json=True)
return response | 0.002885 |
def GetStars(campaign, module, model='nPLD', **kwargs):
'''
Returns de-trended light curves for all stars on a given module in
a given campaign.
'''
# Get the channel numbers
channels = Channels(module)
assert channels is not None, "No channels available on this module."
# Get the EPIC numbers
all = GetK2Campaign(campaign)
stars = np.array([s[0] for s in all if s[2] in channels and
os.path.exists(
os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign),
('%09d' % s[0])[:4] + '00000',
('%09d' % s[0])[4:], model + '.npz'))], dtype=int)
N = len(stars)
assert N > 0, "No light curves found for campaign %d, module %d." % (
campaign, module)
# Loop over all stars and store the fluxes in a list
fluxes = []
errors = []
kpars = []
for n in range(N):
# De-trended light curve file name
nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign),
('%09d' % stars[n])[:4] + '00000',
('%09d' % stars[n])[4:], model + '.npz')
# Get the data
data = np.load(nf)
t = data['time']
if n == 0:
time = t
breakpoints = data['breakpoints']
# Get de-trended light curve
y = data['fraw'] - data['model']
err = data['fraw_err']
# De-weight outliers and bad timestamps
m = np.array(list(set(np.concatenate([data['outmask'], data['badmask'],
data['nanmask'],
data['transitmask']]))),
dtype=int)
# Interpolate over the outliers
y = np.interp(t, np.delete(t, m), np.delete(y, m))
err = np.interp(t, np.delete(t, m), np.delete(err, m))
# Append to our running lists
fluxes.append(y)
errors.append(err)
kpars.append(data['kernel_params'])
return time, breakpoints, np.array(fluxes), \
np.array(errors), np.array(kpars) | 0.00095 |