filename (string, 13–19 chars) | text (string, 134–1.04M chars)
---|---|
the-stack_106_20491
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for LocalFileSystem."""
import unittest
import filecmp
import os
import shutil
import tempfile
import mock
from apache_beam.io import localfilesystem
from apache_beam.io.filesystem import BeamIOError
def _gen_fake_join(separator):
"""Returns a callable that joins paths with the given separator."""
def _join(first_path, *paths):
return separator.join((first_path.rstrip(separator),) + paths)
return _join
def _gen_fake_split(separator):
"""Returns a callable that splits a with the given separator."""
def _split(path):
sep_index = path.rfind(separator)
if sep_index >= 0:
return (path[:sep_index], path[sep_index + 1:])
else:
return (path, '')
return _split
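# Illustrative behaviour of the fakes above (not part of the original tests):
# _gen_fake_join('/')('/tmp/path', 'to', 'file') returns '/tmp/path/to/file', and
# _gen_fake_split('/')('/tmp/path/to/file') returns ('/tmp/path/to', 'file');
# when no separator is present, _gen_fake_split('/')('file') returns ('file', '').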
class LocalFileSystemTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.fs = localfilesystem.LocalFileSystem()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_scheme(self):
self.assertIsNone(self.fs.scheme())
self.assertIsNone(localfilesystem.LocalFileSystem.scheme())
@mock.patch('apache_beam.io.localfilesystem.os')
def test_unix_path_join(self, *unused_mocks):
# Test joining of Unix paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('/')
self.assertEqual('/tmp/path/to/file',
self.fs.join('/tmp/path', 'to', 'file'))
self.assertEqual('/tmp/path/to/file',
self.fs.join('/tmp/path', 'to/file'))
@mock.patch('apache_beam.io.localfilesystem.os')
def test_windows_path_join(self, *unused_mocks):
# Test joining of Windows paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('\\')
self.assertEqual(r'C:\tmp\path\to\file',
self.fs.join(r'C:\tmp\path', 'to', 'file'))
self.assertEqual(r'C:\tmp\path\to\file',
self.fs.join(r'C:\tmp\path', r'to\file'))
@mock.patch('apache_beam.io.localfilesystem.os')
def test_unix_path_split(self, os_mock):
os_mock.path.abspath.side_effect = lambda a: a
os_mock.path.split.side_effect = _gen_fake_split('/')
self.assertEqual(('/tmp/path/to', 'file'),
self.fs.split('/tmp/path/to/file'))
# The real os.path.split would split '/tmp' into '/' and 'tmp' on Unix.
self.assertEqual(('', 'tmp'),
self.fs.split('/tmp'))
@mock.patch('apache_beam.io.localfilesystem.os')
def test_windows_path_split(self, os_mock):
os_mock.path.abspath = lambda a: a
os_mock.path.split.side_effect = _gen_fake_split('\\')
self.assertEqual((r'C:\tmp\path\to', 'file'),
self.fs.split(r'C:\tmp\path\to\file'))
# The real os.path.split would split r'C:\tmp' into ('C:\\', 'tmp') on Windows.
self.assertEqual((r'C:', 'tmp'),
self.fs.split(r'C:\tmp'))
def test_mkdirs(self):
path = os.path.join(self.tmpdir, 't1/t2')
self.fs.mkdirs(path)
self.assertTrue(os.path.isdir(path))
def test_mkdirs_failed(self):
path = os.path.join(self.tmpdir, 't1/t2')
self.fs.mkdirs(path)
# Creating an already-existing directory should raise IOError
with self.assertRaises(IOError):
self.fs.mkdirs(path)
with self.assertRaises(IOError):
self.fs.mkdirs(os.path.join(self.tmpdir, 't1'))
def test_match_file(self):
path = os.path.join(self.tmpdir, 'f1')
open(path, 'a').close()
# Match files in the temp directory
result = self.fs.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [path])
def test_match_file_empty(self):
path = os.path.join(self.tmpdir, 'f2') # Does not exist
# Match files in the temp directory
result = self.fs.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [])
def test_match_file_exception(self):
# Match files with None so that it throws an exception
with self.assertRaises(BeamIOError) as error:
self.fs.match([None])
self.assertTrue(
error.exception.message.startswith('Match operation failed'))
self.assertEqual(error.exception.exception_details.keys(), [None])
def test_match_glob(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
open(path1, 'a').close()
open(path2, 'a').close()
# Match both the files in the directory
path = os.path.join(self.tmpdir, '*')
result = self.fs.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [path1, path2])
def test_match_directory(self):
result = self.fs.match([self.tmpdir])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [self.tmpdir])
def test_copy(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
self.fs.copy([path1], [path2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_copy_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaises(BeamIOError) as error:
self.fs.copy([path1], [path2])
self.assertTrue(
error.exception.message.startswith('Copy operation failed'))
self.assertEqual(error.exception.exception_details.keys(), [(path1, path2)])
def test_copy_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
self.fs.mkdirs(path_t1)
self.fs.mkdirs(path_t2)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
self.fs.copy([path_t1], [path_t2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_rename(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
self.fs.rename([path1], [path2])
self.assertTrue(self.fs.exists(path2))
self.assertFalse(self.fs.exists(path1))
def test_rename_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaises(BeamIOError) as error:
self.fs.rename([path1], [path2])
self.assertTrue(
error.exception.message.startswith('Rename operation failed'))
self.assertEqual(error.exception.exception_details.keys(), [(path1, path2)])
def test_rename_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
self.fs.mkdirs(path_t1)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
self.fs.rename([path_t1], [path_t2])
self.assertTrue(self.fs.exists(path_t2))
self.assertFalse(self.fs.exists(path_t1))
self.assertTrue(self.fs.exists(path2))
self.assertFalse(self.fs.exists(path1))
def test_exists(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(self.fs.exists(path1))
self.assertFalse(self.fs.exists(path2))
def test_delete(self):
path1 = os.path.join(self.tmpdir, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(self.fs.exists(path1))
self.fs.delete([path1])
self.assertFalse(self.fs.exists(path1))
def test_delete_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
with self.assertRaises(BeamIOError) as error:
self.fs.delete([path1])
self.assertTrue(
error.exception.message.startswith('Delete operation failed'))
self.assertEqual(error.exception.exception_details.keys(), [path1])
|
the-stack_106_20494
|
import hashlib
import os
import mimetypes
from time import time
from zlib import adler32
from werkzeug.datastructures import Headers
from werkzeug.wrappers import Response
from werkzeug.wsgi import wrap_file
SEND_FILE_MAX_AGE_DEFAULT = 43200 # 12 hours, default from Flask.
def send_file(request, filename, attachment_filename):
"""Simplified from Flask to add appropriated headers."""
headers = Headers()
headers.add('Content-Disposition', 'attachment',
filename=attachment_filename)
headers['Content-Length'] = os.path.getsize(filename)
data = wrap_file(request.environ, open(filename, 'rb'))
mimetype = mimetypes.guess_type(attachment_filename)[0] \
or 'application/octet-stream'
response = Response(data, mimetype=mimetype, headers=headers,
direct_passthrough=True)
response.last_modified = os.path.getmtime(filename)
response.cache_control.public = True
response.cache_control.max_age = SEND_FILE_MAX_AGE_DEFAULT
response.expires = int(time() + SEND_FILE_MAX_AGE_DEFAULT)
response.set_etag('%s-%s-%s' % (
os.path.getmtime(filename),
os.path.getsize(filename),
adler32(filename.encode('utf-8')) & 0xffffffff
))
return response
def generate_hash(query_string):
"""Custom hash to avoid long values."""
return hashlib.md5(query_string.encode('utf-8')).hexdigest()[:10]
def file_exists(file_path):
"""Check existence AND non-emptiness of the file."""
return os.path.isfile(file_path) and os.stat(file_path).st_size
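# Minimal usage sketch (illustrative only; the view, URL and file path below are
# hypothetical, not part of this module):
#
#     from werkzeug.wrappers import Request
#
#     @Request.application
#     def application(request):
#         # Streams /tmp/report.csv with Content-Disposition, Content-Length,
#         # Last-Modified, Cache-Control/Expires and an ETag derived from
#         # (mtime, size, adler32(filename)).
#         return send_file(request, '/tmp/report.csv',
#                          attachment_filename='report.csv')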
|
the-stack_106_20496
|
from __future__ import division
import os
import cv2
import numpy as np
import sys
import pickle
from optparse import OptionParser
import time
from keras_frcnn import config
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from keras_frcnn import roi_helpers
sys.setrecursionlimit(40000)
parser = OptionParser()
parser.add_option("-p", "--path", dest="test_path", help="Path to test data.", default="/home/yangdoudou/fasterrcnn/VOC2007")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
help="Number of ROIs per iteration. Higher means more memory use.", default=32)
parser.add_option("--config_filename", dest="config_filename", help=
"Location to read the metadata related to the training (generated when training).",
default="config.pickle")
parser.add_option("--network", dest="network", help="Base network to use. Supports vgg or resnet50.", default='vgg')
(options, args) = parser.parse_args()
if not options.test_path: # if filename is not given
parser.error('Error: path to test data must be specified. Pass --path to command line')
config_output_filename = options.config_filename
with open(config_output_filename, 'rb') as f_in:
C = pickle.load(f_in)
if C.network == 'resnet50':
import keras_frcnn.resnet as nn
elif C.network == 'vgg':
import keras_frcnn.vgg as nn
elif C.network == 'resnet101':
import keras_frcnn.resnet101 as nn
# turn off any data augmentation at test time
C.use_horizontal_flips = False
C.use_vertical_flips = False
C.rot_90 = False
img_path = options.test_path
def format_img_size(img, C):
""" formats the image size based on config """
img_min_side = float(C.im_size)
(height,width,_) = img.shape
if width <= height:
ratio = img_min_side/width
new_height = int(ratio * height)
new_width = int(img_min_side)
else:
ratio = img_min_side/height
new_width = int(ratio * width)
new_height = int(img_min_side)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
return img, ratio
def format_img_channels(img, C):
""" formats the image channels based on config """
img = img[:, :, (2, 1, 0)]
img = img.astype(np.float32)
img[:, :, 0] -= C.img_channel_mean[0]
img[:, :, 1] -= C.img_channel_mean[1]
img[:, :, 2] -= C.img_channel_mean[2]
img /= C.img_scaling_factor
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
return img
def format_img(img, C):
""" formats an image for model prediction based on config """
img, ratio = format_img_size(img, C)
img = format_img_channels(img, C)
return img, ratio
# Method to transform the coordinates of the bounding box to its original size
def get_real_coordinates(ratio, x1, y1, x2, y2):
real_x1 = int(round(x1 / ratio))
real_y1 = int(round(y1 / ratio))
real_x2 = int(round(x2 / ratio))
real_y2 = int(round(y2 / ratio))
return (real_x1, real_y1, real_x2 ,real_y2)
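# Illustrative example (not in the original script): with a 1200x900 input and
# C.im_size = 600, format_img_size resizes the image to 800x600 and returns
# ratio = 600/900. A box predicted at (100, 60, 400, 300) on the resized image then
# maps back to (150, 90, 600, 450) on the original image via get_real_coordinates.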
class_mapping = C.class_mapping
if 'bg' not in class_mapping:
class_mapping['bg'] = len(class_mapping)
class_mapping = {v: k for k, v in class_mapping.items()}
print(class_mapping)
class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
C.num_rois = int(options.num_rois)
if C.network == 'resnet50':
num_features = 1024
elif C.network == 'vgg':
num_features = 512
elif C.network == 'resnet101':
num_features = 1024
if K.image_data_format() == 'channels_first':
input_shape_img = (3, None, None)
input_shape_features = (num_features, None, None)
else:
input_shape_img = (None, None, 3)
input_shape_features = (None, None, num_features)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(C.num_rois, 4))
feature_map_input = Input(shape=input_shape_features)
# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn_layers = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True)
model_rpn = Model(img_input, rpn_layers)
model_classifier_only = Model([feature_map_input, roi_input], classifier)
model_classifier = Model([feature_map_input, roi_input], classifier)
print('Loading weights from {}'.format(C.model_path))
model_rpn.load_weights(C.model_path, by_name=True)
model_classifier.load_weights(C.model_path, by_name=True)
model_rpn.compile(optimizer='sgd', loss='mse')
model_classifier.compile(optimizer='sgd', loss='mse')
all_imgs = []
classes = {}
bbox_threshold = 0.8
visualise = True
for idx, img_name in enumerate(sorted(os.listdir(img_path))):
if not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
continue
print(img_name)
st = time.time()
filepath = os.path.join(img_path,img_name)
img = cv2.imread(filepath)
X, ratio = format_img(img, C)
if K.image_data_format() == 'channels_last':
X = np.transpose(X, (0, 2, 3, 1))
# get the feature maps and output from the RPN
[Y1, Y2, F] = model_rpn.predict(X)
R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_data_format(), overlap_thresh=0.7)
# convert from (x1,y1,x2,y2) to (x,y,w,h)
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
# apply the spatial pyramid pooling to the proposed regions
bboxes = {}
probs = {}
for jk in range(R.shape[0]//C.num_rois + 1):
ROIs = np.expand_dims(R[C.num_rois*jk:C.num_rois*(jk+1), :], axis=0)
if ROIs.shape[1] == 0:
break
if jk == R.shape[0]//C.num_rois:
#pad R
curr_shape = ROIs.shape
target_shape = (curr_shape[0],C.num_rois,curr_shape[2])
ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
ROIs_padded[:, :curr_shape[1], :] = ROIs
ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
ROIs = ROIs_padded
[P_cls, P_regr] = model_classifier_only.predict([F, ROIs])
for ii in range(P_cls.shape[1]):
if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
continue
cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
if cls_name not in bboxes:
bboxes[cls_name] = []
probs[cls_name] = []
(x, y, w, h) = ROIs[0, ii, :]
cls_num = np.argmax(P_cls[0, ii, :])
try:
(tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)]
tx /= C.classifier_regr_std[0]
ty /= C.classifier_regr_std[1]
tw /= C.classifier_regr_std[2]
th /= C.classifier_regr_std[3]
x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
except:
pass
bboxes[cls_name].append([C.rpn_stride*x, C.rpn_stride*y, C.rpn_stride*(x+w), C.rpn_stride*(y+h)])
probs[cls_name].append(np.max(P_cls[0, ii, :]))
all_dets = []
for key in bboxes:
bbox = np.array(bboxes[key])
new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5)
for jk in range(new_boxes.shape[0]):
(x1, y1, x2, y2) = new_boxes[jk,:]
(real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)
cv2.rectangle(img,(real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])),2)
textLabel = '{}: {}'.format(key,int(100*new_probs[jk]))
all_dets.append((key,100*new_probs[jk]))
(retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,1,1)
textOrg = (real_x1, real_y1-0)
cv2.rectangle(img, (textOrg[0] - 5, textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (0, 0, 0), 2)
cv2.rectangle(img, (textOrg[0] - 5,textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (255, 255, 255), -1)
cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)
print('Elapsed time = {}'.format(time.time() - st))
print(all_dets)
#import matplotlib.pyplot as plt
#plt.show(img)
#cv2.imshow('img', img)
#cv2.waitKey(0)
cv2.imwrite('/home/yangdoudou/fasterrcnn/results_imgs/{}.png'.format(idx),img)
|
the-stack_106_20502
|
import os
from dataclasses import dataclass
from datetime import date
from datetime import datetime
from typing import Any
from typing import Dict
import requests
from common.dicts import Objectview
class ResponseError(Exception):
def __init__(self, *args, response, **kwargs):
super().__init__(*args, **kwargs)
self.response = response
class Types:
DATETIME = 'datetime'
DATE = 'date'
NUMBER = 'number'
PERCENTAGE = 'percentage'
MONEY = 'money'
STRING = 'string'
@dataclass
class Field:
""" A field specification. """
name: str = None
type: str = Types.NUMBER
required: bool = False
unique: bool = False
class Dataset:
""" Simple object to ease interaction with geckoboard.
The main python geckoboard client
(https://github.com/geckoboard/geckoboard-python) requires the template to
even *retrieve* a dataset, which is ridiculous. So, this is a rewrite
that does not require a template before executing actions on the dataset.
Example usage:
from common.geckoboard import Dataset, Field, Types
# Examples below assume you've set GECKOBOARD_API_KEY
# Could also pass in api key directly: Dataset('some.id', api_key='abc14cdga1415ga1aagt1')
# dataset *data* operations
dataset = Dataset('some.id')
dataset.overwrite([dict(tpl='13', timestamp=datetime, num_orders=54)])
dataset.append([dict(tpl='14', timestamp=datetime, num_orders=22)])
dataset.clear()
# individual dataset *schema* operations
dataset = Dataset('some.id')
class MySchema:
tpl = Field(type=Types.STRING, required=True, unique=True)
timestamp = Field(type=Types.DATETIME, required=True)
num_orders = Field(type=Types.NUMBER)
dataset.create_schema(MySchema)
schema = dataset.get_schema()
dataset.delete()
# get all the schemas
lots_of_schemas = Dataset.get_schemas()
"""
KEY_NAME = "GECKOBOARD_API_KEY"
BASE_URL = "https://api.geckoboard.com"
DATASETS_ENDPOINT = BASE_URL + "/datasets"
DATASETS_ENDPOINT_FORMAT_STR = BASE_URL + "/datasets/{}"
DATA_ENDPOINT_FORMAT_STR = f"{DATASETS_ENDPOINT}/{{}}/data"
@classmethod
def get_schemas(cls, filter_on_ids: list = None, only_return_ids: bool = False, api_key: str = None):
""" Return all the datasets/schemas.
Args:
filter_on_ids: a list of ids to filter on.
only_return_ids: whether to only return a list of ids.
api_key: the api key (can be None if have set env var)
"""
auth_params = cls._get_auth_params(api_key)
response = requests.get(cls.DATASETS_ENDPOINT, **auth_params)
cls._ensure_response_ok(response)
schemas = response.json()['data']
if only_return_ids:
return [scheme['id'] for scheme in schemas]
if filter_on_ids:
schemas = [scheme for scheme in schemas if scheme['id'] in filter_on_ids]
return schemas
@classmethod
def get_schema_ids(cls, api_key: str = None):
""" Only return the ids of datasets. """
return cls.get_schemas(only_return_ids=True, api_key=api_key)
@staticmethod
def _get_api_key(api_key: str = None):
if not api_key:
api_key = os.environ[Dataset.KEY_NAME]
return api_key
@staticmethod
def _get_auth_params(api_key: str = None):
api_key = Dataset._get_api_key(api_key)
return dict(auth=(api_key, ''))
def _key_from_auth_params(self):
return self.auth_params['auth'][0]
def __init__(self, dataset_id: str, api_key: str = None):
self.auth_params = self._get_auth_params(api_key)
self.dataset_id = dataset_id
def _field_to_field_def(self, key: str, field: Field) -> Dict[str, dict]:
""" Convert a Field into proper schema. """
return {
key: dict(
# could uppercase and put spaces for underscores or something here
name=field.name or key,
type=field.type,
required=field.required
)
}
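# For illustration (not in the original source):
# _field_to_field_def('num_orders', Field(type=Types.NUMBER, required=True))
# returns {'num_orders': {'name': 'num_orders', 'type': 'number', 'required': True}}.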
def create_schema(self, schema: Any = None):
""" Creates the template
Args
schema: An instance or class that has Field instances as
attributes. Or can be a dict-like object that has keys (the field
names) and values that are Field instances.
Returns:
None if successful.
Raises:
ResponseError if any problems.
Example:
class MySchema:
first = Field(type=Types.STRING)
second = Field()  # numeric by default
timestamp = Field(type=Types.DATETIME, required=True, unique=True)
dataset.create_schema(MySchema)
# could also be a dictionary
my_schema = dict(
first=Field(type=Types.STRING),
second=Field(),  # numeric by default
timestamp=Field(type=Types.DATETIME, required=True, unique=True),
)
dataset.create_schema(my_schema)
"""
if isinstance(schema, dict):
schema = Objectview(schema)
fields = {
att: getattr(schema, att)
for att in dir(schema)
if not att.startswith("_") and isinstance(getattr(schema, att), Field)
}
all_fields = {}
for name, field in fields.items():
all_fields.update(self._field_to_field_def(name, field))
unique_by = [name for name, field in fields.items() if field.unique]
template = dict(
fields=all_fields,
unique_by=unique_by,
)
response = requests.put(
self.DATASETS_ENDPOINT_FORMAT_STR.format(self.dataset_id), json=template, **self.auth_params
)
self._ensure_response_ok(response)
def overwrite(self, data):
return self._upload(data, append=False)
def append(self, data):
return self._upload(data, append=True)
def delete(self):
""" Delete the schema and all data in it.
Returns:
None if successful, the response object if unsuccessful.
"""
url = self.DATASETS_ENDPOINT_FORMAT_STR.format(self.dataset_id)
response = requests.delete(url, **self.auth_params)
self._ensure_response_ok(response)
def clear(self):
""" Clears the data in the dataset without changing the schema. """
self.overwrite([])
def _date_like_to_isoformat(self, value):
""" Transforms date or datetime to isoformat. """
if isinstance(value, date):
return value.isoformat()
elif isinstance(value, datetime):
return value.isoformat().replace(' ', 'T')
else:
return value
def dates_to_isoformat(self, data):
return [{key: self._date_like_to_isoformat(value) for key, value in datum.items()} for datum in data]
def _upload(self, data, append):
""" Appends data. """
url = self.DATA_ENDPOINT_FORMAT_STR.format(self.dataset_id)
method = 'post' if append else 'put'
processed_data = self.dates_to_isoformat(data)
payload = dict(data=processed_data)
response = getattr(requests, method)(url, json=payload, **self.auth_params)
self._ensure_response_ok(response)
@staticmethod
def _ensure_response_ok(response):
# print("STATUS CODE", response.status_code)
# print("RESPONSE", response.json())
if not response.ok:
raise ResponseError(response=response)
def get_schema(self):
api_key = self._key_from_auth_params()
schemas = self.get_schemas(filter_on_ids=[self.dataset_id], api_key=api_key)
if schemas:
return schemas[0]
|
the-stack_106_20504
|
def Kadane(array):
partialSum = bestSum = array[0]
fromIndex = toIndex = 0
for i in range(1, len(array)):
if array[i] > partialSum + array[i]:
partialSum = array[i]
fromIndex = i
else:
partialSum += array[i]
if partialSum >= bestSum:
bestSum = partialSum
toIndex = i
return {
"fromIndex" : fromIndex,
"toIndex" : toIndex,
"bestSum" : bestSum
}
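# Worked example (illustrative): Kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) returns
# {'fromIndex': 3, 'toIndex': 6, 'bestSum': 6}, i.e. the maximum-sum subarray
# [4, -1, 2, 1].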
n = int(input("Enter the size of the array: "))
print("Input the array")
array = list(map(int, input().split()))
kadane = Kadane(array)
print("Sum: %d From: %d To: %d" % (kadane['bestSum'], kadane['fromIndex'], kadane['toIndex']))
|
the-stack_106_20505
|
import asyncio
import functools
import operator
from typing import (
cast,
Iterable,
NamedTuple,
Sequence,
Type,
Tuple,
)
from cached_property import cached_property
from cancel_token import CancelToken
from eth_utils import (
ExtendedDebugLogger,
to_tuple,
)
from eth_utils.toolz import groupby, valmap
from eth_keys import keys
from p2p._utils import duplicates, get_logger
from p2p.abc import (
ConnectionAPI,
HandshakerAPI,
HandshakeReceiptAPI,
MultiplexerAPI,
NodeAPI,
TransportAPI,
TProtocol,
ProtocolAPI,
)
from p2p.connection import Connection
from p2p.constants import DEVP2P_V5
from p2p.disconnect import DisconnectReason
from p2p.exceptions import (
HandshakeFailure,
HandshakeFailureTooManyPeers,
NoMatchingPeerCapabilities,
)
from p2p.multiplexer import (
stream_transport_messages,
Multiplexer,
)
from p2p.p2p_proto import (
DevP2PReceipt,
Disconnect,
Hello,
HelloPayload,
BaseP2PProtocol,
P2PProtocolV4,
P2PProtocolV5,
)
from p2p.protocol import get_cmd_offsets
from p2p.transport import Transport
from p2p.typing import (
Capabilities,
Capability,
)
class Handshaker(HandshakerAPI[TProtocol]):
"""
Base class that handles the handshake for a given protocol. The primary
justification for this class's existence is to house parameters that are
needed for the protocol handshake.
"""
@cached_property
def logger(self) -> ExtendedDebugLogger:
return get_logger('p2p.handshake.Handshaker')
class DevP2PHandshakeParams(NamedTuple):
client_version_string: str
listen_port: int
version: int
def get_base_protocol_class(self) -> Type[BaseP2PProtocol]:
if self.version == 5:
return P2PProtocolV5
elif self.version == 4:
return P2PProtocolV4
else:
raise Exception(
f"Unknown protocol version: {self.version}. Expected one of "
f"`4` or `5`"
)
@to_tuple
def _select_capabilities(remote_capabilities: Capabilities,
local_capabilities: Capabilities) -> Iterable[Capability]:
"""
Select the appropriate shared capabilities between local and remote.
https://github.com/ethereum/devp2p/blob/master/rlpx.md#capability-messaging
"""
# Determine the remote capabilities that intersect with our own.
matching_capabilities = tuple(sorted(
set(local_capabilities).intersection(remote_capabilities),
key=operator.itemgetter(0),
))
# generate a dictionary of each capability grouped by name and sorted by
# version in descending order.
sort_by_version = functools.partial(sorted, key=operator.itemgetter(1), reverse=True)
capabilities_by_name = valmap(
tuple,
valmap(
sort_by_version,
groupby(operator.itemgetter(0), matching_capabilities),
),
)
# now we loop over the names that have a matching capability and return the
# *highest* version one.
for name in sorted(capabilities_by_name.keys()):
yield capabilities_by_name[name][0]
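# Illustrative example (not part of the original module):
# _select_capabilities(
#     remote_capabilities=(('eth', 63), ('eth', 64)),
#     local_capabilities=(('eth', 63), ('eth', 64), ('les', 2)),
# ) returns (('eth', 64),): 'les' is not shared, and the highest shared 'eth'
# version wins.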
async def _do_p2p_handshake(transport: TransportAPI,
capabilities: Capabilities,
p2p_handshake_params: DevP2PHandshakeParams,
base_protocol: BaseP2PProtocol,
token: CancelToken) -> Tuple[DevP2PReceipt, BaseP2PProtocol]:
client_version_string, listen_port, p2p_version = p2p_handshake_params
base_protocol.send(Hello(HelloPayload(
client_version_string=client_version_string,
capabilities=capabilities,
listen_port=listen_port,
version=p2p_version,
remote_public_key=transport.public_key.to_bytes(),
)))
# The base `p2p` protocol handshake directly streams the messages as it has
# strict requirements about receiving the `Hello` message first.
async for _, cmd in stream_transport_messages(transport, base_protocol, token=token):
if isinstance(cmd, Disconnect):
if cmd.payload == DisconnectReason.TOO_MANY_PEERS:
raise HandshakeFailureTooManyPeers(f"Peer disconnected because it is already full")
if not isinstance(cmd, Hello):
raise HandshakeFailure(
f"First message across the DevP2P connection must be a Hello "
f"msg, got {cmd}, disconnecting"
)
protocol: BaseP2PProtocol
if base_protocol.version >= DEVP2P_V5:
# Check whether to support Snappy Compression or not
# based on other peer's p2p protocol version
snappy_support = cmd.payload.version >= DEVP2P_V5
if snappy_support:
# Now update the base protocol to support snappy compression
# This is needed so that Trinity is compatible with parity since
# parity sends Ping immediately after handshake
protocol = P2PProtocolV5(
transport,
command_id_offset=0,
snappy_support=True,
)
else:
protocol = base_protocol
else:
protocol = base_protocol
devp2p_receipt = DevP2PReceipt(
protocol=protocol,
version=cmd.payload.version,
client_version_string=cmd.payload.client_version_string,
capabilities=cmd.payload.capabilities,
remote_public_key=cmd.payload.remote_public_key,
listen_port=cmd.payload.listen_port,
)
break
else:
raise HandshakeFailure("DevP2P message stream exited before finishing handshake")
return devp2p_receipt, protocol
async def negotiate_protocol_handshakes(transport: TransportAPI,
p2p_handshake_params: DevP2PHandshakeParams,
protocol_handshakers: Sequence[HandshakerAPI[ProtocolAPI]],
token: CancelToken,
) -> Tuple[MultiplexerAPI, DevP2PReceipt, Tuple[HandshakeReceiptAPI, ...]]: # noqa: E501
"""
Negotiate the handshakes for both the base `p2p` protocol and the
appropriate sub protocols. The basic logic follows the following steps.
* perform the base `p2p` handshake.
* using the capabilities exchanged during the `p2p` handshake, select the
appropriate sub protocols.
* allow each sub-protocol to perform its own handshake.
* return the established `Multiplexer` as well as the `HandshakeReceipt`
objects from each handshake.
"""
# The `p2p` Protocol class that will be used.
p2p_protocol_class = p2p_handshake_params.get_base_protocol_class()
# Collect our local capabilities, the set of (name, version) pairs for all
# of the protocols that we support.
local_capabilities = tuple(
handshaker.protocol_class.as_capability()
for handshaker
in protocol_handshakers
)
# Verify that there are no duplicated local or remote capabilities
duplicate_capabilities = duplicates(local_capabilities)
if duplicate_capabilities:
raise Exception(f"Duplicate local capabilities: {duplicate_capabilities}")
# We create an *ephemeral* version of the base `p2p` protocol with snappy
# compression disabled for the handshake. As part of the handshake, a new
# instance of this protocol will be created with snappy compression enabled
# if it is supported by the protocol version.
ephemeral_base_protocol = p2p_protocol_class(
transport,
command_id_offset=0,
snappy_support=False,
)
# Perform the actual `p2p` protocol handshake. We need the remote
# capabilities data from the receipt to select the appropriate sub
# protocols.
devp2p_receipt, base_protocol = await _do_p2p_handshake(
transport,
local_capabilities,
p2p_handshake_params,
ephemeral_base_protocol,
token,
)
# This data structure is simply for easy retrieval of the proper
# `Handshaker` for each selected protocol.
protocol_handshakers_by_capability = dict(zip(local_capabilities, protocol_handshakers))
# Using our local capabilities and the ones transmitted by the remote
# select the highest shared version of each shared protocol.
selected_capabilities = _select_capabilities(
devp2p_receipt.capabilities,
local_capabilities,
)
# If there are no capability matches throw an exception.
if len(selected_capabilities) < 1:
raise NoMatchingPeerCapabilities(
"Found no matching capabilities between self and peer:\n"
f" - local : {tuple(sorted(local_capabilities))}\n"
f" - remote: {devp2p_receipt.capabilities}"
)
# Retrieve the handshakers which correspond to the selected protocols.
# These are needed to perform the actual handshake logic for each protocol.
selected_handshakers = tuple(
protocol_handshakers_by_capability[capability]
for capability in selected_capabilities
)
# Grab the `Protocol` class for each of the selected protocols. We need
# this to compute the offsets for each protocol's command ids, as well as
# for instantiation of the protocol instances.
selected_protocol_types = tuple(
handshaker.protocol_class
for handshaker
in selected_handshakers
)
# Compute the offsets for each protocol's command ids
protocol_cmd_offsets = get_cmd_offsets(selected_protocol_types)
# Now instantiate instances of each of the protocol classes.
selected_protocols = tuple(
protocol_class(transport, command_id_offset, base_protocol.snappy_support)
for protocol_class, command_id_offset
in zip(selected_protocol_types, protocol_cmd_offsets)
)
# Create `Multiplexer` to abstract all of the protocols into a single
# interface to stream only messages relevant to the given protocol.
multiplexer = Multiplexer(transport, base_protocol, selected_protocols, token=token)
# This context manager runs a background task which reads messages off of
# the `Transport` and feeds them into protocol specific queues. Each
# protocol is responsible for reading its own messages from that queue via
# the `Multiplexer.stream_protocol_messages` API.
async with multiplexer.multiplex():
# Concurrently perform the handshakes for each protocol, gathering up
# the returned receipts.
protocol_receipts = cast(Tuple[HandshakeReceiptAPI, ...], await asyncio.gather(*(
handshaker.do_handshake(multiplexer, protocol)
for handshaker, protocol
in zip(selected_handshakers, selected_protocols)
)))
# Return the `Multiplexer` object as well as the handshake receipts. The
# `Multiplexer` object acts as a container for the individual protocol
# instances.
return multiplexer, devp2p_receipt, protocol_receipts
async def dial_out(remote: NodeAPI,
private_key: keys.PrivateKey,
p2p_handshake_params: DevP2PHandshakeParams,
protocol_handshakers: Sequence[HandshakerAPI[ProtocolAPI]],
token: CancelToken) -> ConnectionAPI:
"""
Perform the auth and P2P handshakes with the given remote.
Return a `Connection` object housing all of the negotiated sub protocols.
Raises UnreachablePeer if we cannot connect to the peer or
HandshakeFailure if the remote disconnects before completing the
handshake or if none of the sub-protocols supported by us is also
supported by the remote.
"""
transport = await Transport.connect(
remote,
private_key,
token,
)
transport.logger.debug2("Initiating p2p handshake with %s", remote)
try:
multiplexer, devp2p_receipt, protocol_receipts = await negotiate_protocol_handshakes(
transport=transport,
p2p_handshake_params=p2p_handshake_params,
protocol_handshakers=protocol_handshakers,
token=token,
)
except Exception:
# Note: This is one of two places where we manually handle closing the
# reader/writer connection pair in the event of an error during the
# peer connection and handshake process.
# See `p2p.auth.handshake` for the other.
try:
await transport.close()
except ConnectionResetError:
transport.logger.debug("Could not wait for transport to close")
raise
transport.logger.debug2("Completed p2p handshake with %s", remote)
connection = Connection(
multiplexer=multiplexer,
devp2p_receipt=devp2p_receipt,
protocol_receipts=protocol_receipts,
is_dial_out=True,
)
return connection
async def receive_dial_in(reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
private_key: keys.PrivateKey,
p2p_handshake_params: DevP2PHandshakeParams,
protocol_handshakers: Sequence[HandshakerAPI[ProtocolAPI]],
token: CancelToken) -> Connection:
transport = await Transport.receive_connection(
reader=reader,
writer=writer,
private_key=private_key,
token=token,
)
try:
multiplexer, devp2p_receipt, protocol_receipts = await negotiate_protocol_handshakes(
transport=transport,
p2p_handshake_params=p2p_handshake_params,
protocol_handshakers=protocol_handshakers,
token=token,
)
except Exception:
# Note: This is one of two places where we manually handle closing the
# reader/writer connection pair in the event of an error during the
# peer connection and handshake process.
# See `p2p.auth.handshake` for the other.
try:
await transport.close()
except ConnectionResetError:
transport.logger.debug("Could not wait for transport to close")
raise
connection = Connection(
multiplexer=multiplexer,
devp2p_receipt=devp2p_receipt,
protocol_receipts=protocol_receipts,
is_dial_out=False,
)
return connection
|
the-stack_106_20506
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from util import exec_remote_command
from dbaas_cloudstack.models import HostAttr as CS_HostAttr
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class ConfigLog(BaseStep):
def __unicode__(self):
return "Configuring rsyslog..."
def do(self, workflow_dict):
try:
for source_host in workflow_dict['source_hosts']:
future_host = source_host.future_host
cs_host_attr = CS_HostAttr.objects.get(host=future_host)
LOG.info("Configuring rsyslog {}".format(future_host))
script = self.rsyslog_create_config(workflow_dict['database'])
LOG.info(script)
output = {}
return_code = exec_remote_command(
server=future_host.address,
username=cs_host_attr.vm_user,
password=cs_host_attr.vm_password,
command=script,
output=output
)
LOG.info(output)
if return_code != 0:
error_msg = "Error configuring rsyslog: {}".format(str(output))
LOG.error(error_msg)
raise EnvironmentError(error_msg)
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
for source_host in workflow_dict['source_hosts']:
future_host = source_host.future_host
cs_host_attr = CS_HostAttr.objects.get(host=future_host)
LOG.info("Removing rsyslog config in {}".format(future_host))
script = self.rsyslog_remove_config()
LOG.info(script)
output = {}
return_code = exec_remote_command(
server=future_host.address,
username=cs_host_attr.vm_user,
password=cs_host_attr.vm_password,
command=script,
output=output
)
LOG.info(output)
if return_code != 0:
error_msg = "Error removing configuring rsyslog: {}".format(str(output))
LOG.error(error_msg)
raise EnvironmentError(error_msg)
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def rsyslog_create_config(self, database):
return \
' echo "\$EscapeControlCharactersOnReceive off" >> /etc/rsyslog.d/dbaaslog.conf &&' \
' sed -i "\$a \$template db-log, \\\"<%PRI%>%TIMESTAMP% %HOSTNAME% %syslogtag%%msg% tags: INFRA,DBAAS,MYSQL,{}\\\"" /etc/rsyslog.d/dbaaslog.conf &&' \
' sed -i "\$a*.* @logging.udp.globoi.com:5140; db-log" /etc/rsyslog.d/dbaaslog.conf &&' \
' /etc/init.d/rsyslog restart'.format(database.name)
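# Roughly, once the shell quoting above is resolved, /etc/rsyslog.d/dbaaslog.conf
# should end up with content along these lines (illustrative):
#   $EscapeControlCharactersOnReceive off
#   $template db-log, "<%PRI%>%TIMESTAMP% %HOSTNAME% %syslogtag%%msg% tags: INFRA,DBAAS,MYSQL,<database name>"
#   *.* @logging.udp.globoi.com:5140; db-log
# after which rsyslog is restarted so the forwarding rule takes effect.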
def rsyslog_remove_config(self):
return 'rm -f /etc/rsyslog.d/dbaaslog.conf'
|
the-stack_106_20507
|
from random import randint
from sympy import expand, sqrt, symbols
from cartesian import *
def sub_y(x, y):
"""Return a substitution pair mapping y to a randomly signed sqrt(1 - x**2), placing (x, y) on the unit circle."""
return y, (1 - randint(0, 1)*2)*sqrt(1 - x**2)
def main():
# A hexagon ABCDEF inscribed in a unit circle.
# Prove that the 3 intersections of opposite edges (AB∩DE, BC∩EF, CD∩FA) are collinear (Pascal's theorem).
a, b, c, d, e, f, g, h, j, k, m, n = symbols('a, b, c, d, e, f, g, h, j, k, m, n')
A, B, C, D, E, F = (a, b), (c, d), (e, f), (g, h), (j, k), (m, n)
# A = (1, 0) can be faster
AB, BC, CD, DE, EF, FA = line(A, B), line(B, C), line(C, D), line(D, E), line(E, F), line(F, A)
G, H, J = intersect(AB, DE), intersect(BC, EF), intersect(CD, FA)
print('G:', G)
print('H:', H)
print('J:', J)
print('Are GHJ collinear?', expand(collinear(G, H, J).lhs. \
subs([sub_y(a, b), sub_y(c, d), sub_y(e, f), sub_y(g, h), sub_y(j, k), sub_y(m, n)])) == 0)
if __name__ == '__main__':
main()
|
the-stack_106_20509
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import errno
import codecs
import collections
import json
import math
import shutil
import sys
import numpy as np
import tensorflow as tf
import pyhocon
def initialize_from_env():
if "GPU" in os.environ:
set_gpus(int(os.environ["GPU"]))
else:
set_gpus()
name = sys.argv[1]
print("Running experiment: {}".format(name))
config = pyhocon.ConfigFactory.parse_file("experiments.conf")[name]
config["log_dir"] = mkdirs(os.path.join(config["log_root"], name))
print(pyhocon.HOCONConverter.convert(config, "hocon"))
return config
def copy_checkpoint(source, target):
for ext in (".index", ".data-00000-of-00001"):
shutil.copyfile(source + ext, target + ext)
def make_summary(value_dict):
return tf.Summary(value=[tf.Summary.Value(tag=k, simple_value=v) for k,v in value_dict.items()])
def flatten(l):
return [item for sublist in l for item in sublist]
def set_gpus(*gpus):
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(g) for g in gpus)
print("Setting CUDA_VISIBLE_DEVICES to: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
def mkdirs(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
return path
def load_char_dict(char_vocab_path):
vocab = [u"<unk>"]
with codecs.open(char_vocab_path, encoding="utf-8") as f:
vocab.extend(l.strip() for l in f.readlines())
char_dict = collections.defaultdict(int)
char_dict.update({c:i for i, c in enumerate(vocab)})
return char_dict
def maybe_divide(x, y):
return 0 if y == 0 else x / float(y)
def projection(inputs, output_size, initializer=None):
return ffnn(inputs, 0, -1, output_size, dropout=None, output_weights_initializer=initializer)
def highway(inputs, num_layers, dropout):
for i in range(num_layers):
with tf.variable_scope("highway_{}".format(i)):
j, f = tf.split(projection(inputs, 2 * shape(inputs, -1)), 2, -1)
f = tf.sigmoid(f)
j = tf.nn.relu(j)
if dropout is not None:
j = tf.nn.dropout(j, dropout)
inputs = f * j + (1 - f) * inputs
return inputs
def shape(x, dim):
return x.get_shape()[dim].value or tf.shape(x)[dim]
def ffnn(inputs, num_hidden_layers, hidden_size, output_size, dropout, output_weights_initializer=None):
if len(inputs.get_shape()) > 3:
raise ValueError("FFNN with rank {} not supported".format(len(inputs.get_shape())))
if len(inputs.get_shape()) == 3:
batch_size = shape(inputs, 0)
seqlen = shape(inputs, 1)
emb_size = shape(inputs, 2)
current_inputs = tf.reshape(inputs, [batch_size * seqlen, emb_size])
else:
current_inputs = inputs
for i in range(num_hidden_layers):
hidden_weights = tf.get_variable("hidden_weights_{}".format(i), [shape(current_inputs, 1), hidden_size])
hidden_bias = tf.get_variable("hidden_bias_{}".format(i), [hidden_size])
current_outputs = tf.nn.relu(tf.nn.xw_plus_b(current_inputs, hidden_weights, hidden_bias))
if dropout is not None:
current_outputs = tf.nn.dropout(current_outputs, dropout)
current_inputs = current_outputs
output_weights = tf.get_variable("output_weights", [shape(current_inputs, 1), output_size], initializer=output_weights_initializer)
output_bias = tf.get_variable("output_bias", [output_size])
outputs = tf.nn.xw_plus_b(current_inputs, output_weights, output_bias)
if len(inputs.get_shape()) == 3:
outputs = tf.reshape(outputs, [batch_size, seqlen, output_size])
return outputs
def cnn(inputs, filter_sizes, num_filters):
num_words = shape(inputs, 0)
num_chars = shape(inputs, 1)
input_size = shape(inputs, 2)
outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.variable_scope("conv_{}".format(i)):
w = tf.get_variable("w", [filter_size, input_size, num_filters])
b = tf.get_variable("b", [num_filters])
conv = tf.nn.conv1d(inputs, w, stride=1, padding="VALID") # [num_words, num_chars - filter_size, num_filters]
h = tf.nn.relu(tf.nn.bias_add(conv, b)) # [num_words, num_chars - filter_size, num_filters]
pooled = tf.reduce_max(h, 1) # [num_words, num_filters]
outputs.append(pooled)
return tf.concat(outputs, 1) # [num_words, num_filters * len(filter_sizes)]
def batch_gather(emb, indices):
batch_size = shape(emb, 0)
seqlen = shape(emb, 1)
if len(emb.get_shape()) > 2:
emb_size = shape(emb, 2)
else:
emb_size = 1
flattened_emb = tf.reshape(emb, [batch_size * seqlen, emb_size]) # [batch_size * seqlen, emb]
offset = tf.expand_dims(tf.range(batch_size) * seqlen, 1) # [batch_size, 1]
gathered = tf.gather(flattened_emb, indices + offset) # [batch_size, num_indices, emb]
if len(emb.get_shape()) == 2:
gathered = tf.squeeze(gathered, 2) # [batch_size, num_indices]
return gathered
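# Shape/behaviour sketch (illustrative): with emb = [[10, 20, 30], [40, 50, 60]]
# (shape [batch_size=2, seqlen=3]) and indices = [[0, 2], [1, 1]],
# batch_gather(emb, indices) evaluates to [[10, 30], [50, 50]]; for a rank-3 emb of
# shape [batch_size, seqlen, emb_size] the result has shape
# [batch_size, num_indices, emb_size].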
class RetrievalEvaluator(object):
def __init__(self):
self._num_correct = 0
self._num_gold = 0
self._num_predicted = 0
def update(self, gold_set, predicted_set):
self._num_correct += len(gold_set & predicted_set)
self._num_gold += len(gold_set)
self._num_predicted += len(predicted_set)
def recall(self):
return maybe_divide(self._num_correct, self._num_gold)
def precision(self):
return maybe_divide(self._num_correct, self._num_predicted)
def metrics(self):
recall = self.recall()
precision = self.precision()
f1 = maybe_divide(2 * recall * precision, precision + recall)
return recall, precision, f1
class EmbeddingDictionary(object):
def __init__(self, info, normalize=True, maybe_cache=None):
self._size = info["size"]
self._normalize = normalize
self._path = info["path"]
if maybe_cache is not None and maybe_cache._path == self._path:
assert self._size == maybe_cache._size
self._embeddings = maybe_cache._embeddings
else:
self._embeddings = self.load_embedding_dict(self._path)
@property
def size(self):
return self._size
def load_embedding_dict(self, path):
print("Loading word embeddings from {}...".format(path))
default_embedding = np.zeros(self.size)
embedding_dict = collections.defaultdict(lambda:default_embedding)
if len(path) > 0:
vocab_size = None
with open(path) as f:
for i, line in enumerate(f.readlines()):
word_end = line.find(" ")
word = line[:word_end]
embedding = np.fromstring(line[word_end + 1:], np.float32, sep=" ")
assert len(embedding) == self.size
embedding_dict[word] = embedding
if vocab_size is not None:
assert vocab_size == len(embedding_dict)
print("Done loading word embeddings.")
return embedding_dict
def __getitem__(self, key):
embedding = self._embeddings[key]
if self._normalize:
embedding = self.normalize(embedding)
return embedding
def normalize(self, v):
norm = np.linalg.norm(v)
if norm > 0:
return v / norm
else:
return v
class CustomLSTMCell(tf.contrib.rnn.RNNCell):
def __init__(self, num_units, batch_size, dropout):
self._num_units = num_units
self._dropout = dropout
self._dropout_mask = tf.nn.dropout(tf.ones([batch_size, self.output_size]), dropout)
self._initializer = self._block_orthonormal_initializer([self.output_size] * 3)
initial_cell_state = tf.get_variable("lstm_initial_cell_state", [1, self.output_size])
initial_hidden_state = tf.get_variable("lstm_initial_hidden_state", [1, self.output_size])
self._initial_state = tf.contrib.rnn.LSTMStateTuple(initial_cell_state, initial_hidden_state)
@property
def state_size(self):
return tf.contrib.rnn.LSTMStateTuple(self.output_size, self.output_size)
@property
def output_size(self):
return self._num_units
@property
def initial_state(self):
return self._initial_state
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM)."""
with tf.variable_scope(scope or type(self).__name__): # "CustomLSTMCell"
c, h = state
h *= self._dropout_mask
concat = projection(tf.concat([inputs, h], 1), 3 * self.output_size, initializer=self._initializer)
i, j, o = tf.split(concat, num_or_size_splits=3, axis=1)
i = tf.sigmoid(i)
new_c = (1 - i) * c + i * tf.tanh(j)
new_h = tf.tanh(new_c) * tf.sigmoid(o)
new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)
return new_h, new_state
def _orthonormal_initializer(self, scale=1.0):
def _initializer(shape, dtype=tf.float32, partition_info=None):
M1 = np.random.randn(shape[0], shape[0]).astype(np.float32)
M2 = np.random.randn(shape[1], shape[1]).astype(np.float32)
Q1, R1 = np.linalg.qr(M1)
Q2, R2 = np.linalg.qr(M2)
Q1 = Q1 * np.sign(np.diag(R1))
Q2 = Q2 * np.sign(np.diag(R2))
n_min = min(shape[0], shape[1])
params = np.dot(Q1[:, :n_min], Q2[:n_min, :]) * scale
return params
return _initializer
def _block_orthonormal_initializer(self, output_sizes):
def _initializer(shape, dtype=np.float32, partition_info=None):
assert len(shape) == 2
assert sum(output_sizes) == shape[1]
initializer = self._orthonormal_initializer()
params = np.concatenate([initializer([shape[0], o], dtype, partition_info) for o in output_sizes], 1)
return params
return _initializer
|
the-stack_106_20511
|
# MIT License
# Copyright (c) 2019 Yang Liu and the HuggingFace team
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import math
import numpy as np
import torch
from torch import nn
from torch.nn.init import xavier_uniform_
from configuration_bertabs import BertAbsConfig
from transformers import BertConfig, BertModel, PreTrainedModel
MAX_SIZE = 5000
BERTABS_FINETUNED_MODEL_MAP = {
"bertabs-finetuned-cnndm": "https://s3.amazonaws.com/models.huggingface.co/bert/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization-pytorch_model.bin",
}
class BertAbsPreTrainedModel(PreTrainedModel):
config_class = BertAbsConfig
pretrained_model_archive_map = BERTABS_FINETUNED_MODEL_MAP
load_tf_weights = False
base_model_prefix = "bert"
class BertAbs(BertAbsPreTrainedModel):
def __init__(self, args, checkpoint=None, bert_extractive_checkpoint=None):
super(BertAbs, self).__init__(args)
self.args = args
self.bert = Bert()
# If pre-trained weights are passed for Bert, load these.
load_bert_pretrained_extractive = True if bert_extractive_checkpoint else False
if load_bert_pretrained_extractive:
self.bert.model.load_state_dict(
dict([(n[11:], p) for n, p in bert_extractive_checkpoint.items() if n.startswith("bert.model")]),
strict=True,
)
self.vocab_size = self.bert.model.config.vocab_size
if args.max_pos > 512:
my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size)
my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data
my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][
None, :
].repeat(args.max_pos - 512, 1)
self.bert.model.embeddings.position_embeddings = my_pos_embeddings
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0)
tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight)
self.decoder = TransformerDecoder(
self.args.dec_layers,
self.args.dec_hidden_size,
heads=self.args.dec_heads,
d_ff=self.args.dec_ff_size,
dropout=self.args.dec_dropout,
embeddings=tgt_embeddings,
vocab_size=self.vocab_size,
)
gen_func = nn.LogSoftmax(dim=-1)
self.generator = nn.Sequential(nn.Linear(args.dec_hidden_size, args.vocab_size), gen_func)
self.generator[0].weight = self.decoder.embeddings.weight
load_from_checkpoints = False if checkpoint is None else True
if load_from_checkpoints:
self.load_state_dict(checkpoint)
def init_weights(self):
for module in self.decoder.modules():
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
for p in self.generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
else:
p.data.zero_()
def forward(
self, encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask,
):
encoder_output = self.bert(
input_ids=encoder_input_ids, token_type_ids=token_type_ids, attention_mask=encoder_attention_mask,
)
encoder_hidden_states = encoder_output #encoder_output[0]
dec_state = self.decoder.init_decoder_state(encoder_input_ids, encoder_hidden_states)
decoder_outputs, _ = self.decoder(decoder_input_ids[:, :-1], encoder_hidden_states, dec_state)
return self.generator(decoder_outputs) # return decoder_outputs
class Bert(nn.Module):
""" This class is not really necessary and should probably disappear.
"""
def __init__(self):
super(Bert, self).__init__()
config = BertConfig.from_pretrained("bert-base-uncased")
self.model = BertModel(config)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, **kwargs):
self.eval()
with torch.no_grad():
encoder_outputs, _ = self.model(
input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, **kwargs
)
return encoder_outputs
class TransformerDecoder(nn.Module):
"""
The Transformer decoder from "Attention is All You Need".
Args:
num_layers (int): number of encoder layers.
d_model (int): size of the model
heads (int): number of heads
d_ff (int): size of the inner FF layer
dropout (float): dropout parameters
embeddings (:obj:`onmt.modules.Embeddings`):
embeddings to use, should have positional encodings
attn_type (str): if using a separate copy attention
"""
def __init__(self, num_layers, d_model, heads, d_ff, dropout, embeddings, vocab_size):
super(TransformerDecoder, self).__init__()
# Basic attributes.
self.decoder_type = "transformer"
self.num_layers = num_layers
self.embeddings = embeddings
self.pos_emb = PositionalEncoding(dropout, self.embeddings.embedding_dim)
# Build TransformerDecoder.
self.transformer_layers = nn.ModuleList(
[TransformerDecoderLayer(d_model, heads, d_ff, dropout) for _ in range(num_layers)]
)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
# forward(input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask)
# def forward(self, input_ids, state, attention_mask=None, memory_lengths=None,
# step=None, cache=None, encoder_attention_mask=None, encoder_hidden_states=None, memory_masks=None):
def forward(
self,
input_ids,
encoder_hidden_states=None,
state=None,
attention_mask=None,
memory_lengths=None,
step=None,
cache=None,
encoder_attention_mask=None,
):
"""
See :obj:`onmt.modules.RNNDecoderBase.forward()`
memory_bank = encoder_hidden_states
"""
# Name conversion
tgt = input_ids
memory_bank = encoder_hidden_states
memory_mask = encoder_attention_mask
src_words = state.src
src_batch, src_len = src_words.size()
padding_idx = self.embeddings.padding_idx
# Decoder padding mask
tgt_words = tgt
tgt_batch, tgt_len = tgt_words.size()
tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1).expand(tgt_batch, tgt_len, tgt_len)
# Encoder padding mask
if memory_mask is not None:
src_len = memory_mask.size(-1)
src_pad_mask = memory_mask.expand(src_batch, tgt_len, src_len)
else:
src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1).expand(src_batch, tgt_len, src_len)
# Pass through the embeddings
emb = self.embeddings(input_ids)
output = self.pos_emb(emb, step)
assert emb.dim() == 3 # len x batch x embedding_dim
if state.cache is None:
saved_inputs = []
for i in range(self.num_layers):
prev_layer_input = None
if state.cache is None:
if state.previous_input is not None:
prev_layer_input = state.previous_layer_inputs[i]
output, all_input = self.transformer_layers[i](
output,
memory_bank,
src_pad_mask,
tgt_pad_mask,
previous_input=prev_layer_input,
layer_cache=state.cache["layer_{}".format(i)] if state.cache is not None else None,
step=step,
)
if state.cache is None:
saved_inputs.append(all_input)
if state.cache is None:
saved_inputs = torch.stack(saved_inputs)
output = self.layer_norm(output)
if state.cache is None:
state = state.update_state(tgt, saved_inputs)
# Decoders in transformers return a tuple. Beam search will fail
# if we don't follow this convention.
return output, state
def init_decoder_state(self, src, memory_bank, with_cache=False):
""" Init decoder state """
state = TransformerDecoderState(src)
if with_cache:
state._init_cache(memory_bank, self.num_layers)
return state
class PositionalEncoding(nn.Module):
def __init__(self, dropout, dim, max_len=5000):
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe = pe.unsqueeze(0)
super(PositionalEncoding, self).__init__()
self.register_buffer("pe", pe)
self.dropout = nn.Dropout(p=dropout)
self.dim = dim
def forward(self, emb, step=None):
emb = emb * math.sqrt(self.dim)
if step:
emb = emb + self.pe[:, step][:, None, :]
else:
emb = emb + self.pe[:, : emb.size(1)]
emb = self.dropout(emb)
return emb
def get_emb(self, emb):
return self.pe[:, : emb.size(1)]
class TransformerDecoderLayer(nn.Module):
"""
Args:
d_model (int): the dimension of keys/values/queries in
MultiHeadedAttention, also the input size of
the first-layer of the PositionwiseFeedForward.
heads (int): the number of heads for MultiHeadedAttention.
d_ff (int): the second-layer of the PositionwiseFeedForward.
dropout (float): dropout probability(0-1.0).
self_attn_type (string): type of self-attention scaled-dot, average
"""
def __init__(self, d_model, heads, d_ff, dropout):
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
self.context_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
self.drop = nn.Dropout(dropout)
mask = self._get_attn_subsequent_mask(MAX_SIZE)
# Register self.mask as a buffer in TransformerDecoderLayer, so
# it gets TransformerDecoderLayer's cuda behavior automatically.
self.register_buffer("mask", mask)
def forward(
self, inputs, memory_bank, src_pad_mask, tgt_pad_mask, previous_input=None, layer_cache=None, step=None,
):
"""
Args:
inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`
memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`
src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`
tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`
Returns:
(`FloatTensor`, `FloatTensor`, `FloatTensor`):
* output `[batch_size x 1 x model_dim]`
* attn `[batch_size x 1 x src_len]`
* all_input `[batch_size x current_step x model_dim]`
"""
dec_mask = torch.gt(tgt_pad_mask + self.mask[:, : tgt_pad_mask.size(1), : tgt_pad_mask.size(1)], 0)
input_norm = self.layer_norm_1(inputs)
all_input = input_norm
if previous_input is not None:
all_input = torch.cat((previous_input, input_norm), dim=1)
dec_mask = None
query = self.self_attn(all_input, all_input, input_norm, mask=dec_mask, layer_cache=layer_cache, type="self",)
query = self.drop(query) + inputs
query_norm = self.layer_norm_2(query)
mid = self.context_attn(
memory_bank, memory_bank, query_norm, mask=src_pad_mask, layer_cache=layer_cache, type="context",
)
output = self.feed_forward(self.drop(mid) + query)
return output, all_input
def _get_attn_subsequent_mask(self, size):
"""
Get an attention mask to avoid using the subsequent info.
Args:
size: int
Returns:
(`LongTensor`):
* subsequent_mask `[1 x size x size]`
"""
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype("uint8")
subsequent_mask = torch.from_numpy(subsequent_mask)
return subsequent_mask
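# For reference (illustrative), _get_attn_subsequent_mask(3) produces a uint8 tensor:
#   [[[0, 1, 1],
#     [0, 0, 1],
#     [0, 0, 0]]]
# i.e. position t may only attend to positions <= t once torch.gt(...) combines it
# with the target padding mask in TransformerDecoderLayer.forward().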
class MultiHeadedAttention(nn.Module):
"""
Multi-Head Attention module from
"Attention is All You Need"
:cite:`DBLP:journals/corr/VaswaniSPUJGKP17`.
Similar to standard `dot` attention but uses
    multiple attention distributions simultaneously
to select relevant items.
.. mermaid::
graph BT
A[key]
B[value]
C[query]
O[output]
subgraph Attn
D[Attn 1]
E[Attn 2]
F[Attn N]
end
A --> D
C --> D
A --> E
C --> E
A --> F
C --> F
D --> O
E --> O
F --> O
B --> O
Also includes several additional tricks.
Args:
head_count (int): number of parallel heads
model_dim (int): the dimension of keys/values/queries,
must be divisible by head_count
dropout (float): dropout parameter
"""
def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True):
assert model_dim % head_count == 0
self.dim_per_head = model_dim // head_count
self.model_dim = model_dim
super(MultiHeadedAttention, self).__init__()
self.head_count = head_count
self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head)
self.linear_values = nn.Linear(model_dim, head_count * self.dim_per_head)
self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.use_final_linear = use_final_linear
if self.use_final_linear:
self.final_linear = nn.Linear(model_dim, model_dim)
def forward(
self, key, value, query, mask=None, layer_cache=None, type=None, predefined_graph_1=None,
):
"""
Compute the context vector and the attention vectors.
Args:
key (`FloatTensor`): set of `key_len`
key vectors `[batch, key_len, dim]`
value (`FloatTensor`): set of `key_len`
value vectors `[batch, key_len, dim]`
query (`FloatTensor`): set of `query_len`
query vectors `[batch, query_len, dim]`
mask: binary mask indicating which keys have
non-zero attention `[batch, query_len, key_len]`
Returns:
(`FloatTensor`, `FloatTensor`) :
* output context vectors `[batch, query_len, dim]`
* one of the attention vectors `[batch, query_len, key_len]`
"""
batch_size = key.size(0)
dim_per_head = self.dim_per_head
head_count = self.head_count
def shape(x):
""" projection """
return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head)
# 1) Project key, value, and query.
if layer_cache is not None:
if type == "self":
query, key, value = (
self.linear_query(query),
self.linear_keys(query),
self.linear_values(query),
)
key = shape(key)
value = shape(value)
if layer_cache is not None:
device = key.device
if layer_cache["self_keys"] is not None:
key = torch.cat((layer_cache["self_keys"].to(device), key), dim=2)
if layer_cache["self_values"] is not None:
value = torch.cat((layer_cache["self_values"].to(device), value), dim=2)
layer_cache["self_keys"] = key
layer_cache["self_values"] = value
elif type == "context":
query = self.linear_query(query)
if layer_cache is not None:
if layer_cache["memory_keys"] is None:
key, value = self.linear_keys(key), self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key, value = (
layer_cache["memory_keys"],
layer_cache["memory_values"],
)
layer_cache["memory_keys"] = key
layer_cache["memory_values"] = value
else:
key, value = self.linear_keys(key), self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key = self.linear_keys(key)
value = self.linear_values(value)
query = self.linear_query(query)
key = shape(key)
value = shape(value)
query = shape(query)
# 2) Calculate and scale scores.
query = query / math.sqrt(dim_per_head)
scores = torch.matmul(query, key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1).expand_as(scores)
scores = scores.masked_fill(mask, -1e18)
# 3) Apply attention dropout and compute context vectors.
attn = self.softmax(scores)
if predefined_graph_1 is not None:
attn_masked = attn[:, -1] * predefined_graph_1
attn_masked = attn_masked / (torch.sum(attn_masked, 2).unsqueeze(2) + 1e-9)
attn = torch.cat([attn[:, :-1], attn_masked.unsqueeze(1)], 1)
drop_attn = self.dropout(attn)
if self.use_final_linear:
context = unshape(torch.matmul(drop_attn, value))
output = self.final_linear(context)
return output
else:
context = torch.matmul(drop_attn, value)
return context
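# Minimal usage sketch for MultiHeadedAttention (illustrative only; shapes follow the
# docstring above, the hyper-parameter values are made up):
#
#   attn = MultiHeadedAttention(head_count=8, model_dim=512, dropout=0.1)
#   x = torch.randn(2, 5, 512)            # batch x seq_len x model_dim
#   out = attn(x, x, x)                   # self-attention -> [2, 5, 512]
#   ctx = attn(mem, mem, x)               # cross-attention over a memory bank `mem`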
class DecoderState(object):
"""Interface for grouping together the current state of a recurrent
decoder. In the simplest case just represents the hidden state of
the model. But can also be used for implementing various forms of
input_feeding and non-recurrent models.
Modules need to implement this to utilize beam search decoding.
"""
def detach(self):
""" Need to document this """
self.hidden = tuple([_.detach() for _ in self.hidden])
self.input_feed = self.input_feed.detach()
def beam_update(self, idx, positions, beam_size):
""" Need to document this """
for e in self._all:
sizes = e.size()
br = sizes[1]
if len(sizes) == 3:
sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2])[:, :, idx]
else:
sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2], sizes[3])[:, :, idx]
sent_states.data.copy_(sent_states.data.index_select(1, positions))
def map_batch_fn(self, fn):
raise NotImplementedError()
class TransformerDecoderState(DecoderState):
""" Transformer Decoder state base class """
def __init__(self, src):
"""
Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch).
"""
self.src = src
self.previous_input = None
self.previous_layer_inputs = None
self.cache = None
@property
def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
if self.previous_input is not None and self.previous_layer_inputs is not None:
return (self.previous_input, self.previous_layer_inputs, self.src)
else:
return (self.src,)
def detach(self):
if self.previous_input is not None:
self.previous_input = self.previous_input.detach()
if self.previous_layer_inputs is not None:
self.previous_layer_inputs = self.previous_layer_inputs.detach()
self.src = self.src.detach()
def update_state(self, new_input, previous_layer_inputs):
state = TransformerDecoderState(self.src)
state.previous_input = new_input
state.previous_layer_inputs = previous_layer_inputs
return state
def _init_cache(self, memory_bank, num_layers):
self.cache = {}
for l in range(num_layers):
layer_cache = {"memory_keys": None, "memory_values": None}
layer_cache["self_keys"] = None
layer_cache["self_values"] = None
self.cache["layer_{}".format(l)] = layer_cache
def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.src = self.src.data.repeat(1, beam_size, 1)
def map_batch_fn(self, fn):
def _recursive_map(struct, batch_dim=0):
for k, v in struct.items():
if v is not None:
if isinstance(v, dict):
_recursive_map(v)
else:
struct[k] = fn(v, batch_dim)
self.src = fn(self.src, 0)
if self.cache is not None:
_recursive_map(self.cache)
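# For reference, gelu() below is the tanh approximation of the Gaussian Error
# Linear Unit:
#   GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))
# e.g. gelu(torch.tensor(0.0)) == 0.0 and gelu(torch.tensor(1.0)) ~= 0.841.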
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
#
# TRANSLATOR
# The following code is used to generate summaries using the
# pre-trained weights and beam search.
#
def build_predictor(args, tokenizer, symbols, model, logger=None):
# we should be able to refactor the global scorer a lot
scorer = GNMTGlobalScorer(args.alpha, length_penalty="wu")
translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger)
return translator
class GNMTGlobalScorer(object):
"""
NMT re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`
Args:
alpha (float): length parameter
beta (float): coverage parameter
"""
def __init__(self, alpha, length_penalty):
self.alpha = alpha
penalty_builder = PenaltyBuilder(length_penalty)
self.length_penalty = penalty_builder.length_penalty()
def score(self, beam, logprobs):
"""
Rescores a prediction based on penalty functions
"""
normalized_probs = self.length_penalty(beam, logprobs, self.alpha)
return normalized_probs
class PenaltyBuilder(object):
"""
Returns the Length and Coverage Penalty function for Beam Search.
Args:
length_pen (str): option name of length pen
cov_pen (str): option name of cov pen
"""
def __init__(self, length_pen):
self.length_pen = length_pen
def length_penalty(self):
if self.length_pen == "wu":
return self.length_wu
elif self.length_pen == "avg":
return self.length_average
else:
return self.length_none
"""
Below are all the different penalty terms implemented so far
"""
def length_wu(self, beam, logprobs, alpha=0.0):
"""
NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
modifier = ((5 + len(beam.next_ys)) ** alpha) / ((5 + 1) ** alpha)
return logprobs / modifier
def length_average(self, beam, logprobs, alpha=0.0):
"""
Returns the average probability of tokens in a sequence.
"""
return logprobs / len(beam.next_ys)
def length_none(self, beam, logprobs, alpha=0.0, beta=0.0):
"""
Returns unmodified scores.
"""
return logprobs
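# Example of the "wu" length penalty above (illustrative numbers): with alpha = 0.9
# and a hypothesis of length 10, the log-probabilities are divided by
# ((5 + 10) ** 0.9) / ((5 + 1) ** 0.9) ~= 2.28, so longer hypotheses are penalised
# less aggressively than with a plain length average.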
class Translator(object):
"""
Uses a model to translate a batch of sentences.
Args:
model (:obj:`onmt.modules.NMTModel`):
NMT model to use for translation
fields (dict of Fields): data fields
beam_size (int): size of beam to use
n_best (int): number of translations produced
max_length (int): maximum length output to produce
       global_scorer (:obj:`GlobalScorer`):
object to rescore final translations
copy_attn (bool): use copy attention during translation
beam_trace (bool): trace beam search for debugging
logger(logging.Logger): logger.
"""
def __init__(self, args, model, vocab, symbols, global_scorer=None, logger=None):
self.logger = logger
self.args = args
self.model = model
self.generator = self.model.generator
self.vocab = vocab
self.symbols = symbols
self.start_token = symbols["BOS"]
self.end_token = symbols["EOS"]
self.global_scorer = global_scorer
self.beam_size = args.beam_size
self.min_length = args.min_length
self.max_length = args.max_length
def translate(self, batch, step, attn_debug=False):
""" Generates summaries from one batch of data.
"""
self.model.eval()
with torch.no_grad():
batch_data = self.translate_batch(batch)
translations = self.from_batch(batch_data)
return translations
def translate_batch(self, batch, fast=False):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset.
"""
with torch.no_grad():
return self._fast_translate_batch(batch, self.max_length, min_length=self.min_length)
    # The beam search itself lives here; translate_batch() above is a thin public
    # wrapper around this method.
def _fast_translate_batch(self, batch, max_length, min_length=0):
""" Beam Search using the encoder inputs contained in `batch`.
"""
        # The batch object carries its own batch_size attribute rather than exposing
        # the sizes of its tensors directly.
beam_size = self.beam_size
batch_size = batch.batch_size
src = batch.src
segs = batch.segs
mask_src = batch.mask_src
src_features = self.model.bert(src, segs, mask_src)
dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True)
device = src_features.device
# Tile states and memory beam_size times.
dec_states.map_batch_fn(lambda state, dim: tile(state, beam_size, dim=dim))
src_features = tile(src_features, beam_size, dim=0)
batch_offset = torch.arange(batch_size, dtype=torch.long, device=device)
beam_offset = torch.arange(0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device)
alive_seq = torch.full([batch_size * beam_size, 1], self.start_token, dtype=torch.long, device=device)
# Give full probability to the first beam on the first step.
topk_log_probs = torch.tensor([0.0] + [float("-inf")] * (beam_size - 1), device=device).repeat(batch_size)
# Structure that holds finished hypotheses.
hypotheses = [[] for _ in range(batch_size)] # noqa: F812
results = {}
results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812
results["scores"] = [[] for _ in range(batch_size)] # noqa: F812
results["gold_score"] = [0] * batch_size
results["batch"] = batch
for step in range(max_length):
decoder_input = alive_seq[:, -1].view(1, -1)
# Decoder forward.
decoder_input = decoder_input.transpose(0, 1)
dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states, step=step)
# Generator forward.
log_probs = self.generator.forward(dec_out.transpose(0, 1).squeeze(0))
vocab_size = log_probs.size(-1)
if step < min_length:
log_probs[:, self.end_token] = -1e20
# Multiply probs by the beam probability.
log_probs += topk_log_probs.view(-1).unsqueeze(1)
alpha = self.global_scorer.alpha
length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha
# Flatten probs into a list of possibilities.
curr_scores = log_probs / length_penalty
if self.args.block_trigram:
cur_len = alive_seq.size(1)
if cur_len > 3:
for i in range(alive_seq.size(0)):
fail = False
words = [int(w) for w in alive_seq[i]]
words = [self.vocab.ids_to_tokens[w] for w in words]
words = " ".join(words).replace(" ##", "").split()
if len(words) <= 3:
continue
trigrams = [(words[i - 1], words[i], words[i + 1]) for i in range(1, len(words) - 1)]
trigram = tuple(trigrams[-1])
if trigram in trigrams[:-1]:
fail = True
if fail:
curr_scores[i] = -10e20
curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)
topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)
# Recover log probs.
topk_log_probs = topk_scores * length_penalty
# Resolve beam origin and true word ids.
            topk_beam_index = topk_ids // vocab_size  # integer floor division
topk_ids = topk_ids.fmod(vocab_size)
# Map beam_index to batch_index in the flat representation.
batch_index = topk_beam_index + beam_offset[: topk_beam_index.size(0)].unsqueeze(1)
select_indices = batch_index.view(-1)
# Append last prediction.
alive_seq = torch.cat([alive_seq.index_select(0, select_indices), topk_ids.view(-1, 1)], -1)
is_finished = topk_ids.eq(self.end_token)
if step + 1 == max_length:
is_finished.fill_(1)
# End condition is top beam is finished.
end_condition = is_finished[:, 0].eq(1)
# Save finished hypotheses.
if is_finished.any():
predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))
for i in range(is_finished.size(0)):
b = batch_offset[i]
if end_condition[i]:
is_finished[i].fill_(1)
finished_hyp = is_finished[i].nonzero().view(-1)
# Store finished hypotheses for this batch.
for j in finished_hyp:
hypotheses[b].append((topk_scores[i, j], predictions[i, j, 1:]))
# If the batch reached the end, save the n_best hypotheses.
if end_condition[i]:
best_hyp = sorted(hypotheses[b], key=lambda x: x[0], reverse=True)
score, pred = best_hyp[0]
results["scores"][b].append(score)
results["predictions"][b].append(pred)
non_finished = end_condition.eq(0).nonzero().view(-1)
# If all sentences are translated, no need to go further.
if len(non_finished) == 0:
break
# Remove finished batches for the next step.
topk_log_probs = topk_log_probs.index_select(0, non_finished)
batch_index = batch_index.index_select(0, non_finished)
batch_offset = batch_offset.index_select(0, non_finished)
alive_seq = predictions.index_select(0, non_finished).view(-1, alive_seq.size(-1))
# Reorder states.
select_indices = batch_index.view(-1)
src_features = src_features.index_select(0, select_indices)
dec_states.map_batch_fn(lambda state, dim: state.index_select(dim, select_indices))
return results
def from_batch(self, translation_batch):
batch = translation_batch["batch"]
assert len(translation_batch["gold_score"]) == len(translation_batch["predictions"])
batch_size = batch.batch_size
preds, _, _, tgt_str, src = (
translation_batch["predictions"],
translation_batch["scores"],
translation_batch["gold_score"],
batch.tgt_str,
batch.src,
)
translations = []
for b in range(batch_size):
pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]])
pred_sents = " ".join(pred_sents).replace(" ##", "")
gold_sent = " ".join(tgt_str[b].split())
raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500]
raw_src = " ".join(raw_src)
translation = (pred_sents, gold_sent, raw_src)
translations.append(translation)
return translations
def tile(x, count, dim=0):
"""
Tiles x on dimension dim count times.
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.view(batch, -1).transpose(0, 1).repeat(count, 1).transpose(0, 1).contiguous().view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x
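# Illustrative behaviour of tile() (not part of the original module):
#
#   >>> x = torch.arange(6).view(2, 3)
#   >>> tile(x, 2, dim=0)
#   tensor([[0, 1, 2],
#           [0, 1, 2],
#           [3, 4, 5],
#           [3, 4, 5]])
#
# Each entry along `dim` is repeated `count` times consecutively, which is how the
# translator expands a batch to batch_size * beam_size rows before beam search.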
#
# Optimizer for training. We keep this here in case we want to add
# a finetuning script.
#
class BertSumOptimizer(object):
""" Specific optimizer for BertSum.
As described in [1], the authors fine-tune BertSum for abstractive
summarization using two Adam Optimizers with different warm-up steps and
learning rate. They also use a custom learning rate scheduler.
[1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
arXiv preprint arXiv:1908.08345 (2019).
"""
def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8):
self.encoder = model.encoder
self.decoder = model.decoder
self.lr = lr
self.warmup_steps = warmup_steps
self.optimizers = {
"encoder": torch.optim.Adam(
model.encoder.parameters(), lr=lr["encoder"], betas=(beta_1, beta_2), eps=eps,
),
"decoder": torch.optim.Adam(
model.decoder.parameters(), lr=lr["decoder"], betas=(beta_1, beta_2), eps=eps,
),
}
self._step = 0
self.current_learning_rates = {}
def _update_rate(self, stack):
return self.lr[stack] * min(self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-1.5))
def zero_grad(self):
        for optimizer in self.optimizers.values():
            optimizer.zero_grad()
def step(self):
self._step += 1
for stack, optimizer in self.optimizers.items():
new_rate = self._update_rate(stack)
for param_group in optimizer.param_groups:
param_group["lr"] = new_rate
optimizer.step()
self.current_learning_rates[stack] = new_rate
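# The schedule in _update_rate() is the Noam-style warm-up used by BertSum:
# lr[stack] * min(step ** -0.5, step * warmup_steps[stack] ** -1.5), applied with a
# separate base learning rate and warm-up length for the encoder and the decoder.
# Illustrative usage (the hyper-parameter values here are made up, not the paper's):
#
#   lr = {"encoder": 2e-3, "decoder": 0.1}
#   warmup_steps = {"encoder": 20000, "decoder": 10000}
#   optimizer = BertSumOptimizer(model, lr, warmup_steps)
#   loss.backward()
#   optimizer.step()
#   optimizer.zero_grad()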
the-stack_106_20512
"""
Module for executing git commands, sending results back to the handlers
"""
import os
import subprocess
from subprocess import Popen, PIPE
class Git:
"""
A single parent class containing all of the individual git methods in it.
"""
def __init__(self, root_dir, *args, **kwargs):
super(Git, self).__init__(*args, **kwargs)
self.root_dir = os.path.realpath(os.path.expanduser(root_dir))
def status(self, current_path):
"""
Execute git status command & return the result.
"""
p = Popen(
["git", "status", "--porcelain"],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, current_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
result = []
line_array = my_output.decode("utf-8").splitlines()
for line in line_array:
to1 = None
from_path = line[3:]
if line[0] == "R":
to0 = line[3:].split(" -> ")
to1 = to0[len(to0) - 1]
else:
to1 = line[3:]
if to1.startswith('"'):
to1 = to1[1:]
if to1.endswith('"'):
to1 = to1[:-1]
result.append(
{"x": line[0], "y": line[1], "to": to1, "from": from_path}
)
return {"code": p.returncode, "files": result}
else:
return {
"code": p.returncode,
"command": "git status --porcelain",
"message": my_error.decode("utf-8"),
}
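    # Example of the parsing above (an illustrative sketch of the current behaviour):
    # the porcelain line " M src/app.py" yields
    # {"x": " ", "y": "M", "to": "src/app.py", "from": "src/app.py"}, while a rename
    # line "R  old.py -> new.py" yields to="new.py" and from="old.py -> new.py".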
def log(self, current_path):
"""
Execute git log command & return the result.
"""
p = Popen(
["git", "log", "--pretty=format:%H%n%an%n%ar%n%s", "-10"],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, current_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
result = []
line_array = my_output.decode("utf-8").splitlines()
i = 0
PREVIOUS_COMMIT_OFFSET = 4
while i < len(line_array):
if i + PREVIOUS_COMMIT_OFFSET < len(line_array):
result.append(
{
"commit": line_array[i],
"author": line_array[i + 1],
"date": line_array[i + 2],
"commit_msg": line_array[i + 3],
"pre_commit": line_array[i + PREVIOUS_COMMIT_OFFSET],
}
)
else:
result.append(
{
"commit": line_array[i],
"author": line_array[i + 1],
"date": line_array[i + 2],
"commit_msg": line_array[i + 3],
"pre_commit": "",
}
)
i += PREVIOUS_COMMIT_OFFSET
return {"code": p.returncode, "commits": result}
else:
return {"code": p.returncode, "message": my_error.decode("utf-8")}
def detailed_log(self, selected_hash, current_path):
"""
Execute git log -1 --stat --numstat --oneline command (used to get
insertions & deletions per file) & return the result.
"""
p = Popen(
["git", "log", "-1", "--stat", "--numstat", "--oneline", selected_hash],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, current_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
result = []
note = [0] * 3
count = 0
temp = ""
line_array = my_output.decode("utf-8").splitlines()
length = len(line_array)
INSERTION_INDEX = 0
DELETION_INDEX = 1
MODIFIED_FILE_PATH_INDEX = 2
if length > 1:
temp = line_array[length - 1]
words = temp.split()
for i in range(0, len(words)):
if words[i].isdigit():
note[count] = words[i]
count += 1
for num in range(1, int(length / 2)):
line_info = line_array[num].split()
words = line_info[2].split("/")
length = len(words)
result.append(
{
"modified_file_path": line_info[MODIFIED_FILE_PATH_INDEX],
"modified_file_name": words[length - 1],
"insertion": line_info[INSERTION_INDEX],
"deletion": line_info[DELETION_INDEX],
}
)
if note[2] == 0 and length > 1:
if "-" in temp:
exchange = note[1]
note[1] = note[2]
note[2] = exchange
return {
"code": p.returncode,
"modified_file_note": temp,
"modified_files_count": note[0],
"number_of_insertions": note[1],
"number_of_deletions": note[2],
"modified_files": result,
}
else:
return {
"code": p.returncode,
"command": "git log_1",
"message": my_error.decode("utf-8"),
}
def diff(self, top_repo_path):
"""
Execute git diff command & return the result.
"""
p = Popen(
["git", "diff", "--numstat"], stdout=PIPE, stderr=PIPE, cwd=top_repo_path
)
my_output, my_error = p.communicate()
if p.returncode == 0:
result = []
line_array = my_output.decode("utf-8").splitlines()
for line in line_array:
linesplit = line.split()
result.append(
{
"insertions": linesplit[0],
"deletions": linesplit[1],
"filename": linesplit[2],
}
)
return {"code": p.returncode, "result": result}
else:
return {"code": p.returncode, "message": my_error.decode("utf-8")}
def branch(self, current_path):
"""
Execute git branch -a command & return the result.
"""
p = Popen(
["git", "branch", "-a"],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, current_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
result = []
line_array = my_output.decode("utf-8").splitlines()
"""By comparing strings 'remotes/' to determine if a branch is
local or remote, should have better ways
"""
for line_full in line_array:
line_cut = (line_full.split(" -> "),)
tag = None
current = False
remote = False
if len(line_cut[0]) > 1:
tag = line_cut[0][1]
line = (line_cut[0][0],)
if line_full[0] == "*":
current = True
if (len(line_full) >= 10) and (line_full[2:10] == "remotes/"):
remote = True
result.append(
{
"current": current,
"remote": remote,
"name": line[0][10:],
"tag": tag,
}
)
else:
result.append(
{
"current": current,
"remote": remote,
"name": line_full[2:],
"tag": tag,
}
)
return {"code": p.returncode, "branches": result}
else:
return {
"code": p.returncode,
"command": "git branch -a",
"message": my_error.decode("utf-8"),
}
def show_top_level(self, current_path):
"""
        Execute git rev-parse --show-toplevel command & return the result.
"""
p = Popen(
["git", "rev-parse", "--show-toplevel"],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, current_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
result = {
"code": p.returncode,
"top_repo_path": my_output.decode("utf-8").strip("\n"),
}
return result
else:
return {
"code": p.returncode,
"command": "git rev-parse --show-toplevel",
"message": my_error.decode("utf-8"),
}
def show_prefix(self, current_path):
"""
        Execute git rev-parse --show-prefix command & return the result.
"""
p = Popen(
["git", "rev-parse", "--show-prefix"],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, current_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
result = {
"code": p.returncode,
"under_repo_path": my_output.decode("utf-8").strip("\n"),
}
return result
else:
return {
"code": p.returncode,
"command": "git rev-parse --show-prefix",
"message": my_error.decode("utf-8"),
}
def add(self, filename, top_repo_path):
"""
        Execute git add <filename> command & return the result.
"""
my_output = subprocess.check_output(["git", "add", filename], cwd=top_repo_path)
return my_output
def add_all(self, top_repo_path):
"""
Execute git add all command & return the result.
"""
my_output = subprocess.check_output(["git", "add", "-A"], cwd=top_repo_path)
return my_output
def add_all_untracked(self, top_repo_path):
"""
        Execute an interactive git add to stage all untracked files & return the result.
"""
e = 'echo "a\n*\nq\n" | git add -i'
my_output = subprocess.call(e, shell=True, cwd=top_repo_path)
return {"result": my_output}
def reset(self, filename, top_repo_path):
"""
Execute git reset <filename> command & return the result.
"""
my_output = subprocess.check_output(
["git", "reset", filename], cwd=top_repo_path
)
return my_output
def reset_all(self, top_repo_path):
"""
Execute git reset command & return the result.
"""
my_output = subprocess.check_output(["git", "reset"], cwd=top_repo_path)
return my_output
def delete_commit(self, commit_id, top_repo_path):
"""
        Revert a specified commit (git revert --no-commit) & return the result.
"""
my_output = subprocess.check_output(["git", "revert", "--no-commit", commit_id], cwd=top_repo_path)
return my_output
def reset_to_commit(self, commit_id, top_repo_path):
"""
Reset the current branch to a specific past commit.
"""
my_output = subprocess.check_output(["git", "reset", "--hard", commit_id], cwd=top_repo_path)
return my_output
def checkout_new_branch(self, branchname, current_path):
"""
        Execute git checkout -b <branch-name> command & return the result.
"""
p = Popen(
["git", "checkout", "-b", branchname],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, current_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
return {"code": p.returncode, "message": my_output.decode("utf-8")}
else:
return {
"code": p.returncode,
"command": "git checkout " + "-b" + branchname,
"message": my_error.decode("utf-8"),
}
def checkout_branch(self, branchname, current_path):
"""
Execute git checkout <branch-name> command & return the result.
"""
p = Popen(
["git", "checkout", branchname],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, current_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
return {"code": p.returncode, "message": my_output.decode("utf-8")}
else:
return {
"code": p.returncode,
"command": "git checkout " + branchname,
"message": my_error.decode("utf-8"),
}
def checkout(self, filename, top_repo_path):
"""
Execute git checkout command for the filename & return the result.
"""
my_output = subprocess.check_output(
["git", "checkout", "--", filename], cwd=top_repo_path
)
return my_output
def checkout_all(self, top_repo_path):
"""
Execute git checkout command & return the result.
"""
my_output = subprocess.check_output(
["git", "checkout", "--", "."], cwd=top_repo_path
)
return my_output
def commit(self, commit_msg, top_repo_path):
"""
Execute git commit <filename> command & return the result.
"""
my_output = subprocess.check_output(
["git", "commit", "-m", commit_msg], cwd=top_repo_path
)
return my_output
def pull(self, origin, master, curr_fb_path):
"""
Execute git pull <branch1> <branch2> command & return the result.
"""
p = Popen(
["git", "pull", origin, master, "--no-commit"],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, curr_fb_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
return {"code": p.returncode}
else:
return {
"code": p.returncode,
"command": "git pull " + origin + " " + master + " --no-commit",
"message": my_error.decode("utf-8"),
}
def push(self, origin, master, curr_fb_path):
"""
Execute git push <branch1> <branch2> command & return the result.
"""
p = Popen(
["git", "push", origin, master],
stdout=PIPE,
stderr=PIPE,
cwd=os.path.join(self.root_dir, curr_fb_path),
)
my_output, my_error = p.communicate()
if p.returncode == 0:
return {"code": p.returncode}
else:
return {
"code": p.returncode,
"command": "git push " + origin + " " + master,
"message": my_error.decode("utf-8"),
}
def init(self, current_path):
"""
Execute git init command & return the result.
"""
my_output = subprocess.check_output(
["git", "init"], cwd=os.path.join(self.root_dir, current_path)
)
return my_output
the-stack_106_20514
"""
See //docs/search.md for overview.
"""
import grpc
from sqlalchemy.sql import func, or_
from couchers import errors
from couchers.db import session_scope
from couchers.models import Cluster, Event, EventOccurrence, Node, Page, PageType, PageVersion, Reference, User
from couchers.servicers.api import (
hostingstatus2sql,
parkingdetails2sql,
sleepingarrangement2sql,
smokinglocation2sql,
user_model_to_pb,
)
from couchers.servicers.communities import community_to_pb
from couchers.servicers.events import event_to_pb
from couchers.servicers.groups import group_to_pb
from couchers.servicers.pages import page_to_pb
from couchers.utils import create_coordinate, last_active_coarsen, to_aware_datetime
from proto import search_pb2, search_pb2_grpc
# searches are a bit expensive, we'd rather send back a bunch of results at once than lots of small pages
MAX_PAGINATION_LENGTH = 50
REGCONFIG = "english"
TRI_SIMILARITY_THRESHOLD = 0.6
TRI_SIMILARITY_WEIGHT = 5
def _join_with_space(coalesces):
    # the objects in coalesces are SQLAlchemy column expressions, not plain strings, so we can't just do " ".join(coalesces)
if not coalesces:
return ""
out = coalesces[0]
for coalesce in coalesces[1:]:
out += " " + coalesce
return out
def _build_tsv(A, B=[], C=[], D=[]):
"""
Given lists for A, B, C, and D, builds a tsvector from them.
"""
tsv = func.setweight(func.to_tsvector(REGCONFIG, _join_with_space([func.coalesce(bit, "") for bit in A])), "A")
if B:
tsv = tsv.concat(
func.setweight(func.to_tsvector(REGCONFIG, _join_with_space([func.coalesce(bit, "") for bit in B])), "B")
)
if C:
tsv = tsv.concat(
func.setweight(func.to_tsvector(REGCONFIG, _join_with_space([func.coalesce(bit, "") for bit in C])), "C")
)
if D:
tsv = tsv.concat(
func.setweight(func.to_tsvector(REGCONFIG, _join_with_space([func.coalesce(bit, "") for bit in D])), "D")
)
return tsv
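# Roughly, for A=[User.username] and B=[User.city] the expression built above renders
# to SQL along these lines (illustrative; the exact text is generated by SQLAlchemy):
#
#   setweight(to_tsvector('english', coalesce(username, '')), 'A')
#       || setweight(to_tsvector('english', coalesce(city, '')), 'B')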
def _build_doc(A, B=[], C=[], D=[]):
"""
    Builds the raw document (without to_tsvector and weighting), used for extracting the snippet.
"""
doc = _join_with_space([func.coalesce(bit, "") for bit in A])
if B:
doc += " " + _join_with_space([func.coalesce(bit, "") for bit in B])
if C:
doc += " " + _join_with_space([func.coalesce(bit, "") for bit in C])
if D:
doc += " " + _join_with_space([func.coalesce(bit, "") for bit in D])
return doc
def _similarity(query, text):
return func.word_similarity(func.unaccent(query), func.unaccent(text))
def _gen_search_elements(query, title_only, next_rank, page_size, A, B=[], C=[], D=[]):
"""
Given a query and four sets of fields, (A, B, C, D), generates a bunch of postgres expressions for full text search.
The four sets are in decreasing order of "importance" for ranking.
A should be the "title", the others can be anything.
    If title_only=True, we only perform a trigram search against A.
"""
if not title_only:
# a postgres tsquery object that can be used to match against a tsvector
tsq = func.websearch_to_tsquery(REGCONFIG, query)
# the tsvector object that we want to search against with our tsquery
tsv = _build_tsv(A, B, C, D)
# document to generate snippet from
doc = _build_doc(A, B, C, D)
title = _build_doc(A)
# trigram based text similarity between title and query string
sim = _similarity(query, title)
# ranking algo, weigh the similarity a lot, the text-based ranking less
rank = (TRI_SIMILARITY_WEIGHT * sim + func.ts_rank_cd(tsv, tsq)).label("rank")
# the snippet with results highlighted
snippet = func.ts_headline(REGCONFIG, doc, tsq, "StartSel=**,StopSel=**").label("snippet")
def do_search_query(orig_query):
"""
Does the right search filtering, limiting, and ordering for the query
"""
return (
orig_query.filter(or_(tsv.op("@@")(tsq), sim > TRI_SIMILARITY_THRESHOLD))
.filter(rank <= next_rank if next_rank is not None else True)
.order_by(rank.desc())
.limit(page_size + 1)
.all()
)
else:
title = _build_doc(A)
# trigram based text similarity between title and query string
sim = _similarity(query, title)
# ranking algo, weigh the similarity a lot, the text-based ranking less
rank = sim.label("rank")
# used only for headline
tsq = func.websearch_to_tsquery(REGCONFIG, query)
doc = _build_doc(A, B, C, D)
# the snippet with results highlighted
snippet = func.ts_headline(REGCONFIG, doc, tsq, "StartSel=**,StopSel=**").label("snippet")
def do_search_query(orig_query):
"""
Does the right search filtering, limiting, and ordering for the query
"""
return (
orig_query.filter(sim > TRI_SIMILARITY_THRESHOLD)
.filter(rank <= next_rank if next_rank is not None else True)
.order_by(rank.desc())
.limit(page_size + 1)
.all()
)
return rank, snippet, do_search_query
def _search_users(session, search_query, title_only, next_rank, page_size, context, include_users):
if not include_users:
return []
rank, snippet, do_search_query = _gen_search_elements(
search_query,
title_only,
next_rank,
page_size,
[User.username, User.name],
[User.city],
[User.about_me],
[User.my_travels, User.things_i_like, User.about_place, User.additional_information],
)
users = do_search_query(session.query(User, rank, snippet).filter_users(context))
return [
search_pb2.Result(
rank=rank,
user=user_model_to_pb(page, session, context),
snippet=snippet,
)
for page, rank, snippet in users
]
def _search_pages(session, search_query, title_only, next_rank, page_size, context, include_places, include_guides):
rank, snippet, do_search_query = _gen_search_elements(
search_query,
title_only,
next_rank,
page_size,
[PageVersion.title],
[PageVersion.address],
[],
[PageVersion.content],
)
if not include_places and not include_guides:
return []
latest_pages = (
session.query(func.max(PageVersion.id).label("id"))
.join(Page, Page.id == PageVersion.page_id)
.filter(
or_(
(Page.type == PageType.place) if include_places else False,
(Page.type == PageType.guide) if include_guides else False,
)
)
.group_by(PageVersion.page_id)
.subquery()
)
pages = do_search_query(
session.query(Page, rank, snippet)
.join(PageVersion, PageVersion.page_id == Page.id)
.join(latest_pages, latest_pages.c.id == PageVersion.id)
)
return [
search_pb2.Result(
rank=rank,
place=page_to_pb(page, context) if page.type == PageType.place else None,
guide=page_to_pb(page, context) if page.type == PageType.guide else None,
snippet=snippet,
)
for page, rank, snippet in pages
]
def _search_events(session, search_query, title_only, next_rank, page_size, context):
rank, snippet, do_search_query = _gen_search_elements(
search_query,
title_only,
next_rank,
page_size,
[Event.title],
[EventOccurrence.address, EventOccurrence.link],
[],
[EventOccurrence.content],
)
occurrences = do_search_query(
session.query(EventOccurrence, rank, snippet)
.join(Event, Event.id == EventOccurrence.event_id)
.filter(EventOccurrence.end_time >= func.now())
)
return [
search_pb2.Result(
rank=rank,
event=event_to_pb(occurrence, context),
snippet=snippet,
)
for occurrence, rank, snippet in occurrences
]
def _search_clusters(
session, search_query, title_only, next_rank, page_size, context, include_communities, include_groups
):
if not include_communities and not include_groups:
return []
rank, snippet, do_search_query = _gen_search_elements(
search_query,
title_only,
next_rank,
page_size,
[Cluster.name],
[PageVersion.address, PageVersion.title],
[Cluster.description],
[PageVersion.content],
)
latest_pages = (
session.query(func.max(PageVersion.id).label("id"))
.join(Page, Page.id == PageVersion.page_id)
.filter(Page.type == PageType.main_page)
.group_by(PageVersion.page_id)
.subquery()
)
clusters = do_search_query(
session.query(Cluster, rank, snippet)
.join(Page, Page.owner_cluster_id == Cluster.id)
.join(PageVersion, PageVersion.page_id == Page.id)
.join(latest_pages, latest_pages.c.id == PageVersion.id)
.filter(Cluster.is_official_cluster if include_communities and not include_groups else True)
.filter(~Cluster.is_official_cluster if not include_communities and include_groups else True)
)
return [
search_pb2.Result(
rank=rank,
community=community_to_pb(cluster.official_cluster_for_node, context)
if cluster.is_official_cluster
else None,
group=group_to_pb(cluster, context) if not cluster.is_official_cluster else None,
snippet=snippet,
)
for cluster, rank, snippet in clusters
]
class Search(search_pb2_grpc.SearchServicer):
def Search(self, request, context):
page_size = min(MAX_PAGINATION_LENGTH, request.page_size or MAX_PAGINATION_LENGTH)
# this is not an ideal page token, some results have equal rank (unlikely)
next_rank = float(request.page_token) if request.page_token else None
with session_scope() as session:
all_results = (
_search_users(
session,
request.query,
request.title_only,
next_rank,
page_size,
context,
request.include_users,
)
+ _search_pages(
session,
request.query,
request.title_only,
next_rank,
page_size,
context,
request.include_places,
request.include_guides,
)
+ _search_events(
session,
request.query,
request.title_only,
next_rank,
page_size,
context,
)
+ _search_clusters(
session,
request.query,
request.title_only,
next_rank,
page_size,
context,
request.include_communities,
request.include_groups,
)
)
all_results.sort(key=lambda result: result.rank, reverse=True)
return search_pb2.SearchRes(
results=all_results[:page_size],
next_page_token=str(all_results[page_size].rank) if len(all_results) > page_size else None,
)
def UserSearch(self, request, context):
with session_scope() as session:
query = session.query(User).filter_users(context)
if request.HasField("query"):
if request.query_name_only:
query = query.filter(
or_(
User.name.ilike(f"%{request.query.value}%"), User.username.ilike(f"%{request.query.value}%")
)
)
else:
query = query.filter(
or_(
User.name.ilike(f"%{request.query.value}%"),
User.username.ilike(f"%{request.query.value}%"),
User.city.ilike(f"%{request.query.value}%"),
User.hometown.ilike(f"%{request.query.value}%"),
User.about_me.ilike(f"%{request.query.value}%"),
User.my_travels.ilike(f"%{request.query.value}%"),
User.things_i_like.ilike(f"%{request.query.value}%"),
User.about_place.ilike(f"%{request.query.value}%"),
User.additional_information.ilike(f"%{request.query.value}%"),
)
)
if request.HasField("last_active"):
raw_dt = to_aware_datetime(request.last_active)
query = query.filter(User.last_active >= last_active_coarsen(raw_dt))
if request.HasField("gender"):
query = query.filter(User.gender.ilike(f"%{request.gender.value}%"))
if len(request.hosting_status_filter) > 0:
query = query.filter(
User.hosting_status.in_([hostingstatus2sql[status] for status in request.hosting_status_filter])
)
if len(request.smoking_location_filter) > 0:
query = query.filter(
User.smoking_allowed.in_([smokinglocation2sql[loc] for loc in request.smoking_location_filter])
)
if len(request.sleeping_arrangement_filter) > 0:
query = query.filter(
User.sleeping_arrangement.in_(
[sleepingarrangement2sql[arr] for arr in request.sleeping_arrangement_filter]
)
)
if len(request.parking_details_filter) > 0:
query = query.filter(
User.parking_details.in_([parkingdetails2sql[det] for det in request.parking_details_filter])
)
if request.HasField("guests"):
query = query.filter(User.max_guests >= request.guests.value)
if request.HasField("last_minute"):
query = query.filter(User.last_minute == request.last_minute.value)
if request.HasField("has_pets"):
query = query.filter(User.has_pets == request.has_pets.value)
if request.HasField("accepts_pets"):
query = query.filter(User.accepts_pets == request.accepts_pets.value)
if request.HasField("has_kids"):
query = query.filter(User.has_kids == request.has_kids.value)
if request.HasField("accepts_kids"):
query = query.filter(User.accepts_kids == request.accepts_kids.value)
if request.HasField("has_housemates"):
query = query.filter(User.has_housemates == request.has_housemates.value)
if request.HasField("wheelchair_accessible"):
query = query.filter(User.wheelchair_accessible == request.wheelchair_accessible.value)
if request.HasField("smokes_at_home"):
query = query.filter(User.smokes_at_home == request.smokes_at_home.value)
if request.HasField("drinking_allowed"):
query = query.filter(User.drinking_allowed == request.drinking_allowed.value)
if request.HasField("drinks_at_home"):
query = query.filter(User.drinks_at_home == request.drinks_at_home.value)
if request.HasField("parking"):
query = query.filter(User.parking == request.parking.value)
if request.HasField("camping_ok"):
query = query.filter(User.camping_ok == request.camping_ok.value)
if request.HasField("search_in_area"):
                # EPSG4326 measures distance in decimal degrees
# we want to check whether two circles overlap, so check if the distance between their centers is less
# than the sum of their radii, divided by 111111 m ~= 1 degree (at the equator)
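                # e.g. a host with geom_radius ~= 1000 m matched against a 2 km search
                # radius requires the two centres to be within
                # (1000 + 2000) / 111111 ~= 0.027 degrees of each other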
search_point = create_coordinate(request.search_in_area.lat, request.search_in_area.lng)
query = query.filter(
func.ST_DWithin(
User.geom, search_point, (User.geom_radius + request.search_in_area.radius) / 111111
)
)
if request.HasField("search_in_community_id"):
# could do a join here as well, but this is just simpler
node = session.query(Node).filter(Node.id == request.search_in_community_id).one_or_none()
if not node:
context.abort(grpc.StatusCode.NOT_FOUND, errors.COMMUNITY_NOT_FOUND)
query = query.filter(func.ST_Contains(node.geom, User.geom))
if request.only_with_references:
query = query.join(Reference, Reference.to_user_id == User.id)
# TODO:
# google.protobuf.StringValue language = 11;
# bool friends_only = 13;
# google.protobuf.UInt32Value age_min = 14;
# google.protobuf.UInt32Value age_max = 15;
page_size = min(MAX_PAGINATION_LENGTH, request.page_size or MAX_PAGINATION_LENGTH)
next_user_id = int(request.page_token) if request.page_token else 0
users = query.filter(User.id >= next_user_id).order_by(User.id).limit(page_size + 1).all()
return search_pb2.UserSearchRes(
results=[
search_pb2.Result(
rank=1,
user=user_model_to_pb(user, session, context),
)
for user in users[:page_size]
],
next_page_token=str(users[-1].id) if len(users) > page_size else None,
)
the-stack_106_20520
import numpy as np
import pandas as pd
results = {
'results-imagenet.csv': [
'results-imagenet-real.csv',
'results-imagenetv2-matched-frequency.csv',
'results-sketch.csv'
],
'results-imagenet-a-clean.csv': [
'results-imagenet-a.csv',
],
'results-imagenet-r-clean.csv': [
'results-imagenet-r.csv',
],
}
def diff(base_df, test_csv):
base_models = base_df['model'].values
test_df = pd.read_csv(test_csv)
test_models = test_df['model'].values
rank_diff = np.zeros_like(test_models, dtype='object')
top1_diff = np.zeros_like(test_models, dtype='object')
top5_diff = np.zeros_like(test_models, dtype='object')
for rank, model in enumerate(test_models):
if model in base_models:
base_rank = int(np.where(base_models == model)[0])
top1_d = test_df['top1'][rank] - base_df['top1'][base_rank]
top5_d = test_df['top5'][rank] - base_df['top5'][base_rank]
# rank_diff
if rank == base_rank:
rank_diff[rank] = f'0'
elif rank > base_rank:
rank_diff[rank] = f'-{rank - base_rank}'
else:
rank_diff[rank] = f'+{base_rank - rank}'
# top1_diff
if top1_d >= .0:
top1_diff[rank] = f'+{top1_d:.3f}'
else:
top1_diff[rank] = f'-{abs(top1_d):.3f}'
# top5_diff
if top5_d >= .0:
top5_diff[rank] = f'+{top5_d:.3f}'
else:
top5_diff[rank] = f'-{abs(top5_d):.3f}'
else:
rank_diff[rank] = ''
top1_diff[rank] = ''
top5_diff[rank] = ''
test_df['top1_diff'] = top1_diff
test_df['top5_diff'] = top5_diff
test_df['rank_diff'] = rank_diff
test_df['param_count'] = test_df['param_count'].map('{:,.2f}'.format)
test_df.sort_values('top1', ascending=False, inplace=True)
test_df.to_csv(test_csv, index=False, float_format='%.3f')
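# Sign conventions for the columns written above: a negative rank_diff means the model
# ranks worse on the test set than on the base set, and top1_diff/top5_diff carry an
# explicit '+'/'-' prefix, e.g. a model that gains 1.2 top-1 points gets '+1.200'.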
for base_results, test_results in results.items():
base_df = pd.read_csv(base_results)
base_df.sort_values('top1', ascending=False, inplace=True)
for test_csv in test_results:
diff(base_df, test_csv)
base_df['param_count'] = base_df['param_count'].map('{:,.2f}'.format)
base_df.to_csv(base_results, index=False, float_format='%.3f')
the-stack_106_20521
pkgname = "bzip2"
pkgver = "1.0.8"
pkgrel = 0
pkgdesc = "Freely available, patent free, high-quality data compressor"
maintainer = "q66 <[email protected]>"
license = "custom:bzip2"
url = "https://sourceware.org/bzip2"
source = f"https://sourceware.org/pub/bzip2/bzip2-{pkgver}.tar.gz"
sha256 = "ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269"
tool_flags = {"CFLAGS": ["-fPIC"]}
options = ["bootstrap"]
def init_build(self):
from cbuild.util import make
self.make = make.Make(self)
def do_build(self):
self.make.build([
"-f", "Makefile-libbz2_so",
"CFLAGS=" + self.get_cflags(shell = True),
"LDFLAGS=" + self.get_ldflags(shell = True)
])
self.make.invoke(["bzip2recover", "libbz2.a"], [
"CFLAGS=" + self.get_cflags(shell = True),
"LDFLAGS=" + self.get_ldflags(shell = True)
])
def do_check(self):
self.make.invoke("check")
def do_install(self):
self.cp("bzip2-shared", "bzip2")
self.install_bin("bzip2")
self.install_bin("bzip2recover")
self.install_link("bzip2", "usr/bin/bunzip2")
self.install_link("bzip2", "usr/bin/bzcat")
self.install_bin("bzmore")
self.install_lib(f"libbz2.so.{pkgver}")
self.install_link(f"libbz2.so.{pkgver}", "usr/lib/libbz2.so")
self.install_link(f"libbz2.so.{pkgver}", "usr/lib/libbz2.so.1")
self.install_link(f"libbz2.so.{pkgver}", "usr/lib/libbz2.so.1.0")
self.install_file("libbz2.a", "usr/lib")
self.install_file("bzlib.h", "usr/include")
self.install_man("bzip2.1")
self.install_link("bzip2.1", "usr/share/man/man1/bunzip2.1")
self.install_link("bzip2.1", "usr/share/man/man1/bzcat.1")
self.install_link("bzip2.1", "usr/share/man/man1/bzip2recover.1")
self.install_license("LICENSE")
@subpackage("libbz2")
def _lib(self):
self.pkgdesc = "Bzip2-format compression library"
return self.default_libs()
@subpackage("libbz2-devel")
def _devel(self):
self.pkgdesc = "Bzip2-format compression library (development files)"
return self.default_devel()
the-stack_106_20522
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from unittest import mock
from oslo_config import cfg
from manuka.tests.functional import base
from manuka.worker import consumer
CONF = cfg.CONF
class TestShibWrapper(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
environ['mail'] = '[email protected]'
environ['displayName'] = "john smith"
environ['persistent-id'] = "1324"
return self.app(environ, start_response)
class TestViews(base.TestCase):
def setUp(self):
super().setUp()
self.app.wsgi_app = TestShibWrapper(self.app.wsgi_app)
def test_new_user(self):
"""Given a user who has registered
And has accepted the terms of service
When the user visits the site
Then a keystone user will be created
And the user will be redirected to the portal with
a token encoded in the response.
"""
response = self.client.get('/login/')
self.assert200(response)
self.assertTemplateUsed('terms_form.html')
@mock.patch('manuka.models.keystone_authenticate')
@mock.patch('manuka.worker.utils.refresh_orcid')
@mock.patch('manuka.worker.utils.send_welcome_email')
@mock.patch('manuka.common.clients.get_swift_client')
@mock.patch('manuka.common.clients.get_admin_nova_client')
@mock.patch('manuka.common.clients.get_openstack_client')
@mock.patch('manuka.common.clients.get_admin_keystoneclient')
def test_agreed_terms_user(self, mock_keystone, mock_openstacksdk,
mock_nova, mock_swift, mock_send_email,
mock_refresh_orcid,
mock_keystone_authenticate):
"""Given a known user who has not registered
And has just accepted the terms of service
When the user visits the site
Then a keystone user will be created
And the user will be redirected to the portal with
a token encoded in the response.
"""
ks_client = mock_keystone.return_value
mock_user = mock.Mock(id='u123', email='[email protected]')
mock_user.name = '[email protected]'
mock_domain = mock.Mock(id='d123')
ks_client.users.create.return_value = mock_user
ks_client.users.update.return_value = mock_user
ks_client.domains.get.return_value = mock_domain
token = 'faketoken'
project_id = 'fake_project_id'
updated_user = mock_user
mock_keystone_authenticate.return_value = (token, project_id,
updated_user)
with mock.patch('manuka.app.create_app') as mock_create_app:
mock_create_app.return_value = self.app
worker = consumer.ConsumerService('fake', CONF)
worker.run()
response = self.client.post('/login/', data={'agree': True})
self.assert200(response)
self.assertTemplateUsed('creating_account.html')
# Allow time for the worker to process
time.sleep(0.1)
response = self.client.post('/login/')
self.assertTemplateUsed('redirect.html')
self.assertContext('token', token)
self.assertContext('tenant_id', project_id)
time.sleep(0.1)
mock_send_email.assert_called_once()
mock_refresh_orcid.assert_called_once()
worker.terminate()
the-stack_106_20524
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class HighscoresDialog(QDialog):
def __init__(self, scorelist, parent=None):
super(HighscoresDialog, self).__init__(parent)
self.setWindowTitle('High Scores')
frame = QFrame(self)
frame.setFrameStyle(QFrame.Panel | QFrame.Sunken)
grid = QGridLayout()
for i in range(10):
name, score = ('', '') if i >= len(scorelist) else scorelist[i]
place_label = QLabel('%3s.' % (i + 1))
name_label = QLabel('%-50s' % name)
score_label = QLabel('%7s' % score)
score_label.setAlignment(Qt.AlignRight)
grid.addWidget(place_label, i, 0)
grid.addWidget(name_label, i, 1)
grid.addWidget(score_label, i, 2)
frame.setLayout(grid)
#
# Dialog layout
#
okbutton = QPushButton('&OK')
        okbutton.clicked.connect(self.accept)
bbox = QHBoxLayout()
bbox.addStretch()
bbox.addWidget(okbutton)
bbox.addStretch()
layout = QVBoxLayout()
layout.addWidget(frame)
layout.addLayout(bbox)
self.setLayout(layout)
if __name__ == "__main__":
import sys
import highscores
FILENAME = 'pyqtris_highscores'
ds = highscores.HighScores(10)
ds.load_from_file(FILENAME)
app = QApplication(sys.argv)
dialog = HighscoresDialog(ds.get_list())
dialog.exec_()
the-stack_106_20526
import os
import json
import math
import torch
import numpy
import os.path
import pandas
import argparse
import scikit_wrappers
import sys
def load_UCR_dataset(path, dataset):
"""
Loads the UCR dataset given in input in numpy arrays.
@param path Path where the UCR dataset is located.
@param dataset Name of the UCR dataset.
@return Quadruplet containing the training set, the corresponding training
labels, the testing set and the corresponding testing labels.
"""
train_file = os.path.join(path, dataset, dataset + "_TRAIN.tsv")
test_file = os.path.join(path, dataset, dataset + "_TEST.tsv")
train_df = pandas.read_csv(train_file, sep='\t', header=None)
test_df = pandas.read_csv(test_file, sep='\t', header=None)
train_array = numpy.array(train_df)
test_array = numpy.array(test_df)
# Move the labels to {0, ..., L-1}
labels = numpy.unique(train_array[:, 0])
transform = {}
for i, l in enumerate(labels):
transform[l] = i
train = numpy.expand_dims(train_array[:, 1:], 1).astype(numpy.float64)
train_labels = numpy.vectorize(transform.get)(train_array[:, 0])
test = numpy.expand_dims(test_array[:, 1:], 1).astype(numpy.float64)
test_labels = numpy.vectorize(transform.get)(test_array[:, 0])
# Normalization for non-normalized datasets
# To keep the amplitude information, we do not normalize values over
# individual time series, but on the whole dataset
if dataset not in [
'AllGestureWiimoteX',
'AllGestureWiimoteY',
'AllGestureWiimoteZ',
'BME',
'Chinatown',
'Crop',
'EOGHorizontalSignal',
'EOGVerticalSignal',
'Fungi',
'GestureMidAirD1',
'GestureMidAirD2',
'GestureMidAirD3',
'GesturePebbleZ1',
'GesturePebbleZ2',
'GunPointAgeSpan',
'GunPointMaleVersusFemale',
'GunPointOldVersusYoung',
'HouseTwenty',
'InsectEPGRegularTrain',
'InsectEPGSmallTrain',
'MelbournePedestrian',
'PickupGestureWiimoteZ',
'PigAirwayPressure',
'PigArtPressure',
'PigCVP',
'PLAID',
'PowerCons',
'Rock',
'SemgHandGenderCh2',
'SemgHandMovementCh2',
'SemgHandSubjectCh2',
'ShakeGestureWiimoteZ',
'SmoothSubspace',
'UMD'
]:
return train, train_labels, test, test_labels
mean = numpy.nanmean(numpy.concatenate([train, test]))
var = numpy.nanvar(numpy.concatenate([train, test]))
train = (train - mean) / math.sqrt(var)
test = (test - mean) / math.sqrt(var)
return train, train_labels, test, test_labels
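# Illustrative usage (the archive path and dataset name here are hypothetical):
#
#   train, train_labels, test, test_labels = load_UCR_dataset("data/UCRArchive_2018", "GunPoint")
#   # train has shape (n_train, 1, series_length); labels are mapped to {0, ..., L-1}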
def fit_hyperparameters(file, train, train_labels, cuda, gpu, lambda_0=1, lambda_1=0, lambda_2=0,
                        sliding_window=False, save_memory=False):
"""
Creates a classifier from the given set of hyperparameters in the input
file, fits it and return it.
    @param file Path of a file containing a set of hyperparameters.
@param train Training set.
@param train_labels Labels for the training set.
@param cuda If True, enables computations on the GPU.
    @param gpu GPU to use if CUDA is enabled.
    @param lambda_0 Weight of the global-global (GG) loss term.
    @param lambda_1 Weight of the global-local (GL) loss term.
    @param lambda_2 Weight of the local-local (LL) loss term.
    @param sliding_window If True, uses the sliding-window-based positive sampling.
@param save_memory If True, save GPU memory by propagating gradients after
each loss term, instead of doing it after computing the whole loss.
"""
classifier = scikit_wrappers.CausalCNNEncoderClassifier()
# Loads a given set of hyperparameters and fits a model with those
hf = open(os.path.join(file), 'r')
params = json.load(hf)
hf.close()
# Check the number of input channels
params['in_channels'] = numpy.shape(train)[1]
params['cuda'] = cuda
params['gpu'] = gpu
    params['sliding_window'] = sliding_window
classifier.set_params(**params)
return classifier.fit(
train, train_labels, lambda_0=lambda_0, lambda_1=lambda_1, lambda_2=lambda_2, save_memory=save_memory, verbose=True
)
def parse_arguments():
parser = argparse.ArgumentParser(
description='Classification tests for UCR repository datasets'
)
parser.add_argument('--dataset', type=str, metavar='D', required=True,
help='dataset name')
parser.add_argument('--path', type=str, metavar='PATH', required=True,
help='path where the dataset is located')
parser.add_argument('--save_path', type=str, metavar='PATH', required=True,
help='path where the estimator is/should be saved')
parser.add_argument('--cuda', action='store_true',
help='activate to use CUDA')
parser.add_argument('--gpu', type=int, default=0, metavar='GPU',
help='index of GPU used for computations (default: 0)')
parser.add_argument('--hyper', type=str, metavar='FILE', required=True,
help='path of the file of hyperparameters to use; ' +
'for training; must be a JSON file')
parser.add_argument('--load', action='store_true', default=False,
help='activate to load the estimator instead of ' +
'training it')
parser.add_argument('--fit_classifier', action='store_true', default=False,
help='if not supervised, activate to load the ' +
'model and retrain the classifier')
    parser.add_argument('--sliding_window', type=int, default=0,
                        help="set to 1 to use the sliding-window-based positive sampling (default: 0)")
parser.add_argument('--logs', type=str, default='logs/', help="log dump folder")
parser.add_argument('--lambda_0', type=int, required=True, help='value of lambda_0 for GG loss')
parser.add_argument('--lambda_1', type=int, required=True, help='value of lambda_1 for GL loss')
parser.add_argument('--lambda_2', type=int, required=True, help='value of lambda_2 for LL loss')
return parser.parse_args()
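# Hedged example invocation (the script name, paths and dataset below are
# placeholders, not taken from this repository):
#
#   python train_ucr.py --dataset GunPoint --path UCRArchive_2018 \
#       --save_path models --hyper hyperparameters.json \
#       --lambda_0 1 --lambda_1 1 --lambda_2 1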
def log(message):
    # if (os.path.isdir(path) == False):
    #     os.mkdir(path)
    filewriter = open("output.txt", 'a')
    filewriter.write(message + "\n")
    filewriter.close()
if __name__ == '__main__':
args = parse_arguments()
if args.cuda and not torch.cuda.is_available():
print("CUDA is not available, proceeding without it...")
args.cuda = False
train, train_labels, test, test_labels = load_UCR_dataset(
args.path, args.dataset
)
if not args.load and not args.fit_classifier:
classifier = fit_hyperparameters(
            args.hyper, train, train_labels, args.cuda, args.gpu,
            args.lambda_0, args.lambda_1, args.lambda_2, (args.sliding_window == 1)
)
else:
classifier = scikit_wrappers.CausalCNNEncoderClassifier()
hf = open(
os.path.join(
args.save_path, args.dataset + '_hyperparameters.json'
), 'r'
)
hp_dict = json.load(hf)
hf.close()
hp_dict['cuda'] = args.cuda
hp_dict['gpu'] = args.gpu
classifier.set_params(**hp_dict)
classifier.load(os.path.join(args.save_path, args.dataset))
if not args.load:
if args.fit_classifier:
classifier.fit_classifier(classifier.encode(train), train_labels)
classifier.save(
os.path.join(args.save_path, args.dataset)
)
with open(
os.path.join(
args.save_path, args.dataset + '_hyperparameters.json'
), 'w'
) as fp:
json.dump(classifier.get_params(), fp)
message = "Test accuracy on dataset {} is {} ".format(args.dataset, classifier.score(test, test_labels))
log(message)
print(message)
print("Completed !!!! ")
|
the-stack_106_20527
|
from helpers import upload
import json
import os
# Assumes the system's current directory is iot-farm/src
with open('config.json') as f:
CONFIG = json.load(f)
def create_file(filename: str):
f = open(filename, 'w')
f.write('Hello again world!')
f.close()
def main():
uploader = upload.Uploader(CONFIG['upload'])
filename = 'test.txt'
create_file(filename)
try:
folder = uploader.create_folder("iot_test")
uploader.upload_file(filename, parent_id=folder)
except Exception as e:
print(e)
finally:
os.remove(filename)
if __name__ == '__main__':
main()
|
the-stack_106_20528
|
import numpy as np
import pytest
from . import fit_atg_model
from .fit_atg_model import AtgModelFit
def test_fit_atg_model():
a = 2719.0
tg = 7.2
exp = 6.23
t0 = 2.5
xs = np.arange(1, 100)
# Test perfect fit.
ys = fit_atg_model._model(params=[a, tg, exp, t0], xs=xs)
fit = fit_atg_model.fit_atg_model(xs=xs, ys=ys)
assert np.allclose([fit.a, fit.tg, fit.exp, fit.t0], [a, tg, exp, t0])
# Test a noisy fit.
ys += np.random.rand(len(ys))
fit = fit_atg_model.fit_atg_model(xs=xs, ys=ys)
assert [fit.a, fit.tg, fit.exp, fit.t0] == pytest.approx([a, tg, exp, t0], rel=0.1)
# Test with real-world data. (Jordan until 2020-4-23.)
# fmt: off
ys = np.array(
[
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 7, 16, 33, 51, 68, 84, 84, 111, 126, 153, 171,
211, 216, 227, 238, 237, 239, 237, 249, 247, 244, 230, 217, 209, 202, 204, 195, 197,
181, 169, 155, 144, 136, 135, 137, 134, 136, 124, 113, 112,
]
)
# fmt: on
xs = np.arange(len(ys))
fit = fit_atg_model.fit_atg_model(xs=xs, ys=ys)
assert [fit.a, fit.tg, fit.exp, fit.t0] == pytest.approx([2243, 7, 2, 11], abs=1.0)
def test_atg_model_fit_predict():
fit = AtgModelFit(a=1.2, tg=2.3, exp=3.4, t0=5.6)
x = 10.0
expected = 1.2 / 2.3 * ((x - 5.6) / 2.3) ** 3.4 / np.exp((x - 5.6) / 2.3)
assert fit.predict(x) == pytest.approx(expected)
assert fit.predict(4.0) == pytest.approx(0.0)
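# For reference, the closed form the asserts above encode (a restatement, not new
# behaviour): predict(t) = (a / tg) * ((t - t0) / tg) ** exp * exp(-(t - t0) / tg),
# with the second assert suggesting that predictions are zero for t <= t0.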
|
the-stack_106_20529
|
"""Interaction with smart contracts over Web3 connector.
"""
import functools
from eth_abi import (
encode_abi,
decode_abi,
)
from web3.utils.encoding import (
encode_hex,
)
from web3.utils.formatting import (
add_0x_prefix,
remove_0x_prefix,
)
from web3.utils.string import (
force_bytes,
coerce_return_to_text,
force_obj_to_bytes,
)
from web3.utils.functional import (
compose,
)
from web3.utils.abi import (
filter_by_type,
filter_by_name,
filter_by_argument_count,
filter_by_argument_name,
filter_by_encodability,
get_abi_input_types,
get_abi_output_types,
get_constructor_abi,
check_if_arguments_can_be_encoded,
function_abi_to_4byte_selector,
merge_args_and_kwargs,
normalize_return_type,
)
from web3.utils.decorators import (
combomethod,
)
from web3.utils.events import (
get_event_data,
)
from web3.utils.filters import (
construct_event_filter_params,
PastLogFilter,
)
class Contract(object):
"""Base class for Contract proxy classes.
First you need to create your Contract classes using :func:`construct_contract_class`
that takes compiled Solidity contract ABI definitions as input.
The created class object will be a subclass of this base class.
After you have your Contract proxy class created you can interact with smart contracts
* Create a Contract proxy object for an existing deployed smart contract by its address
using :meth:`__init__`
* Deploy a new smart contract using :py:meth:`Contract.deploy`
"""
# set during class construction
web3 = None
# class properties (overridable at instance level)
_abi = None
_code = None
_code_runtime = None
_source = None
# instance level properties
address = None
def __init__(self, abi=None, address=None, code=None, code_runtime=None, source=None):
"""Create a new smart contract proxy object.
:param address: Contract address as 0x hex string
:param abi: Override class level definition
:param code: Override class level definition
:param code_runtime: Override class level definition
:param source: Override class level definition
"""
if self.web3 is None:
raise AttributeError(
'The `Contract` class has not been initialized. Please use the '
'`web3.contract` interface to create your contract class.'
)
if abi is not None:
self._abi = abi
if code is not None:
self._code = code
if code_runtime is not None:
self._code_runtime = code_runtime
if source is not None:
self._source = source
self.address = address
@property
def abi(self):
if self._abi is not None:
return self._abi
# TODO: abi can be derived from the contract source.
raise AttributeError("No contract abi was specified for thes contract")
@property
def code(self):
if self._code is not None:
return self._code
# TODO: code can be derived from the contract source.
raise AttributeError("No contract code was specified for thes contract")
@property
def code_runtime(self):
if self._code_runtime is not None:
return self._code_runtime
# TODO: runtime can be derived from the contract source.
raise AttributeError("No contract code_runtime was specified for thes contract")
@property
def source(self):
if self._source is not None:
return self._source
raise AttributeError("No contract source was specified for thes contract")
@classmethod
def deploy(cls, transaction=None, arguments=None):
"""
Deploys the contract on a blockchain.
Example:
.. code-block:: python
from typing import Optional, Tuple
from gevent import Timeout
from web3 import Web3
from web3.contract import Contract, construct_contract_class
from populus.utils.transactions import (
get_contract_address_from_txn,
wait_for_transaction_receipt
)
def deploy_contract(
web3: Web3,
contract_definition: dict,
gas=1500000,
timeout=60.0,
constructor_arguments: Optional[list]=None,
from_account=None) -> Tuple[Contract, str]:
'''Deploys a single contract using Web3 client.
:param web3: Web3 client instance
                :param contract_definition: Dictionary describing the contract interface,
                    as read from ``contracts.json``
:param gas: Max gas
:param timeout: How many seconds to wait the transaction to
confirm to get the contract address.
:param constructor_arguments: Arguments passed to the smart contract
constructor. Automatically encoded through ABI signature.
:param from_account: Geth account that's balance is used for deployment.
By default, the gas is spent from Web3 coinbase account.
Account must be unlocked.
:return: Tuple containing Contract proxy object and the
transaction hash where it was deployed
:raise gevent.timeout.Timeout: If we can't get our contract
in a block within given timeout
'''
# Check we are passed valid contract definition
assert "abi" in contract_definition, \
"Please pass a valid contract definition dictionary, got {}".
format(contract_definition)
contract_class = construct_contract_class(
web3=web3,
abi=contract_definition["abi"],
code=contract_definition["code"],
code_runtime=contract_definition["code_runtime"],
source=contract_definition["source"],
)
if not from_account:
from_account = web3.eth.coinbase
# Set transaction parameters
transaction = {
"gas": gas,
"from": from_account,
}
# Call web3 to deploy the contract
txn_hash = contract_class.deploy(transaction, constructor_arguments)
# Wait until we get confirmation and address
address = get_contract_address_from_txn(web3, txn_hash, timeout=timeout)
# Create Contract proxy object
contract = contract_class(address=address)
return contract, txn_hash
:param transaction: Transaction parameters for the deployment transaction as a dict
:param arguments: The contract constructor arguments
:return: 0x string formatted transaction hash of the deployment transaction
"""
if transaction is None:
deploy_transaction = {}
else:
deploy_transaction = dict(**transaction)
if not cls.code:
raise ValueError(
"Cannot deploy a contract that does not have 'code' associated with it"
)
if 'data' in deploy_transaction:
raise ValueError(
"Cannot specify `data` for contract deployment"
)
if 'to' in deploy_transaction:
raise ValueError(
"Cannot specify `to` for contract deployment"
)
deploy_transaction['data'] = cls.encodeConstructorData(arguments)
# TODO: handle asynchronous contract creation
txn_hash = cls.web3.eth.sendTransaction(deploy_transaction)
return txn_hash
#
# ABI Helpers
#
@classmethod
def find_matching_fn_abi(cls, fn_name=None, args=None, kwargs=None):
filters = []
if fn_name:
filters.append(functools.partial(filter_by_name, fn_name))
if args is not None or kwargs is not None:
if args is None:
args = tuple()
if kwargs is None:
kwargs = {}
num_arguments = len(args) + len(kwargs)
filters.extend([
functools.partial(filter_by_argument_count, num_arguments),
functools.partial(filter_by_encodability, args, kwargs),
])
function_candidates = filter_by_type('function', cls.abi)
for filter_fn in filters:
function_candidates = filter_fn(function_candidates)
if len(function_candidates) == 1:
return function_candidates[0]
elif not function_candidates:
break
if not function_candidates:
raise ValueError("No matching functions found")
else:
raise ValueError("Multiple functions found")
@classmethod
def find_matching_event_abi(cls, event_name=None, argument_names=None):
filters = [
functools.partial(filter_by_type, 'event'),
]
if event_name is not None:
filters.append(functools.partial(filter_by_name, event_name))
if argument_names is not None:
filters.append(
functools.partial(filter_by_argument_name, argument_names)
)
filter_fn = compose(*filters)
event_abi_candidates = filter_fn(cls.abi)
if len(event_abi_candidates) == 1:
return event_abi_candidates[0]
elif not event_abi_candidates:
raise ValueError("No matching functions found")
else:
raise ValueError("Multiple functions found")
@classmethod
@coerce_return_to_text
def encodeABI(cls, fn_name, arguments, data=None):
"""
encodes the arguments using the Ethereum ABI.
"""
function_abi = cls.find_matching_fn_abi(fn_name, force_obj_to_bytes(arguments))
return cls._encodeABI(function_abi, arguments, data)
@classmethod
def _encodeABI(cls, abi, arguments, data=None):
        argument_types = get_abi_input_types(abi)
        encoded_arguments = encode_abi(argument_types, force_obj_to_bytes(arguments))
if data:
return add_0x_prefix(
force_bytes(remove_0x_prefix(data)) +
force_bytes(remove_0x_prefix(encode_hex(encoded_arguments)))
)
else:
return encode_hex(encoded_arguments)
@classmethod
@coerce_return_to_text
def encodeConstructorData(cls, arguments=None):
if arguments is None:
arguments = []
constructor = get_constructor_abi(cls.abi)
if constructor:
if constructor['inputs'] and not arguments:
raise ValueError(
"This contract requires {0} constructor arguments".format(
len(constructor['inputs']),
)
)
if arguments:
if len(arguments) != len(constructor['inputs']):
raise ValueError(
"This contract requires {0} constructor arguments".format(
len(constructor['inputs']),
)
)
is_encodable = check_if_arguments_can_be_encoded(
constructor,
arguments,
{},
)
if not is_encodable:
raise ValueError("Unable to encode provided arguments.")
deploy_data = add_0x_prefix(cls._encodeABI(constructor, arguments, data=cls.code))
else:
deploy_data = add_0x_prefix(cls.code)
return deploy_data
@combomethod
def on(self, event_name, default_filter_params=None, *callbacks):
"""
register a callback to be triggered on the appropriate events.
"""
if default_filter_params is None:
default_filter_params = {}
argument_filters = default_filter_params.pop('filter', {})
argument_filter_names = list(argument_filters.keys())
event_abi = self.find_matching_event_abi(event_name, argument_filter_names)
data_filter_set, filter_params = construct_event_filter_params(
event_abi,
contract_address=self.address,
argument_filters=argument_filters,
**default_filter_params
)
log_data_extract_fn = functools.partial(get_event_data, event_abi)
log_filter = self.web3.eth.filter(filter_params)
log_filter.set_data_filters(data_filter_set)
log_filter.log_entry_formatter = log_data_extract_fn
log_filter.filter_params = filter_params
if callbacks:
log_filter.watch(*callbacks)
return log_filter
@combomethod
def pastEvents(self, event_name, default_filter_params=None, *callbacks):
"""
register a callback to be triggered on all past events.
"""
if default_filter_params is None:
default_filter_params = {}
if 'fromBlock' in default_filter_params or 'toBlock' in default_filter_params:
raise ValueError("Cannot provide `fromBlock` or `toBlock` in `pastEvents` calls")
filter_params = {}
filter_params.update(default_filter_params)
filter_params.update({
'fromBlock': "earliest",
'toBlock': self.web3.eth.blockNumber,
})
log_filter = self.on(
event_name,
default_filter_params=filter_params,
)
past_log_filter = PastLogFilter(
web3=log_filter.web3,
filter_id=log_filter.filter_id,
log_entry_formatter=log_filter.log_entry_formatter,
data_filter_set=log_filter.data_filter_set,
)
if callbacks:
past_log_filter.watch(*callbacks)
return past_log_filter
def estimateGas(self, transaction=None):
"""
Estimate the gas for a call
"""
if transaction is None:
estimate_transaction = {}
else:
estimate_transaction = dict(**transaction)
if 'data' in estimate_transaction:
raise ValueError("Cannot set data in call transaction")
if 'to' in estimate_transaction:
raise ValueError("Cannot set to in call transaction")
estimate_transaction['to'] = self.address
estimate_transaction.setdefault('from', self.web3.eth.coinbase)
contract = self
class Caller(object):
def __getattr__(self, function_name):
callable_fn = functools.partial(
estimate_gas_for_function,
contract,
function_name,
estimate_transaction,
)
return callable_fn
return Caller()
def call(self, transaction=None):
"""
Execute a contract function call using the `eth_call` interface.
This method prepares a ``Caller`` object that exposes the contract
        functions and public variables as callable Python functions.
Reading a public ``owner`` address variable example:
.. code-block:: python
contract_class = construct_contract_class(
web3=web3,
                abi=wallet_contract_definition["abi"],
            )
# Not a real contract address
contract = contract_class("0x2f70d3d26829e412a602e83fe8eebf80255aeea5")
# Read "owner" public variable
bin_addr = contract.call().owner()
# Convert address to 0x format
address = "0x" + bin_addr.decode("ascii")
:param transaction: Dictionary of transaction info for web3 interface
:return: ``Caller`` object that has contract public functions
and variables exposed as Python methods
"""
if transaction is None:
call_transaction = {}
else:
call_transaction = dict(**transaction)
if 'data' in call_transaction:
raise ValueError("Cannot set data in call transaction")
if 'to' in call_transaction:
raise ValueError("Cannot set to in call transaction")
call_transaction['to'] = self.address
call_transaction.setdefault('from', self.web3.eth.coinbase)
contract = self
class Caller(object):
def __getattr__(self, function_name):
callable_fn = functools.partial(
call_contract_function,
contract,
function_name,
call_transaction,
)
return callable_fn
return Caller()
def transact(self, transaction=None):
"""
Execute a contract function call using the `eth_sendTransaction` interface.
You should specify the account that pays the gas for this
transaction in `transaction`. If no account is specified the coinbase
account of web3 interface is used.
Example:
.. code-block:: python
# Assumes self.contract points to a Contract instance having withdraw() function
def withdraw(self,
to_address: str,
amount_in_eth: Decimal,
from_account=None, max_gas=50000) -> str:
'''Withdraw funds from a hosted wallet contract.
:param amount_in_eth: How much as ETH
:param to_address: Destination address we are withdrawing to
:param from_account: Which Geth account pays the gas
:return: Transaction hash as 0x string
'''
assert isinstance(amount_in_eth, Decimal) # Don't let floats slip through
wei = to_wei(amount_in_eth)
if not from_account:
# Default to coinbase for transaction fees
from_account = self.contract.web3.eth.coinbase
tx_info = {
# The Ethereum account that pays the gas for this operation
"from": from_account,
"gas": max_gas,
}
# Interact with underlying wrapped contract
txid = self.contract.transact(tx_info).withdraw(to_address, wei)
return txid
The transaction is created in the Ethereum node memory pool.
Transaction receipt is not available until the transaction has been mined.
See :func:`populus.transaction.wait_for_transaction_receipt`.
:param transaction: Dictionary of transaction info for web3 interface.
Variables include ``from``, ``gas``.
:return: ``Transactor`` object that has contract
public functions exposed as Python methods.
Calling these methods will execute a transaction against the contract.
"""
if transaction is None:
transact_transaction = {}
else:
transact_transaction = dict(**transaction)
if 'data' in transact_transaction:
raise ValueError("Cannot set data in call transaction")
if 'to' in transact_transaction:
raise ValueError("Cannot set to in call transaction")
transact_transaction['to'] = self.address
transact_transaction.setdefault('from', self.web3.eth.coinbase)
contract = self
class Transactor(object):
def __getattr__(self, function_name):
callable_fn = functools.partial(
transact_with_contract_function,
contract,
function_name,
transact_transaction,
)
return callable_fn
return Transactor()
@coerce_return_to_text
def call_contract_function(contract,
function_name,
transaction,
*args,
**kwargs):
"""Calls a contract constant or function.
The function must not have state changing effects.
For those see :func:`transact_with_contract_function`
For usual cases, you do not want to call this directly,
but interact with your contract through :meth:`Contract.call` method.
:param contract: :class:`web3.contract.Contract` object instance
:param function_name: Contract function name to call
:param transaction: Transaction parameters to pass to underlying ``web3.eth.call``
:param *arguments: Arguments to be passed to contract function. Automatically encoded
:return: Function call results, encoded to Python object
"""
if transaction is None:
call_transaction = {}
else:
call_transaction = dict(**transaction)
function_abi = contract.find_matching_fn_abi(function_name, args, kwargs)
function_selector = function_abi_to_4byte_selector(function_abi)
arguments = merge_args_and_kwargs(function_abi, args, kwargs)
call_transaction['data'] = contract.encodeABI(
function_name,
arguments,
data=function_selector,
)
return_data = contract.web3.eth.call(call_transaction)
output_types = get_abi_output_types(function_abi)
output_data = decode_abi(output_types, return_data)
normalized_data = [
normalize_return_type(data_type, data_value)
for data_type, data_value
in zip(output_types, output_data)
]
if len(normalized_data) == 1:
return normalized_data[0]
else:
return normalized_data
def transact_with_contract_function(contract=None,
function_name=None,
transaction=None,
*args,
**kwargs):
"""Transacts with a contract.
Sends in a transaction that interacts with the contract.
You should specify the account that pays the gas for this
transaction in `transaction`.
Example:
.. code-block:: python
def withdraw(self,
to_address: str,
amount_in_eth: Decimal,
from_account=None,
max_gas=50000) -> str:
'''Withdraw funds from a wallet contract.
:param amount_in_eth: How much as ETH
:param to_address: Destination address we are withdrawing to
            :param from_account: Which Geth account pays the gas
:return: Transaction hash
'''
assert isinstance(amount_in_eth, Decimal) # Don't let floats slip through
wei = to_wei(amount_in_eth)
if not from_account:
# Default to coinbase for transaction fees
from_account = self.contract.web3.eth.coinbase
tx_info = {
# The Ethereum account that pays the gas for this operation
"from": from_account,
"gas": max_gas,
}
# Interact with underlying wrapped contract
txid = transact_with_contract_function(
self.contract, "withdraw", tx_info, to_address, wei,
)
return txid
The transaction is created in the Ethereum node memory pool.
Transaction receipt is not available until the transaction has been mined.
See :func:`populus.utils.transactions.wait_for_transaction`.
Usually there is no reason to call directly. Instead
use :meth:`Contract.transact` interface.
:param contract: :class:`web3.contract.Contract` object instance
:param function_name: Contract function name to call
:param transaction: Dictionary of transaction parameters to pass to
underlying ``web3.eth.sendTransaction``
:param *arguments: Arguments to be passed to contract function. Automatically encoded
:return: String, 0x formatted transaction hash.
"""
if transaction is None:
transact_transaction = {}
else:
transact_transaction = dict(**transaction)
function_abi = contract.find_matching_fn_abi(function_name, args, kwargs)
function_selector = function_abi_to_4byte_selector(function_abi)
arguments = merge_args_and_kwargs(function_abi, args, kwargs)
transact_transaction['data'] = contract.encodeABI(
function_name,
arguments,
data=function_selector,
)
txn_hash = contract.web3.eth.sendTransaction(transact_transaction)
return txn_hash
def estimate_gas_for_function(contract=None,
function_name=None,
transaction=None,
*args,
**kwargs):
"""Estimates gas cost a function call would take.
Don't call this directly, instead use :meth:`Contract.estimateGas`
on your contract instance.
"""
if transaction is None:
estimate_transaction = {}
else:
estimate_transaction = dict(**transaction)
function_abi = contract.find_matching_fn_abi(function_name, args, kwargs)
function_selector = function_abi_to_4byte_selector(function_abi)
arguments = merge_args_and_kwargs(function_abi, args, kwargs)
estimate_transaction['data'] = contract.encodeABI(
function_name,
arguments,
data=function_selector,
)
gas_estimate = contract.web3.eth.estimateGas(estimate_transaction)
return gas_estimate
def construct_contract_class(web3, abi, code=None,
code_runtime=None, source=None):
"""Creates a new Contract class.
    Contract class is a Python proxy class to interact with smart contracts.
``abi`` and other contract definition fields are coming from
``solc`` compiler or ``build/contracts.json`` in the
case of Populus framework.
    After the contract has been instantiated you can interact with it
using :meth:`transact_with_contract_function` and
:meth:`call_contract_function`.
Example:
.. code-block:: python
# Assume we have a contract called Token from token.sol, as
# previously build by Populus command line client
contract_abis = json.load(open("build/contracts.json", "rt"))
contract_definition = contract_abis["Token"]
# contract_class is now Python "Token" class
contract_class = construct_contract_class(
web3=web3,
abi=contract_definition["abi"],
code=contract_definition["code"],
code_runtime=contract_definition["code_runtime"],
source=contract_definition["source"],
)
# Create Contract proxy object based on a given
# smart contract address in block chain
contract = contract_class(
address=address,
abi=contract_definition["abi"],
code=contract_definition["code"],
code_runtime=contract_definition["code_runtime"],
source=contract_definition["source"])
:param web3: Web3 connection
:param abi: As given by solc compiler
:param code: As given by solc compiler
:param code_runtime: As given by solc compiler
:param source: As given by solc compiler
:return: Contract class (not instance)
"""
_dict = {
'web3': web3,
'abi': abi,
'code': code,
'code_runtime': code_runtime,
'source': source,
}
return type('Contract', (Contract,), _dict)
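# Hedged end-to-end sketch (the deployed address and the owner() function are
# placeholders; this mirrors the docstring examples above rather than adding new API):
def _example_read_owner(web3, contract_definition, address):
    contract_class = construct_contract_class(
        web3=web3,
        abi=contract_definition["abi"],
    )
    contract = contract_class(address=address)
    gas = contract.estimateGas().owner()  # assumes the ABI defines an owner() constant
    owner = contract.call().owner()
    return gas, owner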
|
the-stack_106_20530
|
import numpy as np
from collections import defaultdict
import random
class Agent:
def __init__(self, nA=6):
""" Initialize agent.
Params
======
- nA: number of actions available to the agent
"""
self.nA = nA
self.Q = defaultdict(lambda: np.zeros(self.nA))
self.gamma = 0.8
self.alpha = 0.04
def select_action(self, state, eps):
""" Given the state, select an action.
Params
======
- state: the current state of the environment
Returns
=======
- action: an integer, compatible with the task's action space
"""
if random.random() > eps:
return np.argmax(self.Q[state])
else:
return np.random.choice(self.nA)
def step(self, state, action, reward, next_state, next_action, done):
""" Update the agent's knowledge, using the most recently sampled tuple.
Params
======
- state: the previous state of the environment
- action: the agent's previous choice of action
- reward: last reward received
        - next_state: the current state of the environment
        - next_action: the agent's next choice of action (used for the SARSA target)
        - done: whether the episode is complete (True or False)
"""
current = self.Q[state][action]
Qsa_next = self.Q[next_state][next_action] if next_state is not None else 0
target = reward + self.gamma*Qsa_next
self.Q[state][action] = self.Q[state][action] + (self.alpha * (target - current))
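# Hedged driver sketch (not part of the original agent): runs one episode with a
# classic Gym-style environment exposing reset()/step(); the epsilon value is an
# arbitrary assumption.
def _example_episode(env, agent, eps=0.1):
    state = env.reset()
    action = agent.select_action(state, eps)
    done = False
    while not done:
        next_state, reward, done, _ = env.step(action)
        next_action = agent.select_action(next_state, eps)
        agent.step(state, action, reward, next_state, next_action, done)
        state, action = next_state, next_action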
|
the-stack_106_20531
|
import re
from abc import ABC, abstractmethod
from typing import List
from nltk import Tree, ParentedTree
def pas_to_str(x):
if isinstance(x, tuple): # has children
head = x[0]
children = [pas_to_str(e) for e in x[1]]
return f"{head}({', '.join(children)})"
else:
return x
class TreeStrParser(ABC):
def __init__(self, x:str=None, brackets="()"):
super(TreeStrParser, self).__init__()
self.stack = [[]]
self.curstring = None
self.stringmode = None
self.prevescape = 0
self.next_is_sibling = False
self.nameless_func = "@NAMELESS@"
self.brackets = brackets
if x is not None:
self.feed(x)
@abstractmethod
def add_level(self):
pass
@abstractmethod
def close_level(self):
pass
@abstractmethod
def add_sibling(self, next_token):
pass
def feed(self, x:str):
xsplits = re.split("([\(\)\s'\"])", x)
queue = list(xsplits)
while len(queue) > 0:
next_token = queue.pop(0)
if self.curstring is not None:
if next_token == "\\":
self.prevescape = 2
elif next_token == "":
continue
self.curstring += next_token
if self.curstring[-1] == self.stringmode and self.prevescape == 0: # closing string
self.stack[-1].append(self.curstring)
self.curstring = None
self.stringmode = None
self.prevescape = max(self.prevescape - 1, 0)
else:
self.next_is_sibling = False
next_token = next_token.strip()
                self.prevescape = 0
if next_token == self.brackets[0]:
# add one level on stack
self.add_level()
elif next_token == self.brackets[1]:
# close last level on stack, merge into subtree
self.close_level()
elif next_token == "" or next_token == " ":
pass # do nothing
elif next_token == "'":
self.curstring = next_token
self.stringmode = "'"
elif next_token == '"':
self.curstring = next_token
self.stringmode = '"'
elif next_token == ",":
self.next_is_sibling = True
else:
self.add_sibling(next_token)
if len(self.stack) != 1 or len(self.stack[-1]) != 1:
return None
else:
return self.stack[-1][-1]
class PrologToPas(TreeStrParser):
def add_level(self):
if self.next_is_sibling:
self.stack[-1].append(self.nameless_func)
self.stack.append([])
def close_level(self):
siblings = self.stack.pop(-1)
# self.stack[-1].append((siblings[0], siblings[1:]))
self.stack[-1][-1] = (self.stack[-1][-1], siblings)
def add_sibling(self, next_token):
self.stack[-1].append(next_token)
class PrologToTree(TreeStrParser):
def add_level(self):
self.stack.append([])
def close_level(self):
siblings = self.stack.pop(-1)
self.stack[-1][-1].extend(siblings)
def add_sibling(self, next_token):
self.stack[-1].append(Tree(next_token, []))
class LispToPas(TreeStrParser):
def add_level(self):
self.stack.append([])
def close_level(self):
siblings = self.stack.pop(-1)
self.stack[-1].append((siblings[0], siblings[1:]))
def add_sibling(self, next_token):
self.stack[-1].append(next_token)
class LispToTree(TreeStrParser):
def add_level(self):
self.stack.append([])
def close_level(self):
siblings = self.stack.pop(-1)
if len(siblings) > 0:
assert (len(siblings[0]) == 0)
siblings[0].extend(siblings[1:])
self.stack[-1].append(siblings[0])
def add_sibling(self, next_token):
self.stack[-1].append(Tree(next_token, []))
def _inc_convert_treestr(x, cls, self=-1, brackets="()"):
"""
:param x: lisp-style string
strings must be surrounded by single quotes (') and may not contain anything but single quotes
:return:
"""
if isinstance(self, cls):
ret = self.feed(x)
return ret, self
else:
_self = cls(x, brackets=brackets) if not isinstance(self, cls) else self
ret = _self.feed("")
if ret is None:
return None, _self
else:
if self is None:
return ret, _self
else:
return ret
def lisp_to_pas(x:str, self:LispToPas=-1, brackets="()"):
return _inc_convert_treestr(x, LispToPas, self=self, brackets=brackets)
def prolog_to_pas(x:str, self:PrologToPas=-1, brackets="()"):
return _inc_convert_treestr(x, PrologToPas, self=self, brackets=brackets)
def lisp_to_tree(x:str, self:LispToTree=-1, brackets="()"):
return _inc_convert_treestr(x, LispToTree, self=self, brackets=brackets)
def prolog_to_tree(x: str, self:PrologToTree = -1, brackets="()"):
return _inc_convert_treestr(x, PrologToTree, self=self, brackets=brackets)
def pas_to_lisp(x, brackets="()"):
if isinstance(x, tuple): # has children
head = x[0]
children = [pas_to_lisp(e, brackets=brackets) for e in x[1]]
return f"{brackets[0]}{head} {' '.join(children)}{brackets[1]}"
else:
return x
def pas_to_prolog(x, brackets="()"):
if isinstance(x, tuple):
head = x[0]
children = [pas_to_prolog(e, brackets=brackets) for e in x[1]]
return f"{head} {brackets[0]} {', '.join(children)} {brackets[1]}"
else:
return x
def pas_to_expression(x):
# flatten out the lists, replace tuples with lists
if isinstance(x, tuple):
return [x[0]] + [pas_to_expression(xe) for xe in x[1]]
else:
return x
def try_str_to_pas():
x = "wife(president('US'))"
pas = prolog_to_pas(x)
print(x)
print(pas)
x = "wife ( president ( 'united states' ) ) "
print(x)
print(prolog_to_pas(x))
x = " wife ( president ( 'united states (country , ,,) ' ) ) "
print(x)
print(prolog_to_pas(x))
x = " ( wife ( president 'united states (country , ,,) ' ) ) "
print(x)
print(prolog_to_pas(x))
x = "(wife(president 'united states (country , ,,) '))"
print(x)
print(prolog_to_pas(x))
x = "(wife(president 'united states (country , ,,) '))"
print(x)
print(prolog_to_pas(x))
def try_lisp_to_tree():
a = "(wife (president ((us))))"
b = "(wife (president us))"
c = "(wife (president (us)))"
at = lisp_to_tree(a)
bt = lisp_to_tree(b)
ct = lisp_to_tree(c)
print(at)
print(bt)
print(ct)
def pas_to_tree(x):
if isinstance(x, tuple): # has children
node = Tree(x[0], [])
for child in x[1]:
childnode = pas_to_tree(child)
node.append(childnode)
else:
node = Tree(x, [])
return node
def tree_to_lisp(x:Tree, brackets="()"):
if len(x) > 0:
children = [tree_to_lisp(xe, brackets=brackets) for xe in x]
return f"{brackets[0]}{x.label()} {' '.join(children)}{brackets[1]}"
else:
return x.label()
def tree_to_lisp_tokens(x:Tree, brackets="()"):
if len(x) > 0:
children = [tree_to_lisp_tokens(xe, brackets=brackets) for xe in x]
return [brackets[0], x.label()] + [childe for child in children for childe in child] + [brackets[1]]
else:
return [x.label()]
def tree_to_prolog(x:Tree, brackets="()"):
if len(x) > 0:
children = [tree_to_prolog(tc, brackets=brackets) for tc in x]
return f"{x.label()} {brackets[0]} {', '.join(children)} {brackets[1]}"
else:
return x.label()
def tree_size(x:Tree):
ret = sum([tree_size(xe) for xe in x])
ret += 1
return ret
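# Hedged round-trip sketch (illustrative only, mirroring the try_* helpers above):
# parse a lisp-style string into an nltk Tree and render it back in both supported
# surface syntaxes.
def _example_roundtrip():
    t = lisp_to_tree("(wife (president us))")
    print(tree_to_lisp(t))    # (wife (president us))
    print(tree_to_prolog(t))  # wife ( president ( us ) )
    print(tree_size(t))       # 3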
class ActionTree(ParentedTree):
orderless = ["and", "or"]
singlechildcollapse = ["and", "or"]
def __init__(self, node, children=None):
super(ActionTree, self).__init__(node, children=children)
self._action = None
self._align = None
def action(self):
return self._action
def set_action(self, action:str):
self._action = action
def simplify(self): # TODO
for i in range(len(self)):
child = self[i]
if child._label in self.singlechildcollapse and len(child) == 2:
self[i] = child[0]
for child in self:
child.simplify()
@classmethod
def convert(cls, tree):
"""
Convert a tree between different subtypes of Tree. ``cls`` determines
which class will be used to encode the new tree.
:type tree: Tree
:param tree: The tree that should be converted.
:return: The new Tree.
"""
if isinstance(tree, Tree):
children = [cls.convert(child) for child in tree]
ret = cls(tree._label, children)
ret._action = tree._action
return ret
else:
return tree
def eq(self, other):
assert(isinstance(other, ActionTree))
if self._label != other._label:
return False
if self._label in self.orderless:
# check if every child is in other and other contains no more
if len(self) != len(other):
return False
selfchildren = [selfchild for selfchild in self]
otherchildren = [otherchild for otherchild in other]
if not selfchildren[-1].eq(otherchildren[-1]):
return False # terminator must be same and in the end
else:
selfchildren = selfchildren[:-1]
otherchildren = otherchildren[:-1]
i = 0
while i < len(selfchildren):
selfchild = selfchildren[i]
j = 0
unbroken = True
while j < len(otherchildren):
otherchild = otherchildren[j]
if selfchild.eq(otherchild):
selfchildren.pop(i)
otherchildren.pop(j)
i -= 1
j -= 1
unbroken = False
break
j += 1
if unbroken:
return False
i += 1
if len(selfchildren) == 0 and len(otherchildren) == 0:
return True
else:
return False
else:
return all([selfchild.eq(otherchild) for selfchild, otherchild in zip(self, other)])
@classmethod
def convert(cls, tree):
"""
Convert a tree between different subtypes of Tree. ``cls`` determines
which class will be used to encode the new tree.
:type tree: Tree
:param tree: The tree that should be converted.
:return: The new Tree.
"""
if isinstance(tree, Tree):
children = [cls.convert(child) for child in tree]
ret = cls(tree._label, children)
ret._action = tree._action
ret._align = tree._align
return ret
else:
return tree
def are_equal_trees(self, other, orderless={"and", "or"}, unktoken="@UNK@", use_terminator=False):
if self is None or other is None:
return False
assert(isinstance(other, Tree) and isinstance(self, Tree))
if self._label != other._label or self._label == unktoken or other._label == unktoken:
return False
if self._label in orderless or orderless == "__ALL__":
# check if every child is in other and other contains no more
if len(self) != len(other):
return False
selfchildren = [selfchild for selfchild in self]
otherchildren = [otherchild for otherchild in other]
if use_terminator:
if not are_equal_trees(selfchildren[-1], otherchildren[-1], orderless=orderless, use_terminator=use_terminator):
return False # terminator must be same and in the end
else:
selfchildren = selfchildren[:-1]
otherchildren = otherchildren[:-1]
i = 0
while i < len(selfchildren):
selfchild = selfchildren[i]
j = 0
unbroken = True
while j < len(otherchildren):
otherchild = otherchildren[j]
if are_equal_trees(selfchild, otherchild, orderless=orderless, use_terminator=use_terminator):
selfchildren.pop(i)
otherchildren.pop(j)
i -= 1
j -= 1
unbroken = False
break
j += 1
if unbroken:
return False
i += 1
if len(selfchildren) == 0 and len(otherchildren) == 0:
return True
else:
return False
else:
if len(self) != len(other):
return False
return all([are_equal_trees(selfchild, otherchild, orderless=orderless, use_terminator=use_terminator)
for selfchild, otherchild in zip(self, other)])
def action_seq_from_tree():
# depth first action sequence from action tree with actions attached
pass # TODO
class FuncGrammar(object):
typere = re.compile("<([^>]+)>([\*\+]?)")
def __init__(self, start_type:str, **kw):
super(FuncGrammar, self).__init__(**kw)
self.rules_by_type = {}
self.rules_by_arg = {}
self.constants = {}
self.symbols = set()
self.all_rules = set()
self.start_type = start_type
self.start_types = set([start_type])
def add_rule(self, rule:str):
if rule in self.all_rules:
return
splits = rule.split(" -> ")
assert(len(splits) == 2)
t, body = splits
func_splits = body.split(" :: ")
sibl_splits = body.split(" -- ")
assert(int(len(func_splits) > 1) + int(len(sibl_splits) > 1) <= 1)
if len(func_splits) > 1:
# function rule --> add children
arg_name = func_splits[0]
argchildren = func_splits[1].split(" ")
elif len(sibl_splits) > 1:
# sibling rule --> add siblings
arg_name = sibl_splits[0]
argchildren = sibl_splits[1].split(" ")
assert(len(argchildren) == 1)
else:
assert(len(body.split(" ")) == 1)
arg_name = body
argchildren = []
if t not in self.rules_by_type:
self.rules_by_type[t] = set()
if arg_name not in self.rules_by_arg:
self.rules_by_arg[arg_name] = set()
self.rules_by_type[t].add(rule)
self.rules_by_arg[arg_name].add(rule)
self.all_rules.add(rule)
self.symbols.add(arg_name)
for argchild in argchildren:
if not self.typere.match(argchild):
self.symbols.add(argchild)
def actions_for(self, x:str, format="lisp"):
if format == "lisp":
pas = lisp_to_pas(x)
elif format == "pred" or format == "prolog":
pas = prolog_to_pas(x)
else:
raise Exception(f"unknown format {format}")
ret = self._actions_for_rec_bottom_up(pas)
return ret
def _actions_for_rec_bottom_up(self, pas):
if isinstance(pas, tuple): # has children
arg_name, children = pas
children_rules, children_types = [], []
children_ruleses = [self._actions_for_rec_bottom_up(child) for child in children]
# get child types
for _child_rules in children_ruleses:
_child_rule = _child_rules[0]
child_type, child_body = _child_rule.split(" -> ")
children_types.append(child_type)
children_rules += _child_rules
# merge siblings into single child type
if len(children_types) > 0 and children_types[-1][-1] in "*+":
# variable number of children rules
exp_child_type = children_types[-1][:-1]
for child_type in children_types[:-1]:
assert(child_type == exp_child_type)
children_types = [children_types[-1]]
else:
arg_name, children = pas, []
children_types = []
children_rules = []
# find applicable rules
rules = self.rules_by_arg[arg_name]
valid_rules = set()
# has_sibl_rules = False
# has_func_rules = False
for rule in rules:
rule_type, rule_body = rule.split(" -> ")
func_splits = rule_body.split(" :: ")
sibl_splits = rule_body.split(" -- ")
is_func_rule = len(func_splits) > 1
is_sibl_rule = len(sibl_splits) > 1
if not is_sibl_rule and not is_func_rule: # terminal
valid_rules.add(rule)
elif not is_sibl_rule: # func nonterminal
rule_arg, rule_inptypes = func_splits[0], func_splits[1].split(" ")
addit = True
if len(children_types) != len(rule_inptypes): # must have same number of children
addit = False
continue
# children must match types
for rule_inptype, child_type in zip(rule_inptypes, children_types):
if rule_inptype != child_type:
addit = False
break
if not addit:
continue
if addit:
valid_rules.add(rule)
else:
raise Exception("sibling rule syntax no longer supported")
valid_rules.add(rule)
if len(valid_rules) == 0:
raise Exception(f"can not parse, valid rules for arg '{arg_name}' not found")
elif len(valid_rules) > 1:
raise Exception(f"can not parse, multiple valid rules for arg '{arg_name}' found")
else:
rule = list(valid_rules)[0]
return [rule] + children_rules
def _actions_for_rec_top_down(self, pas, out_type:str=None):
out_types = self.start_types if out_type is None else set([out_type])
ret = []
if isinstance(pas, tuple):
arg_name, children = pas
else:
arg_name, children = pas, []
# what's this doing??
if out_type is not None and out_type not in self.rules_by_type:
assert (pas == arg_name and len(children) == 0 and arg_name == out_type)
assert (arg_name in self.symbols)
return []
# find sibling child rules
# resolve sibling children before parenting
prev_sibl_type = None
new_children = []
new_children_types = []
new_children_rules = []
for child in children:
child_arg_name = child[0] if isinstance(child, tuple) else child
if child_arg_name not in self.rules_by_arg:
new_children.append(child_arg_name)
new_children_types.append(child_arg_name)
new_children_rules.append(None)
continue
possible_child_rules = self.rules_by_arg[child_arg_name]
is_sibl = False
child_type = None
for pcr in possible_child_rules:
rule_type, body = pcr.split(" -> ")
if rule_type[-1] in "*+":
is_sibl = True
assert(len(possible_child_rules) == 1) # can't have more than one rule if has sibl rule
# do sibl rule stuff
if prev_sibl_type is None:
prev_sibl_type = rule_type
new_children_rules.append([])
assert(rule_type == prev_sibl_type)
new_children_rules[-1].append(pcr)
if " -- " not in body: # seq terminator
prev_sibl_type = None # done doing siblings
new_children.append(None)
new_children_types.append(rule_type)
else:
rule_type, body = pcr.split(" -> ")
assert(child_type is None or child_type == rule_type) # arg can have only one return type
child_type = rule_type
if not is_sibl: # normal child
assert(prev_sibl_type is None)
new_children.append(child)
new_children_types.append(child_type)
new_children_rules.append(None)
rules = self.rules_by_arg[arg_name]
valid_rules = set()
for rule in rules:
rule_type, rule_body = rule.split(" -> ")
assert(rule_type[-1] not in "+*") # no sibling rules here
func_splits = rule_body.split(" :: ")
is_func_rule = len(func_splits) > 1
addit = True
if is_func_rule:
func_arg, func_inptypes = func_splits
func_inptypes = func_inptypes.split(" ")
else:
func_arg, func_inptypes = rule_body, []
# filter by output type
if rule_type not in out_types:
addit = False
continue
# filter by number of children
if len(func_inptypes) != len(new_children_types):
addit = False
continue
# filter by children signature
for new_child_type, func_inptype in zip(new_children_types, func_inptypes):
if new_child_type is None:
if func_inptype[-1] in "*+":
addit = False
break
else:
if new_child_type != func_inptype:
addit = False
break
if not addit:
continue
if addit:
valid_rules.add(rule)
if len(valid_rules) == 0:
raise Exception(f"can not parse, valid rules for arg '{arg_name}' not found")
elif len(valid_rules) > 1:
raise Exception(f"can not parse, multiple valid rules for arg '{arg_name}' found")
else:
rule = list(valid_rules)[0]
ret.append(rule)
rule_type, rule_body = rule.split(" -> ")
assert(rule_type[-1] not in "+*")
func_splits = rule_body.split(" :: ")
is_func_rule = len(func_splits) > 1
addit = True
if is_func_rule:
func_arg, func_inptypes = func_splits
func_inptypes = func_inptypes.split(" ")
else:
func_inptypes = []
assert (len(func_inptypes) == len(new_children_types))
child_rules = []
for new_child, new_child_rules, func_inptype in zip(new_children, new_children_rules, func_inptypes):
if new_child is not None:
r = self._actions_for_rec_top_down(new_child, func_inptype)
child_rules += r
else:
child_rules += new_child_rules
# child_rules = [x for x in child_rules if x is not None]
ret = ret + child_rules #[rule for child_rule in child_rules for rule in child_rule]
return ret
def _actions_for_rec_old(self, pas, out_type:str=None):
out_type = self.start_type if out_type is None else out_type
ret = []
# region
numchildren = None
if isinstance(pas, tuple): # function
arg_name, body = pas
numchildren = len(body)
elif isinstance(pas, list):
assert(out_type[-1] in ["*", "+"])
arg_name, body = pas[0], pas[1:]
else: # not a function
arg_name, body = pas, []
numchildren = 0
if out_type not in self.rules_by_type:
assert(pas == arg_name and len(body) == 0 and arg_name == out_type)
assert(arg_name in self.symbols)
return [], []
func_rules = self.rules_by_arg[arg_name]
valid_rules = set()
# endregion
# print(arg_name, body)
if body is not None:
body_terminal_signature = [None if isinstance(x, (tuple, list)) else x for x in body]
if arg_name == "_":
print(arg_name)
for rule in func_rules:
rule_type, rule_body = rule.split(" -> ")
func_splits = rule_body.split(" :: ")
sibl_splits = rule_body.split(" -- ")
addit = True
if len(func_splits) > 1:
rule_numchildren = len(func_splits[1].split(" "))
rule_argchildren = func_splits[1].split(" ")
elif len(sibl_splits) > 1:
rule_numchildren = len(sibl_splits[1].split(" "))
rule_argchildren = sibl_splits[1].split(" ")
assert(rule_numchildren == 1)
else:
rule_numchildren = 0
if rule_type != out_type:
addit = False
continue
if len(sibl_splits) == 1:
if numchildren != rule_numchildren:
addit = False
continue
if len(func_splits) > 1 and rule_argchildren[-1][-1] not in ["+", "*"]:
for body_terminal_signature_e, rule_arg_child in zip(body_terminal_signature, rule_argchildren):
if rule_arg_child in self.rules_by_type:
continue
elif body_terminal_signature_e is not None \
and body_terminal_signature_e != rule_arg_child:
addit = False
break
if addit:
valid_rules.add(rule)
if len(valid_rules) == 0:
raise Exception(f"can not parse, valid rules for arg '{arg_name}' not found")
elif len(valid_rules) > 1:
raise Exception(f"can not parse, multiple valid rules for arg '{arg_name}' found")
else:
rule = list(valid_rules)[0]
ret.append(rule)
rule_type, rule_body = rule.split(" -> ")
rule_func_splits = rule_body.split(" :: ")
rule_sibl_splits = rule_body.split(" -- ")
assert (int(len(rule_func_splits) > 1) + int(len(rule_sibl_splits) > 1) <= 1)
if len(rule_func_splits) > 1:
rule_arg, rule_inptypes = rule_func_splits
rule_inptypes = rule_inptypes.split(" ")
assert(rule_arg == arg_name)
for rule_inptype in rule_inptypes:
if rule_inptype[-1] in ["+", "*"]:
                    child_rules, body = self._actions_for_rec_old(body, rule_inptype)
ret = ret + child_rules
else:
                    child_rules, _body = self._actions_for_rec_old(body[0], rule_inptype)
body = body[1:]
ret = ret + child_rules
return ret, body
# if len(rule_inptypes) == 1 and rule_inptypes[0][-1] in ["+", "*"]:
# child_rules = self._actions_for_rec(body, rule_inptypes[0])
# ret = ret + child_rules
# else:
# if len(rule_inptypes) != len(body):
# print(rule_inptypes)
# assert(len(rule_inptypes) == len(body))
# child_rules = [self._actions_for_rec(body_e, rule_inptype) for body_e, rule_inptype in zip(body, rule_inptypes)]
# child_rules = [x for x in child_rules if x is not None]
# ret = ret + [rule for child_rule in child_rules for rule in child_rule]
# return ret
elif len(rule_sibl_splits) > 1:
rule_arg, rule_sibltypes = rule_sibl_splits
rule_sibltypes = rule_sibltypes.split(" ")
assert(len(rule_sibltypes) == 1)
assert(rule_arg == arg_name)
if rule_sibltypes[0][-1] in ["*", "+"]:
                sibl_rules, body = self._actions_for_rec_old(body, rule_sibltypes[0])
else:
                raise NotImplementedError()
ret = ret + sibl_rules
return ret, body
else:
assert(len(rule_body.split(" ")) == 1)
return ret, body
def actions_to_tree(self, remaining_actions:List[str]):
tree, remaining_actions = self._actions_to_tree_rec(remaining_actions)
assert(len(remaining_actions) == 0)
return tree
def _actions_to_tree_rec(self, actions:List[str], out_type:str=None):
out_type = self.start_type if out_type is None else out_type
ret = ActionTree(out_type, [])
remaining_actions = actions
if out_type not in self.rules_by_type:
assert(out_type in self.symbols)
return ret, remaining_actions
while len(remaining_actions) > 0:
action = remaining_actions[0]
ret.set_action(action)
rule_type, rule_body = action.split(" -> ")
rule_func_splits = rule_body.split(" :: ")
rule_sibl_splits = rule_body.split(" -- ")
assert (int(len(rule_func_splits) > 1) + int(len(rule_sibl_splits) > 1) <= 1)
if len(rule_func_splits) > 1:
rule_arg, rule_inptypes = rule_func_splits
rule_inptypes = rule_inptypes.split(" ")
ret.set_label(rule_arg)
remaining_actions = remaining_actions[1:]
if len(rule_inptypes) == 1 and rule_inptypes[-1][-1] in "*+":
rule_inptype = rule_inptypes[-1][:-1]
terminated = False
while not terminated:
subtree, remaining_actions = self._actions_to_tree_rec(remaining_actions, out_type=rule_inptype)
ret.append(subtree)
if subtree.label() == f"{rule_inptypes[-1]}:END@":
terminated = True
else:
for rule_inptype in rule_inptypes:
subtree, remaining_actions = self._actions_to_tree_rec(remaining_actions, out_type=rule_inptype)
if isinstance(subtree, Tree):
subtree = [subtree]
for subtree_e in subtree:
ret.append(subtree_e)
return ret, remaining_actions
elif len(rule_sibl_splits) > 1:
raise Exception("sibling rules no longer supported")
rule_arg, rule_sibl_types = rule_sibl_splits
rule_sibl_types = rule_sibl_types.split(" ")
assert(len(rule_sibl_types) == 1)
ret.set_label(rule_arg)
remaining_actions = remaining_actions[1:]
siblings, remaining_actions = self._actions_to_tree_rec(remaining_actions, out_type=rule_sibl_types[-1])
if isinstance(siblings, Tree):
siblings = [siblings]
ret = [ret] + siblings
return ret, remaining_actions
else:
assert(len(rule_body.split(" ")) == 1)
ret.set_label(rule_body)
remaining_actions = remaining_actions[1:]
return ret, remaining_actions
if __name__ == '__main__':
# try_str_to_pas()
try_lisp_to_tree()
|
the-stack_106_20532
|
import random
import json
import pymsteams
import torch
from model import NeuralNet
from nltk_utils import bag_of_words, tokenize
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with open('intents.json', 'r') as json_data:
intents = json.load(json_data)
FILE = "data.pth"
data = torch.load(FILE)
input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
all_words = data['all_words']
tags = data['tags']
model_state = data["model_state"]
model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()
bot_name = "Sam"
print("Let's chat! (type 'quit' to exit)")
while True:
sentence = input("You: ")
if "send message" in sentence:
text_message = input("type your message: ")
# You must create the connectorcard object with the Microsoft Webhook URL
myTeamsMessage = pymsteams.connectorcard("<Microsoft Webhook URL>")
        # Add the user-provided text to the message.
        myTeamsMessage.text(text_message)
# send the message.
myTeamsMessage.send()
if "quit" in sentence:
break
sentence = tokenize(sentence)
X = bag_of_words(sentence, all_words)
X = X.reshape(1, X.shape[0])
X = torch.from_numpy(X).to(device)
output = model(X)
_, predicted = torch.max(output, dim=1)
tag = tags[predicted.item()]
probs = torch.softmax(output, dim=1)
prob = probs[0][predicted.item()]
if prob.item() > 0.75:
for intent in intents['intents']:
if tag == intent["tag"]:
print(f"{bot_name}: {random.choice(intent['responses'])}")
else:
print(f"{bot_name}: I do not understand...")
|
the-stack_106_20534
|
import logging
from collections import namedtuple
from six import string_types
from samtranslator.metrics.method_decorator import cw_timer
from samtranslator.model.intrinsics import ref, fnGetAtt, make_or_condition
from samtranslator.model.apigateway import (
ApiGatewayDeployment,
ApiGatewayRestApi,
ApiGatewayStage,
ApiGatewayAuthorizer,
ApiGatewayResponse,
ApiGatewayDomainName,
ApiGatewayBasePathMapping,
ApiGatewayUsagePlan,
ApiGatewayUsagePlanKey,
ApiGatewayApiKey,
)
from samtranslator.model.route53 import Route53RecordSetGroup
from samtranslator.model.exceptions import InvalidResourceException, InvalidTemplateException, InvalidDocumentException
from samtranslator.model.s3_utils.uri_parser import parse_s3_uri
from samtranslator.region_configuration import RegionConfiguration
from samtranslator.swagger.swagger import SwaggerEditor
from samtranslator.model.intrinsics import is_intrinsic, fnSub
from samtranslator.model.lambda_ import LambdaPermission
from samtranslator.translator import logical_id_generator
from samtranslator.translator.arn_generator import ArnGenerator
from samtranslator.model.tags.resource_tagging import get_tag_list
LOG = logging.getLogger(__name__)
_CORS_WILDCARD = "'*'"
CorsProperties = namedtuple(
"_CorsProperties", ["AllowMethods", "AllowHeaders", "AllowOrigin", "MaxAge", "AllowCredentials"]
)
# Default the Cors Properties to '*' wildcard and False AllowCredentials. Other properties are actually Optional
CorsProperties.__new__.__defaults__ = (None, None, _CORS_WILDCARD, None, False)
AuthProperties = namedtuple(
"_AuthProperties",
[
"Authorizers",
"DefaultAuthorizer",
"InvokeRole",
"AddDefaultAuthorizerToCorsPreflight",
"ApiKeyRequired",
"ResourcePolicy",
"UsagePlan",
],
)
AuthProperties.__new__.__defaults__ = (None, None, None, True, None, None, None)
UsagePlanProperties = namedtuple(
"_UsagePlanProperties", ["CreateUsagePlan", "Description", "Quota", "Tags", "Throttle", "UsagePlanName"]
)
UsagePlanProperties.__new__.__defaults__ = (None, None, None, None, None, None)
GatewayResponseProperties = ["ResponseParameters", "ResponseTemplates", "StatusCode"]
class SharedApiUsagePlan(object):
"""
Collects API information from different API resources in the same template,
    so that this information can be used in the shared usage plan
"""
SHARED_USAGE_PLAN_CONDITION_NAME = "SharedUsagePlanCondition"
def __init__(self):
self.usage_plan_shared = False
self.stage_keys_shared = list()
self.api_stages_shared = list()
self.depends_on_shared = list()
# shared resource level attributes
self.conditions = set()
self.any_api_without_condition = False
self.deletion_policy = None
self.update_replace_policy = None
def get_combined_resource_attributes(self, resource_attributes, conditions):
"""
This method returns a dictionary which combines 'DeletionPolicy', 'UpdateReplacePolicy' and 'Condition'
values of API definitions that could be used in Shared Usage Plan resources.
Parameters
----------
resource_attributes: Dict[str]
A dictionary of resource level attributes of the API resource
conditions: Dict[str]
Conditions section of the template
"""
self._set_deletion_policy(resource_attributes.get("DeletionPolicy"))
self._set_update_replace_policy(resource_attributes.get("UpdateReplacePolicy"))
self._set_condition(resource_attributes.get("Condition"), conditions)
combined_resource_attributes = dict()
if self.deletion_policy:
combined_resource_attributes["DeletionPolicy"] = self.deletion_policy
if self.update_replace_policy:
combined_resource_attributes["UpdateReplacePolicy"] = self.update_replace_policy
        # do not set Condition if any of the API resources does not have a Condition in it
if self.conditions and not self.any_api_without_condition:
combined_resource_attributes["Condition"] = SharedApiUsagePlan.SHARED_USAGE_PLAN_CONDITION_NAME
return combined_resource_attributes
def _set_deletion_policy(self, deletion_policy):
if deletion_policy:
if self.deletion_policy:
# update only if new deletion policy is Retain
if deletion_policy == "Retain":
self.deletion_policy = deletion_policy
else:
self.deletion_policy = deletion_policy
def _set_update_replace_policy(self, update_replace_policy):
if update_replace_policy:
if self.update_replace_policy:
                # update if the new value is Retain, or if the new value is Snapshot
                # and the current value is Delete
if (update_replace_policy == "Retain") or (
update_replace_policy == "Snapshot" and self.update_replace_policy == "Delete"
):
self.update_replace_policy = update_replace_policy
else:
self.update_replace_policy = update_replace_policy
def _set_condition(self, condition, template_conditions):
        # if there is any API without a condition, then skip
if self.any_api_without_condition:
return
if condition and condition not in self.conditions:
if template_conditions is None:
raise InvalidTemplateException(
"Can't have condition without having 'Conditions' section in the template"
)
if self.conditions:
self.conditions.add(condition)
or_condition = make_or_condition(self.conditions)
template_conditions[SharedApiUsagePlan.SHARED_USAGE_PLAN_CONDITION_NAME] = or_condition
else:
self.conditions.add(condition)
template_conditions[SharedApiUsagePlan.SHARED_USAGE_PLAN_CONDITION_NAME] = condition
elif condition is None:
self.any_api_without_condition = True
if template_conditions and SharedApiUsagePlan.SHARED_USAGE_PLAN_CONDITION_NAME in template_conditions:
del template_conditions[SharedApiUsagePlan.SHARED_USAGE_PLAN_CONDITION_NAME]
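# Illustrative sketch (hypothetical values, not part of the translator): given two SAM APIs that
# both opt into a SHARED usage plan, one with Condition "CondA" and one with Condition "CondB",
# get_combined_resource_attributes() collects both conditions and points the shared resources at a
# combined Or condition named "SharedUsagePlanCondition"; if any API has no Condition at all, the
# shared condition is removed again so the shared usage plan is always created.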
class ApiGenerator(object):
def __init__(
self,
logical_id,
cache_cluster_enabled,
cache_cluster_size,
variables,
depends_on,
definition_body,
definition_uri,
name,
stage_name,
shared_api_usage_plan,
template_conditions,
tags=None,
endpoint_configuration=None,
method_settings=None,
binary_media=None,
minimum_compression_size=None,
cors=None,
auth=None,
gateway_responses=None,
access_log_setting=None,
canary_setting=None,
tracing_enabled=None,
resource_attributes=None,
passthrough_resource_attributes=None,
open_api_version=None,
models=None,
domain=None,
description=None,
mode=None,
):
"""Constructs an API Generator class that generates API Gateway resources
:param logical_id: Logical id of the SAM API Resource
:param cache_cluster_enabled: Whether cache cluster is enabled
:param cache_cluster_size: Size of the cache cluster
:param variables: API Gateway Variables
:param depends_on: Any resources that need to be depended on
:param definition_body: API definition
:param definition_uri: URI to API definition
:param name: Name of the API Gateway resource
:param stage_name: Name of the Stage
:param tags: Stage Tags
:param access_log_setting: Whether to send access logs and where for Stage
:param canary_setting: Canary Setting for Stage
:param tracing_enabled: Whether active tracing with X-ray is enabled
:param resource_attributes: Resource attributes to add to API resources
:param passthrough_resource_attributes: Attributes such as `Condition` that are added to derived resources
:param models: Model definitions to be used by API methods
:param description: Description of the API Gateway resource
"""
self.logical_id = logical_id
self.cache_cluster_enabled = cache_cluster_enabled
self.cache_cluster_size = cache_cluster_size
self.variables = variables
self.depends_on = depends_on
self.definition_body = definition_body
self.definition_uri = definition_uri
self.name = name
self.stage_name = stage_name
self.tags = tags
self.endpoint_configuration = endpoint_configuration
self.method_settings = method_settings
self.binary_media = binary_media
self.minimum_compression_size = minimum_compression_size
self.cors = cors
self.auth = auth
self.gateway_responses = gateway_responses
self.access_log_setting = access_log_setting
self.canary_setting = canary_setting
self.tracing_enabled = tracing_enabled
self.resource_attributes = resource_attributes
self.passthrough_resource_attributes = passthrough_resource_attributes
self.open_api_version = open_api_version
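        # When OpenApiVersion is set, SAM also stops emitting the implicit "Stage" stage on the
        # deployment (see _construct_deployment), so the same flag doubles as remove_extra_stage.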
self.remove_extra_stage = open_api_version
self.models = models
self.domain = domain
self.description = description
self.shared_api_usage_plan = shared_api_usage_plan
self.template_conditions = template_conditions
self.mode = mode
def _construct_rest_api(self):
"""Constructs and returns the ApiGateway RestApi.
:returns: the RestApi to which this SAM Api corresponds
:rtype: model.apigateway.ApiGatewayRestApi
"""
rest_api = ApiGatewayRestApi(self.logical_id, depends_on=self.depends_on, attributes=self.resource_attributes)
# NOTE: For backwards compatibility we need to retain BinaryMediaTypes on the CloudFormation Property
# Removing this and only setting x-amazon-apigateway-binary-media-types results in other issues.
rest_api.BinaryMediaTypes = self.binary_media
rest_api.MinimumCompressionSize = self.minimum_compression_size
if self.endpoint_configuration:
self._set_endpoint_configuration(rest_api, self.endpoint_configuration)
elif not RegionConfiguration.is_apigw_edge_configuration_supported():
# Since this region does not support EDGE configuration, we explicitly set the endpoint type
# to Regional which is the only supported config.
self._set_endpoint_configuration(rest_api, "REGIONAL")
if self.definition_uri and self.definition_body:
raise InvalidResourceException(
self.logical_id, "Specify either 'DefinitionUri' or 'DefinitionBody' property and not both."
)
if self.open_api_version:
if not SwaggerEditor.safe_compare_regex_with_string(
SwaggerEditor.get_openapi_versions_supported_regex(), self.open_api_version
):
raise InvalidResourceException(
self.logical_id, "The OpenApiVersion value must be of the format '3.0.0'."
)
self._add_cors()
self._add_auth()
self._add_gateway_responses()
self._add_binary_media_types()
self._add_models()
if self.definition_uri:
rest_api.BodyS3Location = self._construct_body_s3_dict()
elif self.definition_body:
            # Post process OpenApi Auth settings
self.definition_body = self._openapi_postprocess(self.definition_body)
rest_api.Body = self.definition_body
if self.name:
rest_api.Name = self.name
if self.description:
rest_api.Description = self.description
if self.mode:
rest_api.Mode = self.mode
return rest_api
def _construct_body_s3_dict(self):
"""Constructs the RestApi's `BodyS3Location property`_, from the SAM Api's DefinitionUri property.
:returns: a BodyS3Location dict, containing the S3 Bucket, Key, and Version of the Swagger definition
:rtype: dict
"""
if isinstance(self.definition_uri, dict):
if not self.definition_uri.get("Bucket", None) or not self.definition_uri.get("Key", None):
# DefinitionUri is a dictionary but does not contain Bucket or Key property
raise InvalidResourceException(
self.logical_id, "'DefinitionUri' requires Bucket and Key properties to be specified."
)
s3_pointer = self.definition_uri
else:
# DefinitionUri is a string
s3_pointer = parse_s3_uri(self.definition_uri)
if s3_pointer is None:
raise InvalidResourceException(
self.logical_id,
"'DefinitionUri' is not a valid S3 Uri of the form "
"'s3://bucket/key' with optional versionId query parameter.",
)
body_s3 = {"Bucket": s3_pointer["Bucket"], "Key": s3_pointer["Key"]}
if "Version" in s3_pointer:
body_s3["Version"] = s3_pointer["Version"]
return body_s3
def _construct_deployment(self, rest_api):
"""Constructs and returns the ApiGateway Deployment.
:param model.apigateway.ApiGatewayRestApi rest_api: the RestApi for this Deployment
:returns: the Deployment to which this SAM Api corresponds
:rtype: model.apigateway.ApiGatewayDeployment
"""
deployment = ApiGatewayDeployment(
self.logical_id + "Deployment", attributes=self.passthrough_resource_attributes
)
deployment.RestApiId = rest_api.get_runtime_attr("rest_api_id")
if not self.remove_extra_stage:
deployment.StageName = "Stage"
return deployment
def _construct_stage(self, deployment, swagger, redeploy_restapi_parameters):
"""Constructs and returns the ApiGateway Stage.
:param model.apigateway.ApiGatewayDeployment deployment: the Deployment for this Stage
:returns: the Stage to which this SAM Api corresponds
:rtype: model.apigateway.ApiGatewayStage
"""
# If StageName is some intrinsic function, then don't prefix the Stage's logical ID
# This will NOT create duplicates because we allow only ONE stage per API resource
stage_name_prefix = self.stage_name if isinstance(self.stage_name, string_types) else ""
if stage_name_prefix.isalnum():
stage_logical_id = self.logical_id + stage_name_prefix + "Stage"
else:
generator = logical_id_generator.LogicalIdGenerator(self.logical_id + "Stage", stage_name_prefix)
stage_logical_id = generator.gen()
stage = ApiGatewayStage(stage_logical_id, attributes=self.passthrough_resource_attributes)
stage.RestApiId = ref(self.logical_id)
stage.update_deployment_ref(deployment.logical_id)
stage.StageName = self.stage_name
stage.CacheClusterEnabled = self.cache_cluster_enabled
stage.CacheClusterSize = self.cache_cluster_size
stage.Variables = self.variables
stage.MethodSettings = self.method_settings
stage.AccessLogSetting = self.access_log_setting
stage.CanarySetting = self.canary_setting
stage.TracingEnabled = self.tracing_enabled
if swagger is not None:
deployment.make_auto_deployable(
stage, self.remove_extra_stage, swagger, self.domain, redeploy_restapi_parameters
)
if self.tags is not None:
stage.Tags = get_tag_list(self.tags)
return stage
def _construct_api_domain(self, rest_api):
"""
Constructs and returns the ApiGateway Domain and BasepathMapping
"""
if self.domain is None:
return None, None, None
if self.domain.get("DomainName") is None or self.domain.get("CertificateArn") is None:
raise InvalidResourceException(
self.logical_id, "Custom Domains only works if both DomainName and CertificateArn" " are provided."
)
self.domain["ApiDomainName"] = "{}{}".format(
"ApiGatewayDomainName", logical_id_generator.LogicalIdGenerator("", self.domain.get("DomainName")).gen()
)
domain = ApiGatewayDomainName(self.domain.get("ApiDomainName"), attributes=self.passthrough_resource_attributes)
domain.DomainName = self.domain.get("DomainName")
endpoint = self.domain.get("EndpointConfiguration")
if endpoint is None:
endpoint = "REGIONAL"
self.domain["EndpointConfiguration"] = "REGIONAL"
elif endpoint not in ["EDGE", "REGIONAL", "PRIVATE"]:
raise InvalidResourceException(
self.logical_id,
"EndpointConfiguration for Custom Domains must be"
" one of {}.".format(["EDGE", "REGIONAL", "PRIVATE"]),
)
if endpoint == "REGIONAL":
domain.RegionalCertificateArn = self.domain.get("CertificateArn")
else:
domain.CertificateArn = self.domain.get("CertificateArn")
domain.EndpointConfiguration = {"Types": [endpoint]}
mutual_tls_auth = self.domain.get("MutualTlsAuthentication", None)
if mutual_tls_auth:
if isinstance(mutual_tls_auth, dict):
if not set(mutual_tls_auth.keys()).issubset({"TruststoreUri", "TruststoreVersion"}):
invalid_keys = list()
for key in mutual_tls_auth.keys():
if not key in {"TruststoreUri", "TruststoreVersion"}:
invalid_keys.append(key)
invalid_keys.sort()
raise InvalidResourceException(
",".join(invalid_keys),
"Available MutualTlsAuthentication fields are {}.".format(
["TruststoreUri", "TruststoreVersion"]
),
)
domain.MutualTlsAuthentication = {}
if mutual_tls_auth.get("TruststoreUri", None):
domain.MutualTlsAuthentication["TruststoreUri"] = mutual_tls_auth["TruststoreUri"]
if mutual_tls_auth.get("TruststoreVersion", None):
domain.MutualTlsAuthentication["TruststoreVersion"] = mutual_tls_auth["TruststoreVersion"]
else:
raise InvalidResourceException(
mutual_tls_auth,
"MutualTlsAuthentication must be a map with at least one of the following fields {}.".format(
["TruststoreUri", "TruststoreVersion"]
),
)
if self.domain.get("SecurityPolicy", None):
domain.SecurityPolicy = self.domain["SecurityPolicy"]
# Create BasepathMappings
if self.domain.get("BasePath") and isinstance(self.domain.get("BasePath"), string_types):
basepaths = [self.domain.get("BasePath")]
elif self.domain.get("BasePath") and isinstance(self.domain.get("BasePath"), list):
basepaths = self.domain.get("BasePath")
else:
basepaths = None
basepath_resource_list = []
if basepaths is None:
basepath_mapping = ApiGatewayBasePathMapping(
self.logical_id + "BasePathMapping", attributes=self.passthrough_resource_attributes
)
basepath_mapping.DomainName = ref(self.domain.get("ApiDomainName"))
basepath_mapping.RestApiId = ref(rest_api.logical_id)
basepath_mapping.Stage = ref(rest_api.logical_id + ".Stage")
basepath_resource_list.extend([basepath_mapping])
else:
for path in basepaths:
path = "".join(e for e in path if e.isalnum())
logical_id = "{}{}{}".format(self.logical_id, path, "BasePathMapping")
basepath_mapping = ApiGatewayBasePathMapping(
logical_id, attributes=self.passthrough_resource_attributes
)
basepath_mapping.DomainName = ref(self.domain.get("ApiDomainName"))
basepath_mapping.RestApiId = ref(rest_api.logical_id)
basepath_mapping.Stage = ref(rest_api.logical_id + ".Stage")
basepath_mapping.BasePath = path
basepath_resource_list.extend([basepath_mapping])
# Create the Route53 RecordSetGroup resource
record_set_group = None
if self.domain.get("Route53") is not None:
route53 = self.domain.get("Route53")
if route53.get("HostedZoneId") is None and route53.get("HostedZoneName") is None:
raise InvalidResourceException(
self.logical_id,
"HostedZoneId or HostedZoneName is required to enable Route53 support on Custom Domains.",
)
logical_id = logical_id_generator.LogicalIdGenerator(
"", route53.get("HostedZoneId") or route53.get("HostedZoneName")
).gen()
record_set_group = Route53RecordSetGroup(
"RecordSetGroup" + logical_id, attributes=self.passthrough_resource_attributes
)
if "HostedZoneId" in route53:
record_set_group.HostedZoneId = route53.get("HostedZoneId")
if "HostedZoneName" in route53:
record_set_group.HostedZoneName = route53.get("HostedZoneName")
record_set_group.RecordSets = self._construct_record_sets_for_domain(self.domain)
return domain, basepath_resource_list, record_set_group
def _construct_record_sets_for_domain(self, domain):
recordset_list = []
recordset = {}
route53 = domain.get("Route53")
recordset["Name"] = domain.get("DomainName")
recordset["Type"] = "A"
recordset["AliasTarget"] = self._construct_alias_target(self.domain)
recordset_list.extend([recordset])
recordset_ipv6 = {}
if route53.get("IpV6") is not None and route53.get("IpV6") is True:
recordset_ipv6["Name"] = domain.get("DomainName")
recordset_ipv6["Type"] = "AAAA"
recordset_ipv6["AliasTarget"] = self._construct_alias_target(self.domain)
recordset_list.extend([recordset_ipv6])
return recordset_list
def _construct_alias_target(self, domain):
alias_target = {}
route53 = domain.get("Route53")
target_health = route53.get("EvaluateTargetHealth")
if target_health is not None:
alias_target["EvaluateTargetHealth"] = target_health
if domain.get("EndpointConfiguration") == "REGIONAL":
alias_target["HostedZoneId"] = fnGetAtt(self.domain.get("ApiDomainName"), "RegionalHostedZoneId")
alias_target["DNSName"] = fnGetAtt(self.domain.get("ApiDomainName"), "RegionalDomainName")
else:
if route53.get("DistributionDomainName") is None:
route53["DistributionDomainName"] = fnGetAtt(self.domain.get("ApiDomainName"), "DistributionDomainName")
alias_target["HostedZoneId"] = "Z2FDTNDATAQYW2"
alias_target["DNSName"] = route53.get("DistributionDomainName")
return alias_target
@cw_timer(prefix="Generator", name="Api")
def to_cloudformation(self, redeploy_restapi_parameters):
"""Generates CloudFormation resources from a SAM API resource
:returns: a tuple containing the RestApi, Deployment, and Stage for an empty Api.
:rtype: tuple
"""
rest_api = self._construct_rest_api()
domain, basepath_mapping, route53 = self._construct_api_domain(rest_api)
deployment = self._construct_deployment(rest_api)
swagger = None
if rest_api.Body is not None:
swagger = rest_api.Body
elif rest_api.BodyS3Location is not None:
swagger = rest_api.BodyS3Location
stage = self._construct_stage(deployment, swagger, redeploy_restapi_parameters)
permissions = self._construct_authorizer_lambda_permission()
usage_plan = self._construct_usage_plan(rest_api_stage=stage)
return rest_api, deployment, stage, permissions, domain, basepath_mapping, route53, usage_plan
def _add_cors(self):
"""
Add CORS configuration to the Swagger file, if necessary
"""
INVALID_ERROR = "Invalid value for 'Cors' property"
if not self.cors:
return
if self.cors and not self.definition_body:
raise InvalidResourceException(
self.logical_id, "Cors works only with inline Swagger specified in 'DefinitionBody' property."
)
if isinstance(self.cors, string_types) or is_intrinsic(self.cors):
# Just set Origin property. Others will be defaults
properties = CorsProperties(AllowOrigin=self.cors)
elif isinstance(self.cors, dict):
# Make sure keys in the dict are recognized
if not all(key in CorsProperties._fields for key in self.cors.keys()):
raise InvalidResourceException(self.logical_id, INVALID_ERROR)
properties = CorsProperties(**self.cors)
else:
raise InvalidResourceException(self.logical_id, INVALID_ERROR)
if not SwaggerEditor.is_valid(self.definition_body):
raise InvalidResourceException(
self.logical_id,
"Unable to add Cors configuration because "
"'DefinitionBody' does not contain a valid Swagger definition.",
)
if properties.AllowCredentials is True and properties.AllowOrigin == _CORS_WILDCARD:
raise InvalidResourceException(
self.logical_id,
"Unable to add Cors configuration because "
"'AllowCredentials' can not be true when "
"'AllowOrigin' is \"'*'\" or not set",
)
editor = SwaggerEditor(self.definition_body)
for path in editor.iter_on_path():
try:
editor.add_cors(
path,
properties.AllowOrigin,
properties.AllowHeaders,
properties.AllowMethods,
max_age=properties.MaxAge,
allow_credentials=properties.AllowCredentials,
)
except InvalidTemplateException as ex:
raise InvalidResourceException(self.logical_id, ex.message)
# Assign the Swagger back to template
self.definition_body = editor.swagger
def _add_binary_media_types(self):
"""
Add binary media types to Swagger
"""
if not self.binary_media:
return
# We don't raise an error here like we do for similar cases because that would be backwards incompatible
if self.binary_media and not self.definition_body:
return
editor = SwaggerEditor(self.definition_body)
editor.add_binary_media_types(self.binary_media)
# Assign the Swagger back to template
self.definition_body = editor.swagger
def _add_auth(self):
"""
Add Auth configuration to the Swagger file, if necessary
"""
if not self.auth:
return
if self.auth and not self.definition_body:
raise InvalidResourceException(
self.logical_id, "Auth works only with inline Swagger specified in " "'DefinitionBody' property."
)
# Make sure keys in the dict are recognized
if not all(key in AuthProperties._fields for key in self.auth.keys()):
raise InvalidResourceException(self.logical_id, "Invalid value for 'Auth' property")
if not SwaggerEditor.is_valid(self.definition_body):
raise InvalidResourceException(
self.logical_id,
"Unable to add Auth configuration because "
"'DefinitionBody' does not contain a valid Swagger definition.",
)
swagger_editor = SwaggerEditor(self.definition_body)
auth_properties = AuthProperties(**self.auth)
authorizers = self._get_authorizers(auth_properties.Authorizers, auth_properties.DefaultAuthorizer)
if authorizers:
swagger_editor.add_authorizers_security_definitions(authorizers)
self._set_default_authorizer(
swagger_editor,
authorizers,
auth_properties.DefaultAuthorizer,
auth_properties.AddDefaultAuthorizerToCorsPreflight,
auth_properties.Authorizers,
)
if auth_properties.ApiKeyRequired:
swagger_editor.add_apikey_security_definition()
self._set_default_apikey_required(swagger_editor)
if auth_properties.ResourcePolicy:
for path in swagger_editor.iter_on_path():
swagger_editor.add_resource_policy(auth_properties.ResourcePolicy, path, self.stage_name)
if auth_properties.ResourcePolicy.get("CustomStatements"):
swagger_editor.add_custom_statements(auth_properties.ResourcePolicy.get("CustomStatements"))
self.definition_body = self._openapi_postprocess(swagger_editor.swagger)
def _construct_usage_plan(self, rest_api_stage=None):
"""Constructs and returns the ApiGateway UsagePlan, ApiGateway UsagePlanKey, ApiGateway ApiKey for Auth.
:param model.apigateway.ApiGatewayStage stage: the stage of rest api
:returns: UsagePlan, UsagePlanKey, ApiKey for this rest Api
:rtype: model.apigateway.ApiGatewayUsagePlan, model.apigateway.ApiGatewayUsagePlanKey,
model.apigateway.ApiGatewayApiKey
"""
create_usage_plans_accepted_values = ["SHARED", "PER_API", "NONE"]
if not self.auth:
return []
auth_properties = AuthProperties(**self.auth)
if auth_properties.UsagePlan is None:
return []
usage_plan_properties = auth_properties.UsagePlan
# throws error if UsagePlan is not a dict
if not isinstance(usage_plan_properties, dict):
raise InvalidResourceException(self.logical_id, "'UsagePlan' must be a dictionary")
        # throws error if a property is invalid or unsupported for UsagePlan
if not all(key in UsagePlanProperties._fields for key in usage_plan_properties.keys()):
raise InvalidResourceException(self.logical_id, "Invalid property for 'UsagePlan'")
create_usage_plan = usage_plan_properties.get("CreateUsagePlan")
usage_plan = None
api_key = None
usage_plan_key = None
if create_usage_plan is None:
raise InvalidResourceException(self.logical_id, "'CreateUsagePlan' is a required field for UsagePlan.")
if create_usage_plan not in create_usage_plans_accepted_values:
raise InvalidResourceException(
self.logical_id, "'CreateUsagePlan' accepts one of {}.".format(create_usage_plans_accepted_values)
)
if create_usage_plan == "NONE":
return []
# create usage plan for this api only
elif usage_plan_properties.get("CreateUsagePlan") == "PER_API":
usage_plan_logical_id = self.logical_id + "UsagePlan"
usage_plan = ApiGatewayUsagePlan(
logical_id=usage_plan_logical_id,
depends_on=[self.logical_id],
attributes=self.passthrough_resource_attributes,
)
api_stages = list()
api_stage = dict()
api_stage["ApiId"] = ref(self.logical_id)
api_stage["Stage"] = ref(rest_api_stage.logical_id)
api_stages.append(api_stage)
usage_plan.ApiStages = api_stages
api_key = self._construct_api_key(usage_plan_logical_id, create_usage_plan, rest_api_stage)
usage_plan_key = self._construct_usage_plan_key(usage_plan_logical_id, create_usage_plan, api_key)
# create a usage plan for all the Apis
elif create_usage_plan == "SHARED":
LOG.info("Creating SHARED usage plan for all the Apis")
usage_plan_logical_id = "ServerlessUsagePlan"
if self.logical_id not in self.shared_api_usage_plan.depends_on_shared:
self.shared_api_usage_plan.depends_on_shared.append(self.logical_id)
usage_plan = ApiGatewayUsagePlan(
logical_id=usage_plan_logical_id,
depends_on=self.shared_api_usage_plan.depends_on_shared,
attributes=self.shared_api_usage_plan.get_combined_resource_attributes(
self.passthrough_resource_attributes, self.template_conditions
),
)
api_stage = dict()
api_stage["ApiId"] = ref(self.logical_id)
api_stage["Stage"] = ref(rest_api_stage.logical_id)
if api_stage not in self.shared_api_usage_plan.api_stages_shared:
self.shared_api_usage_plan.api_stages_shared.append(api_stage)
usage_plan.ApiStages = self.shared_api_usage_plan.api_stages_shared
api_key = self._construct_api_key(usage_plan_logical_id, create_usage_plan, rest_api_stage)
usage_plan_key = self._construct_usage_plan_key(usage_plan_logical_id, create_usage_plan, api_key)
if usage_plan_properties.get("UsagePlanName"):
usage_plan.UsagePlanName = usage_plan_properties.get("UsagePlanName")
if usage_plan_properties.get("Description"):
usage_plan.Description = usage_plan_properties.get("Description")
if usage_plan_properties.get("Quota"):
usage_plan.Quota = usage_plan_properties.get("Quota")
if usage_plan_properties.get("Tags"):
usage_plan.Tags = usage_plan_properties.get("Tags")
if usage_plan_properties.get("Throttle"):
usage_plan.Throttle = usage_plan_properties.get("Throttle")
return usage_plan, api_key, usage_plan_key
def _construct_api_key(self, usage_plan_logical_id, create_usage_plan, rest_api_stage):
"""
:param usage_plan_logical_id: String
:param create_usage_plan: String
:param rest_api_stage: model.apigateway.ApiGatewayStage stage: the stage of rest api
:return: api_key model.apigateway.ApiGatewayApiKey resource which is created for the given usage plan
"""
if create_usage_plan == "SHARED":
# create an api key resource for all the apis
LOG.info("Creating api key resource for all the Apis from SHARED usage plan")
api_key_logical_id = "ServerlessApiKey"
api_key = ApiGatewayApiKey(
logical_id=api_key_logical_id,
depends_on=[usage_plan_logical_id],
attributes=self.shared_api_usage_plan.get_combined_resource_attributes(
self.passthrough_resource_attributes, self.template_conditions
),
)
api_key.Enabled = True
stage_key = dict()
stage_key["RestApiId"] = ref(self.logical_id)
stage_key["StageName"] = ref(rest_api_stage.logical_id)
if stage_key not in self.shared_api_usage_plan.stage_keys_shared:
self.shared_api_usage_plan.stage_keys_shared.append(stage_key)
api_key.StageKeys = self.shared_api_usage_plan.stage_keys_shared
# for create_usage_plan = "PER_API"
else:
# create an api key resource for this api
api_key_logical_id = self.logical_id + "ApiKey"
api_key = ApiGatewayApiKey(
logical_id=api_key_logical_id,
depends_on=[usage_plan_logical_id],
attributes=self.passthrough_resource_attributes,
)
api_key.Enabled = True
stage_keys = list()
stage_key = dict()
stage_key["RestApiId"] = ref(self.logical_id)
stage_key["StageName"] = ref(rest_api_stage.logical_id)
stage_keys.append(stage_key)
api_key.StageKeys = stage_keys
return api_key
def _construct_usage_plan_key(self, usage_plan_logical_id, create_usage_plan, api_key):
"""
:param usage_plan_logical_id: String
:param create_usage_plan: String
:param api_key: model.apigateway.ApiGatewayApiKey resource
:return: model.apigateway.ApiGatewayUsagePlanKey resource that contains the mapping between usage plan and api key
"""
if create_usage_plan == "SHARED":
# create a mapping between api key and the usage plan
usage_plan_key_logical_id = "ServerlessUsagePlanKey"
resource_attributes = self.shared_api_usage_plan.get_combined_resource_attributes(
self.passthrough_resource_attributes, self.template_conditions
)
# for create_usage_plan = "PER_API"
else:
# create a mapping between api key and the usage plan
usage_plan_key_logical_id = self.logical_id + "UsagePlanKey"
resource_attributes = self.passthrough_resource_attributes
usage_plan_key = ApiGatewayUsagePlanKey(
logical_id=usage_plan_key_logical_id,
depends_on=[api_key.logical_id],
attributes=resource_attributes,
)
usage_plan_key.KeyId = ref(api_key.logical_id)
usage_plan_key.KeyType = "API_KEY"
usage_plan_key.UsagePlanId = ref(usage_plan_logical_id)
return usage_plan_key
def _add_gateway_responses(self):
"""
Add Gateway Response configuration to the Swagger file, if necessary
"""
if not self.gateway_responses:
return
if self.gateway_responses and not self.definition_body:
raise InvalidResourceException(
self.logical_id,
"GatewayResponses works only with inline Swagger specified in " "'DefinitionBody' property.",
)
# Make sure keys in the dict are recognized
for responses_key, responses_value in self.gateway_responses.items():
if is_intrinsic(responses_value):
# TODO: Add intrinsic support for this field.
raise InvalidResourceException(
self.logical_id,
"Unable to set GatewayResponses attribute because "
"intrinsic functions are not supported for this field.",
)
elif not isinstance(responses_value, dict):
raise InvalidResourceException(
self.logical_id,
"Invalid property type '{}' for GatewayResponses. "
"Expected an object of type 'GatewayResponse'.".format(type(responses_value).__name__),
)
for response_key in responses_value.keys():
if response_key not in GatewayResponseProperties:
raise InvalidResourceException(
self.logical_id,
"Invalid property '{}' in 'GatewayResponses' property '{}'.".format(
response_key, responses_key
),
)
if not SwaggerEditor.is_valid(self.definition_body):
raise InvalidResourceException(
self.logical_id,
"Unable to add Auth configuration because "
"'DefinitionBody' does not contain a valid Swagger definition.",
)
swagger_editor = SwaggerEditor(self.definition_body)
gateway_responses = {}
for response_type, response in self.gateway_responses.items():
gateway_responses[response_type] = ApiGatewayResponse(
api_logical_id=self.logical_id,
response_parameters=response.get("ResponseParameters", {}),
response_templates=response.get("ResponseTemplates", {}),
status_code=response.get("StatusCode", None),
)
if gateway_responses:
swagger_editor.add_gateway_responses(gateway_responses)
# Assign the Swagger back to template
self.definition_body = swagger_editor.swagger
def _add_models(self):
"""
Add Model definitions to the Swagger file, if necessary
:return:
"""
if not self.models:
return
if self.models and not self.definition_body:
raise InvalidResourceException(
self.logical_id, "Models works only with inline Swagger specified in " "'DefinitionBody' property."
)
if not SwaggerEditor.is_valid(self.definition_body):
raise InvalidResourceException(
self.logical_id,
"Unable to add Models definitions because "
"'DefinitionBody' does not contain a valid Swagger definition.",
)
if not all(isinstance(model, dict) for model in self.models.values()):
raise InvalidResourceException(self.logical_id, "Invalid value for 'Models' property")
swagger_editor = SwaggerEditor(self.definition_body)
swagger_editor.add_models(self.models)
# Assign the Swagger back to template
self.definition_body = self._openapi_postprocess(swagger_editor.swagger)
def _openapi_postprocess(self, definition_body):
"""
Convert definitions to openapi 3 in definition body if OpenApiVersion flag is specified.
        If there is swagger defined in the definition body, we treat it as a swagger spec and do not
make any openapi 3 changes to it
"""
if definition_body.get("swagger") is not None:
return definition_body
if definition_body.get("openapi") is not None and self.open_api_version is None:
self.open_api_version = definition_body.get("openapi")
if self.open_api_version and SwaggerEditor.safe_compare_regex_with_string(
SwaggerEditor.get_openapi_version_3_regex(), self.open_api_version
):
if definition_body.get("securityDefinitions"):
components = definition_body.get("components", {})
components["securitySchemes"] = definition_body["securityDefinitions"]
definition_body["components"] = components
del definition_body["securityDefinitions"]
if definition_body.get("definitions"):
components = definition_body.get("components", {})
components["schemas"] = definition_body["definitions"]
definition_body["components"] = components
del definition_body["definitions"]
# removes `consumes` and `produces` options for CORS in openapi3 and
# adds `schema` for the headers in responses for openapi3
if definition_body.get("paths"):
for path in definition_body.get("paths"):
if definition_body.get("paths").get(path).get("options"):
definition_body_options = definition_body.get("paths").get(path).get("options").copy()
for field in definition_body_options.keys():
# remove unsupported produces and consumes in options for openapi3
if field in ["produces", "consumes"]:
del definition_body["paths"][path]["options"][field]
# add schema for the headers in options section for openapi3
if field in ["responses"]:
options_path = definition_body["paths"][path]["options"]
if options_path and not isinstance(options_path.get(field), dict):
raise InvalidDocumentException(
[
InvalidTemplateException(
"Value of responses in options method for path {} must be a "
"dictionary according to Swagger spec.".format(path)
)
]
)
if (
options_path
and options_path.get(field).get("200")
and options_path.get(field).get("200").get("headers")
):
headers = definition_body["paths"][path]["options"][field]["200"]["headers"]
for header in headers.keys():
header_value = {
"schema": definition_body["paths"][path]["options"][field]["200"][
"headers"
][header]
}
definition_body["paths"][path]["options"][field]["200"]["headers"][
header
] = header_value
return definition_body
def _get_authorizers(self, authorizers_config, default_authorizer=None):
authorizers = {}
if default_authorizer == "AWS_IAM":
authorizers[default_authorizer] = ApiGatewayAuthorizer(
api_logical_id=self.logical_id, name=default_authorizer, is_aws_iam_authorizer=True
)
if not authorizers_config:
if "AWS_IAM" in authorizers:
return authorizers
return None
if not isinstance(authorizers_config, dict):
raise InvalidResourceException(self.logical_id, "Authorizers must be a dictionary.")
for authorizer_name, authorizer in authorizers_config.items():
if not isinstance(authorizer, dict):
raise InvalidResourceException(
self.logical_id, "Authorizer %s must be a dictionary." % (authorizer_name)
)
authorizers[authorizer_name] = ApiGatewayAuthorizer(
api_logical_id=self.logical_id,
name=authorizer_name,
user_pool_arn=authorizer.get("UserPoolArn"),
function_arn=authorizer.get("FunctionArn"),
identity=authorizer.get("Identity"),
function_payload_type=authorizer.get("FunctionPayloadType"),
function_invoke_role=authorizer.get("FunctionInvokeRole"),
authorization_scopes=authorizer.get("AuthorizationScopes"),
)
return authorizers
def _get_permission(self, authorizer_name, authorizer_lambda_function_arn):
"""Constructs and returns the Lambda Permission resource allowing the Authorizer to invoke the function.
:returns: the permission resource
:rtype: model.lambda_.LambdaPermission
"""
rest_api = ApiGatewayRestApi(self.logical_id, depends_on=self.depends_on, attributes=self.resource_attributes)
api_id = rest_api.get_runtime_attr("rest_api_id")
partition = ArnGenerator.get_partition_name()
resource = "${__ApiId__}/authorizers/*"
source_arn = fnSub(
ArnGenerator.generate_arn(partition=partition, service="execute-api", resource=resource),
{"__ApiId__": api_id},
)
lambda_permission = LambdaPermission(
self.logical_id + authorizer_name + "AuthorizerPermission", attributes=self.passthrough_resource_attributes
)
lambda_permission.Action = "lambda:InvokeFunction"
lambda_permission.FunctionName = authorizer_lambda_function_arn
lambda_permission.Principal = "apigateway.amazonaws.com"
lambda_permission.SourceArn = source_arn
return lambda_permission
def _construct_authorizer_lambda_permission(self):
if not self.auth:
return []
auth_properties = AuthProperties(**self.auth)
authorizers = self._get_authorizers(auth_properties.Authorizers)
if not authorizers:
return []
permissions = []
for authorizer_name, authorizer in authorizers.items():
# Construct permissions for Lambda Authorizers only
if not authorizer.function_arn:
continue
permission = self._get_permission(authorizer_name, authorizer.function_arn)
permissions.append(permission)
return permissions
def _set_default_authorizer(
self, swagger_editor, authorizers, default_authorizer, add_default_auth_to_preflight=True, api_authorizers=None
):
if not default_authorizer:
return
if not isinstance(default_authorizer, string_types):
raise InvalidResourceException(
self.logical_id,
"DefaultAuthorizer is not a string.",
)
if not authorizers.get(default_authorizer) and default_authorizer != "AWS_IAM":
raise InvalidResourceException(
self.logical_id,
"Unable to set DefaultAuthorizer because '"
+ default_authorizer
+ "' was not defined in 'Authorizers'.",
)
for path in swagger_editor.iter_on_path():
swagger_editor.set_path_default_authorizer(
path,
default_authorizer,
authorizers=authorizers,
add_default_auth_to_preflight=add_default_auth_to_preflight,
api_authorizers=api_authorizers,
)
def _set_default_apikey_required(self, swagger_editor):
for path in swagger_editor.iter_on_path():
swagger_editor.set_path_default_apikey_required(path)
def _set_endpoint_configuration(self, rest_api, value):
"""
Sets endpoint configuration property of AWS::ApiGateway::RestApi resource
:param rest_api: RestApi resource
:param string/dict value: Value to be set
"""
if isinstance(value, dict) and value.get("Type"):
rest_api.Parameters = {"endpointConfigurationTypes": value.get("Type")}
rest_api.EndpointConfiguration = {"Types": [value.get("Type")]}
if "VPCEndpointIds" in value.keys():
rest_api.EndpointConfiguration["VpcEndpointIds"] = value.get("VPCEndpointIds")
else:
rest_api.EndpointConfiguration = {"Types": [value]}
rest_api.Parameters = {"endpointConfigurationTypes": value}
|
the-stack_106_20535
|
# Configuration file for Jupyter Hub
from jinja2 import Template
from oauthenticator.github import GitHubOAuthenticator
from jupyterhub.auth import PAMAuthenticator
from jupyterhub.traitlets import Command
from jupyterhub.apihandlers.base import APIHandler
from jupyterhub.handlers.login import LogoutHandler
from jupyterhub.utils import maybe_future
import asyncio
import yaml
from tornado.escape import url_escape
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest
from jupyterhub.handlers.base import BaseHandler
from traitlets import Unicode
SESSION_COOKIE_NAME = 'jupyterhub-session-id'
c = get_config()
c.JupyterHub.log_level = 10
c.JupyterHub.shutdown_on_logout = True
try:
from http.cookies import Morsel
except ImportError:
from Cookie import Morsel
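# Older Python/http.cookies versions do not recognize the SameSite attribute, so register it
# here to allow setting SameSite on JupyterHub cookies.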
Morsel._reserved[str('samesite')] = str('SameSite')
class WrappedGitHubAuthenticator(GitHubOAuthenticator):
async def authenticate(self,handler,data):
result = await GitHubOAuthenticator.authenticate(self, handler, data)
result['name'] = 'github_user_' + result['name']
return result
yamlconfig = {}
with open("/application/config/config.yaml", "r") as f:
yamlconfig = yaml.full_load(f)
auth_url = "http://localhost:8080" + "/v1/users/auth"
backendhost = "localhost:8080"
class MyAuthenticator(WrappedGitHubAuthenticator,PAMAuthenticator):
async def add_user(self, user):
"""Hook called whenever a new user is added
        If self.create_system_users is set, the system user will be created if it doesn't exist.
"""
user_exists = await maybe_future(self.system_user_exists(user))
if not user_exists:
if self.create_system_users:
await maybe_future(self.add_system_user(user))
dir = Path('/app/db/notebook_dir/' + user.name)
if not dir.exists():
print('adding user',user.name)
os.makedirs(f'/app/db/notebook_dir/{user.name}/examples',exist_ok=True)
subprocess.check_call(['cp', '-r', '/srv/ipython/examples', '/app/db/notebook_dir/' + user.name + "/examples"])
subprocess.check_call(['chown', '-R', user.name, '/app/db/notebook_dir/' + user.name ])
else:
raise KeyError("User %s does not exist." % user.name)
await maybe_future(super().add_user(user))
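    # Validates the username/token pair against the backend's /v1/users/auth endpoint and, on
    # success, maps the returned account id to a local "user_<id prefix>" JupyterHub username.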
async def authenticate(self,handler,data):
username = data['username']
password = data['password']
url = auth_url + "?user_name=" + username + "&token=" + password
print('backendhost',backendhost)
req = HTTPRequest(
url,
method="GET",
headers={"Host": backendhost,
"Pragma":"no-cache",
"Cache-Control":"no-cache",
"Content-Type":"application/json;charset=UTF-8",
"Origin":backendhost,
"Sec-Fetch-Site":"same-site",
"Sec-Fetch-Mode":"cors",
"Sec-Fetch-Dest":"empty",
"Referer":backendhost,
"Accept-Encoding":"gzip,deflate,br",
"Accept-Language":"zh-CN,zh;q=0.9,ja;q=0.8,en;q=0.7,zh-HK;q=0.6"
}
)
resp_data = await self.fetch(req)
print(resp_data)
if resp_data.get("message") and resp_data["message"] == "success":
print(resp_data["data"]["id"])
print('yes!!!')
theUserName = 'user_' + resp_data["data"]["id"][0:10]
print(theUserName)
return theUserName
else:
raise ValueError("Authenticate Failed")
class ExtloginHandler(BaseHandler):
"""Render the login page."""
def _render(self, login_error=None, username=None,token=None):
context = {
"next": url_escape(self.get_argument('next', default='')),
"username": username,
"password":token,
"login_error": login_error,
"login_url": self.settings['login_url'],
"authenticator_login_url": url_concat(
self.authenticator.login_url(self.hub.base_url),
{'next': self.get_argument('next', '')},
),
}
custom_html = Template(
self.authenticator.get_custom_html(self.hub.base_url)
).render(**context)
return self.render_template('external_login.html',
**context,
custom_html=custom_html,
)
async def get(self):
user = self.get_current_user()
username = self.get_argument('username', default='')
token = self.get_argument('token',default='')
print(username,token)
self.finish(await self._render(username=username,token=token))
def _backend_logout_cleanup(self, name):
"""Default backend logout actions
Send a log message, clear some cookies, increment the logout counter.
"""
self.log.info("User logged out: %s", name)
self.clear_login_cookie()
self.statsd.incr('logout')
async def default_handle_logout(self):
"""The default logout action
Optionally cleans up servers, clears cookies, increments logout counter
Cleaning up servers can be prevented by setting shutdown_on_logout to
False.
"""
user = self.current_user
if user:
if self.shutdown_on_logout:
await self._shutdown_servers(user)
self._backend_logout_cleanup(user.name)
async def _shutdown_servers(self, user):
"""Shutdown servers for logout
Get all active servers for the provided user, stop them.
"""
active_servers = [
name
for (name, spawner) in user.spawners.items()
if spawner.active and not spawner.pending
]
if active_servers:
self.log.info("Shutting down %s's servers", user.name)
futures = []
for server_name in active_servers:
futures.append(maybe_future(self.stop_single_user(user, server_name)))
await asyncio.gather(*futures)
c.JupyterHub.extra_handlers = [("/external/login",ExtloginHandler)]
c.JupyterHub.authenticator_class = MyAuthenticator
import os
import sys
import subprocess
from pathlib import Path
join = os.path.join
here = os.path.dirname(__file__)
root = os.environ.get('OAUTHENTICATOR_DIR', here)
sys.path.insert(0, root)
allowed_users = set()
def update_allowed_users():
with open(join(root, 'userlist')) as f:
for line in f:
if not line:
continue
parts = line.split()
name = parts[0]
allowed_users.add(name)
def check_allowed(username):
update_allowed_users()
return username in allowed_users
c.JupyterHub.template_paths = [os.environ['OAUTHENTICATOR_DIR'] + '/templates']
c.JupyterHub.tornado_settings = {
'headers': {
'Access-Control-Allow-Origin':'*',
'Content-Security-Policy': "frame-ancestors * " ,
},
}
c.NotebookApp.tornado_settings = {
'headers': {
'Access-Control-Allow-Origin':'*',
'Content-Security-Policy': "frame-ancestors * ",
}
}
def generateExampleHook(spawner):
username = spawner.user.name
print(f'/app/db/notebook_dir/{username}/examples/en-horizontal-learning-task.ipynb')
os.makedirs(f'/app/db/notebook_dir/{username}/examples',exist_ok=True)
enFile = Path(f'/app/db/notebook_dir/{username}/examples/en-horizontal-learning-task.ipynb')
if not enFile.exists():
subprocess.check_call(['cp', '-f', '/srv/ipython/examples/en-horizontal-learning-task.ipynb', f'/app/db/notebook_dir/{username}/examples/en-horizontal-learning-task.ipynb'])
zhFile = Path(f'/app/db/notebook_dir/{username}/examples/zh-horizontal-learning-task.ipynb')
if not zhFile.exists():
subprocess.check_call(['cp', '-f', '/srv/ipython/examples/zh-horizontal-learning-task.ipynb', f'/app/db/notebook_dir/{username}/examples/zh-horizontal-learning-task.ipynb'])
c.Spawner.pre_spawn_hook = generateExampleHook
c.Spawner.http_timeout = 100
c.Spawner.notebook_dir = '/app/db/notebook_dir/{username}'
c.Spawner.args = ['''--ServerApp.tornado_settings={
'headers':{
'Access-Control-Allow-Origin':'*',
'Content-Security-Policy': "frame-ancestors 'self' * ",
'cookie_options': {'samesite':'None','Secure':True},
}
} --ServerApp.allow_remote_access=True''']
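# NOTE: the assignment below replaces the Spawner.args list defined above; only the
# jupyter_jupyterlab_server_config.py argument is actually passed to spawned servers.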
c.Spawner.args = ['''--config=/application/jupyter_jupyterlab_server_config.py''']
c.Spawner.cmd = ["jupyter-labhub"]
c.MyAuthenticator.create_system_users = True
# ssl config
ssl = "/application/ssl"
keyfile = join(ssl, 'jupyter.key')
certfile = join(ssl, 'jupyter.crt')
print('keyfile',keyfile)
print('certfile',certfile)
# if os.path.exists(keyfile):
# c.JupyterHub.ssl_key = keyfile
# if os.path.exists(certfile):
# c.JupyterHub.ssl_cert = certfile
|
the-stack_106_20536
|
import sys, os
sys.path.append(os.path.dirname(__file__)+'/../../../')
import FormulaSolidityPort
import FormulaNativePython
MIN = 0
MAX = 2 ** 256 - 1
PPM_RESOLUTION = 1000000
def add(a, b):
assert a + b <= MAX, 'error {} + {}'.format(a, b)
return a + b
def sub(a, b):
assert a - b >= MIN, 'error {} - {}'.format(a, b)
return a - b
def mul(a, b):
assert a * b <= MAX, 'error {} * {}'.format(a, b)
return a * b
def div(a, b):
assert b != 0, 'error {} / {}'.format(a, b)
return a // b
def ratio(x, n, d):
return x if n == d else div(mul(x, n), d)
class Token():
def __init__(self, symbol):
self.symbol = symbol
self.totalSupply = 0
self.balanceOf = {}
def register(self, user):
self.balanceOf[user] = 0
def mint(self, user, amount):
self.totalSupply = add(self.totalSupply, amount)
self.balanceOf[user] = add(self.balanceOf[user], amount)
def burn(self, user, amount):
self.totalSupply = sub(self.totalSupply, amount)
self.balanceOf[user] = sub(self.balanceOf[user], amount)
def transfer(self, source, target, amount):
self.balanceOf[source] = sub(self.balanceOf[source], amount)
self.balanceOf[target] = add(self.balanceOf[target], amount)
def serialize(self):
return {
'totalSupply': self.totalSupply,
'balanceOf': self.balanceOf,
}
class Branch():
def __init__(self, token):
self.reserveRate = 0
self.reserveWeight = 0
self.reserveStaked = 0
self.reserveToken = token
self.smartToken = Token('smart' + token.symbol)
def addLiquidity(self, pool, user, amount):
reserveAmount = amount if amount != 'all' else self.reserveToken.balanceOf[user]
supplyAmount = ratio(reserveAmount, self.smartToken.totalSupply, self.reserveStaked)
self.reserveToken.transfer(user, pool, reserveAmount)
self.smartToken.mint(user, supplyAmount)
self.reserveStaked = add(self.reserveStaked, reserveAmount)
def remLiquidity(self, pool, user, amount, lo, hi):
supplyAmount = amount if amount != 'all' else self.smartToken.balanceOf[user]
reserveAmount = ratio(ratio(supplyAmount, self.reserveStaked, self.smartToken.totalSupply), lo, hi)
self.smartToken.burn(user, supplyAmount)
self.reserveToken.transfer(pool, user, reserveAmount)
self.reserveStaked = sub(self.reserveStaked, reserveAmount)
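    # Amplified views of this branch used by the pricing formula: virtualStaked scales the staked
    # reserve by amp, while virtualBalance adds the (amp - 1)-scaled staked reserve to the actual
    # pool balance of the reserve token.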
def virtualStaked(self, amp):
return mul(self.reserveStaked, amp)
def virtualBalance(self, amp, id):
return add(mul(self.reserveStaked, sub(amp, 1)), self.reserveToken.balanceOf[id])
def serialize(self):
return {
'reserveRate': self.reserveRate,
'reserveWeight': self.reserveWeight,
'reserveStaked': self.reserveStaked,
'reserveToken': self.reserveToken.serialize(),
'smartToken': self.smartToken.serialize(),
}
class Pool():
def __init__(self, id, amp, fee, factor, mainToken, sideToken):
self.id = id
self.amp = amp
self.fee = fee
self.factor = factor
self.mainSymbol = mainToken.symbol
self.sideSymbol = sideToken.symbol
self.branches = {token.symbol: Branch(token) for token in [mainToken, sideToken]}
def setRates(self, mainRate, sideRate):
self.branches[self.mainSymbol].reserveRate = mainRate
self.branches[self.sideSymbol].reserveRate = sideRate
def addLiquidity(self, symbol, user, amount):
self.branches[symbol].addLiquidity(self.id, user, amount)
self._updateWeights()
def remLiquidity(self, symbol, user, amount):
x = self.branches[self.mainSymbol].virtualStaked(self.amp)
y = self.branches[self.mainSymbol].virtualBalance(self.amp, self.id)
self.branches[symbol].remLiquidity(self.id, user, amount, *sorted([x, y]))
self._updateWeights()
def convert(self, updateWeights, sourceSymbol, targetSymbol, user, amount):
if updateWeights: self._updateWeights()
sourceBranch = self.branches[sourceSymbol]
targetBranch = self.branches[targetSymbol]
targetAmount = FormulaSolidityPort.crossReserveTargetAmount(
sourceBranch.virtualBalance(self.amp, self.id),
sourceBranch.reserveWeight,
targetBranch.virtualBalance(self.amp, self.id),
targetBranch.reserveWeight,
amount
)
sFee = self.fee
dFee = add(sFee, self._adjustedFee())
sAmount = div(mul(targetAmount, sFee), PPM_RESOLUTION)
dAmount = div(mul(targetAmount, dFee), PPM_RESOLUTION)
sourceBranch.reserveToken.transfer(user, self.id, amount)
targetBranch.reserveToken.transfer(self.id, user, sub(targetAmount, dAmount))
targetBranch.reserveStaked = add(targetBranch.reserveStaked, sAmount)
def closeArbitrage(self, user):
self._updateWeights()
mainBranch = self.branches[self.mainSymbol]
sideBranch = self.branches[self.sideSymbol]
amount = mainBranch.reserveStaked - mainBranch.reserveToken.balanceOf[self.id]
if amount > 0:
self.convert(False, self.mainSymbol, self.sideSymbol, user, amount)
if amount < 0:
self.convert(False, self.sideSymbol, self.mainSymbol, user, int(-FormulaNativePython.crossReserveTargetAmount(
mainBranch.virtualBalance(self.amp, self.id),
mainBranch.reserveWeight,
sideBranch.virtualBalance(self.amp, self.id),
sideBranch.reserveWeight,
amount
)))
def _updateWeights(self):
mainBranch = self.branches[self.mainSymbol]
sideBranch = self.branches[self.sideSymbol]
if mainBranch.reserveStaked > 0 or sideBranch.reserveStaked > 0:
mainWeight, sideWeight = FormulaSolidityPort.balancedWeights(
mainBranch.virtualStaked(self.amp),
mainBranch.virtualBalance(self.amp, self.id),
sideBranch.virtualBalance(self.amp, self.id),
mainBranch.reserveRate,
sideBranch.reserveRate
)
mainBranch.reserveWeight = mainWeight
sideBranch.reserveWeight = sideWeight
def _adjustedFee(self):
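        # Returns (y - x) * amp * factor / (y * PPM_RESOLUTION): an extra fee proportional to the
        # relative gap between the two staked reserves after scaling each by its own rate and the
        # opposite branch's weight.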
mainBranch = self.branches[self.mainSymbol]
sideBranch = self.branches[self.sideSymbol]
x = mul(mul(mainBranch.reserveStaked, mainBranch.reserveRate), sideBranch.reserveWeight)
y = mul(mul(sideBranch.reserveStaked, sideBranch.reserveRate), mainBranch.reserveWeight)
return div(mul(mul(sub(y, x), self.amp), self.factor), mul(y, PPM_RESOLUTION)) if y > 0 else 0
def serialize(self):
return {
'amp': self.amp,
'fee': self.fee,
'factor': self.factor,
self.mainSymbol: self.branches[self.mainSymbol].serialize(),
self.sideSymbol: self.branches[self.sideSymbol].serialize(),
}
def newPool(amp, fee, factor, mainSymbol, sideSymbol, numOfUsers, initialAmount):
pool = Pool('pool', amp, fee, factor, Token(mainSymbol), Token(sideSymbol))
for symbol in [mainSymbol, sideSymbol]:
pool.branches[symbol].reserveToken.register(pool.id)
for i in range(numOfUsers):
userId = 'user{}'.format(i + 1)
pool.branches[symbol].smartToken.register(userId)
pool.branches[symbol].reserveToken.register(userId)
pool.branches[symbol].reserveToken.mint(userId, initialAmount)
return pool
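# Illustrative usage sketch, not part of the original module: the pool parameters and rates below
# are assumptions chosen only to show the call sequence. addLiquidity/convert/closeArbitrage would
# be the natural next steps, but they call into FormulaSolidityPort.balancedWeights, whose accepted
# input ranges are not shown here, so only the cheap construction path is exercised.
if __name__ == '__main__':
    demo_pool = newPool(amp=20, fee=1000, factor=1000000,
                        mainSymbol='TKN', sideSymbol='BNT',
                        numOfUsers=2, initialAmount=10 ** 24)
    demo_pool.setRates(2000000, 1000000)  # hypothetical reserve rates
    print(demo_pool.serialize())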
|
the-stack_106_20538
|
"""
===============
Radon transform
===============
In computed tomography, the tomography reconstruction problem is to obtain
a tomographic slice image from a set of projections [1]_. A projection is formed
by drawing a set of parallel rays through the 2D object of interest, assigning
the integral of the object's contrast along each ray to a single pixel in the
projection. A single projection of a 2D object is one dimensional. To
enable computed tomography reconstruction of the object, several projections
must be acquired, each of them corresponding to a different angle between the
rays with respect to the object. A collection of projections at several angles
is called a sinogram, which is a linear transform of the original image.
The inverse Radon transform is used in computed tomography to reconstruct
a 2D image from the measured projections (the sinogram). A practical, exact
implementation of the inverse Radon transform does not exist, but there are
several good approximate algorithms available.
As the inverse Radon transform reconstructs the object from a set of
projections, the (forward) Radon transform can be used to simulate a
tomography experiment.
This script performs the Radon transform to simulate a tomography experiment
and reconstructs the input image based on the resulting sinogram formed by
the simulation. Two methods for performing the inverse Radon transform
and reconstructing the original image are compared: The Filtered Back
Projection (FBP) and the Simultaneous Algebraic Reconstruction
Technique (SART).
.. seealso::
- AC Kak, M Slaney, "Principles of Computerized Tomographic Imaging",
http://www.slaney.org/pct/pct-toc.html
- http://en.wikipedia.org/wiki/Radon_transform
The forward transform
=====================
As our original image, we will use the Shepp-Logan phantom. When calculating
the Radon transform, we need to decide how many projection angles we wish
to use. As a rule of thumb, the number of projections should be about the
same as the number of pixels there are across the object (to see why this
is so, consider how many unknown pixel values must be determined in the
reconstruction process and compare this to the number of measurements
provided by the projections), and we follow that rule here. Below is the
original image and its Radon transform, often known as its _sinogram_:
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage import data_dir
from skimage.transform import radon, rescale
image = imread(data_dir + "/phantom.png", as_grey=True)
image = rescale(image, scale=0.4)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Original")
ax1.imshow(image, cmap=plt.cm.Greys_r)
theta = np.linspace(0., 180., max(image.shape), endpoint=True)
sinogram = radon(image, theta=theta, circle=True)
ax2.set_title("Radon transform\n(Sinogram)")
ax2.set_xlabel("Projection angle (deg)")
ax2.set_ylabel("Projection position (pixels)")
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.shape[0]), aspect='auto')
fig.subplots_adjust(hspace=0.4, wspace=0.5)
plt.show()
"""
.. image:: PLOT2RST.current_figure
Reconstruction with the Filtered Back Projection (FBP)
======================================================
The mathematical foundation of the filtered back projection is the Fourier
slice theorem [2]_. It uses Fourier transform of the projection and
interpolation in Fourier space to obtain the 2D Fourier transform of the image,
which is then inverted to form the reconstructed image. The filtered back
projection is among the fastest methods of performing the inverse Radon
transform. The only tunable parameter for the FBP is the filter, which is
applied to the Fourier transformed projections. It may be used to suppress
high frequency noise in the reconstruction. ``skimage`` provides a few
different options for the filter.
"""
from skimage.transform import iradon
reconstruction_fbp = iradon(sinogram, theta=theta, circle=True)
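# The reconstruction filter can be swapped out; assuming the older scikit-image API used in this
# example, something like iradon(sinogram, theta=theta, circle=True, filter='hann') selects a
# different window.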
error = reconstruction_fbp - image
print('FBP rms reconstruction error: %.3g' % np.sqrt(np.mean(error**2)))
imkwargs = dict(vmin=-0.2, vmax=0.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Reconstruction\nFiltered back projection")
ax1.imshow(reconstruction_fbp, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nFiltered back projection")
ax2.imshow(reconstruction_fbp - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
"""
.. image:: PLOT2RST.current_figure
Reconstruction with the Simultaneous Algebraic Reconstruction Technique
=======================================================================
Algebraic reconstruction techniques for tomography are based on a
straightforward idea: for a pixelated image the value of a single ray in a
particular projection is simply a sum of all the pixels the ray passes through
on its way through the object. This is a way of expressing the forward Radon
transform. The inverse Radon transform can then be formulated as a (large) set
of linear equations. As each ray passes through a small fraction of the pixels
in the image, this set of equations is sparse, allowing iterative solvers for
sparse linear systems to tackle the system of equations. One iterative method
has been particularly popular, namely Kaczmarz' method [3]_, which has the
property that the solution will approach a least-squares solution of the
equation set.
The combination of the formulation of the reconstruction problem as a set
of linear equations and an iterative solver makes algebraic techniques
relatively flexible, hence some forms of prior knowledge can be incorporated
with relative ease.
``skimage`` provides one of the more popular variations of the algebraic
reconstruction techniques: the Simultaneous Algebraic Reconstruction Technique
(SART) [1]_ [4]_. It uses Kaczmarz' method [3]_ as the iterative solver. A good
reconstruction is normally obtained in a single iteration, making the method
computationally efficient. Running one or more extra iterations will normally
improve the reconstruction of sharp, high frequency features and reduce the
mean squared error at the expense of increased high frequency noise (the user
will need to decide on what number of iterations is best suited to the problem
at hand). The implementation in ``skimage`` allows prior information in the
form of a lower and upper threshold on the reconstructed values to be supplied
to the reconstruction.
"""
from skimage.transform import iradon_sart
reconstruction_sart = iradon_sart(sinogram, theta=theta)
error = reconstruction_sart - image
print('SART (1 iteration) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
fig, ax = plt.subplots(2, 2, figsize=(8, 8.5))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.set_title("Reconstruction\nSART")
ax1.imshow(reconstruction_sart, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nSART")
ax2.imshow(reconstruction_sart - image, cmap=plt.cm.Greys_r, **imkwargs)
# Run a second iteration of SART by supplying the reconstruction
# from the first iteration as an initial estimate
reconstruction_sart2 = iradon_sart(sinogram, theta=theta,
image=reconstruction_sart)
error = reconstruction_sart2 - image
print('SART (2 iterations) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
ax3.set_title("Reconstruction\nSART, 2 iterations")
ax3.imshow(reconstruction_sart2, cmap=plt.cm.Greys_r)
ax4.set_title("Reconstruction error\nSART, 2 iterations")
ax4.imshow(reconstruction_sart2 - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
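# An illustrative sketch, not part of the original example: the thresholding prior
# mentioned above can be supplied through the ``clip`` argument of ``iradon_sart``,
# assuming here that the phantom's values lie in the range [0, 1].
reconstruction_sart_clipped = iradon_sart(sinogram, theta=theta, clip=(0, 1))
error = reconstruction_sart_clipped - image
print('SART (1 iteration, clipped to [0, 1]) rms reconstruction error: %.3g'
      % np.sqrt(np.mean(error**2)))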
"""
.. image:: PLOT2RST.current_figure
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic Imaging",
IEEE Press 1988. http://www.slaney.org/pct/pct-toc.html
.. [2] Wikipedia, Radon transform,
http://en.wikipedia.org/wiki/Radon_transform#Relationship_with_the_Fourier_transform
.. [3] S Kaczmarz, "Angenaeherte Aufloesung von Systemen linearer
Gleichungen", Bulletin International de l'Academie Polonaise des
Sciences et des Lettres 35 pp 355--357 (1937)
.. [4] AH Andersen, AC Kak, "Simultaneous algebraic reconstruction technique
(SART): a superior implementation of the ART algorithm", Ultrasonic
Imaging 6 pp 81--94 (1984)
"""
|
the-stack_106_20539
|
import pygame
import sys
SCREENWIDTH = 800
SCREENHEIGHT = 600
max_iteration = 255
pygame.init()
screen = pygame.display.set_mode(
(SCREENWIDTH, SCREENHEIGHT), pygame.DOUBLEBUF | pygame.HWSURFACE)
pygame.display.set_caption("Mandelbrot Fractal")
fractal = screen.copy()
pygame.mixer.init()
fractal.fill((0, 0, 0))
for i in range(SCREENWIDTH):
print(i)
for j in range(SCREENHEIGHT):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
sys.exit()
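        # Map pixel (i, j) to a point c = x0 + y0*i in the complex plane,
        # covering roughly x0 in [-2.5, 1.0] and y0 in [-1.0, 1.0].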
x0 = (float(i)/SCREENWIDTH)*3.5 - 2.5
y0 = (float(j)/SCREENHEIGHT)*2 - 1
x = 0
y = 0
iteration = 0
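        # Escape-time iteration: repeat z <- z**2 + c until |z| >= 2 or the cap is
        # reached; the iteration count becomes the pixel's grey level below.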
while x*x + y*y < 2*2 and iteration < max_iteration:
xtemp = x*x - y*y + x0
y = 2*x*y + y0
x = xtemp
iteration = iteration + 1
fractal.set_at((i, j), (iteration, iteration, iteration))
screen.blit(fractal, (0, 0))
pygame.display.flip()
running = True
while running:
# Events
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
sys.exit()
|
the-stack_106_20540
|
"""Module for creating and managing random sequences or keys."""
import random
from shufflealgos import List
class Key:
"""Class for managing random sequences of integers."""
def __init__(
self, globalminval: int = 1, globalmaxval: int = 200,
length: int = 100, values: List[int] = None) -> None:
"""Build Key object in different ways depending on parameters.
Parameters
----------
globalminval : int, optional
The absolute minimum value of the key (not the current
minimum value in key), defaults to 1
globalmaxval : int, optional
The absolute maximum value of the key (not the current
maximum value in key), defaults to 200 as per one of the
research papers
length : int, optional
The length of the key to be generated if `values` is not
specified. Defaults to 100 as per one of the research
papers
values : List[int], optional
A user-defined list of integers that will be treated as the
user key.
"""
self.__values = values
self.__globalminval: int = None
self.__globalmaxval: int = None
if values is None:
self.__globalminval = globalminval
self.__globalmaxval = globalmaxval
self.__values = self.get_random_key(
globalminval, globalmaxval, length)
else:
self.__values = values.copy()
if self.__globalminval is None and self.__globalmaxval is None:
self.__globalminval = min(self.__values)
self.__globalmaxval = max(self.__values)
else:
self.__globalminval = globalminval
self.__globalmaxval = globalmaxval
def get_extended_key(self, new_size: int):
"""Return a new key whose size is `new_size`.
Return a new key whose size is `new_size`, based on the
the terms of the previous key, and whose new terms are defined
as shifted values of the canonical terms.
"""
new_values: List[int] = self.__values.copy()
next_idx: int = self.length
range_magnitude: int = self.__globalmaxval - self.__globalminval + 1
while next_idx < new_size:
new_term: int = (self.__values[next_idx % self.length]
+ next_idx // self.length)
if not self.__globalminval <= new_term <= self.__globalmaxval:
new_term = (
((new_term - self.__globalminval) % (range_magnitude))
+ self.__globalminval
)
new_values.append(new_term)
next_idx += 1
return Key(self.__globalminval, self.__globalmaxval, values=new_values)
def shift_to_range(self, global_range_min: int, global_range_max: int):
"""Shift the values of the key to the specified range."""
range_magnitude: int = global_range_max - global_range_min + 1
new_terms: List[int] = list()
for term in self.__values:
new_term = term
if not global_range_min <= term <= global_range_max:
if term < global_range_min:
new_term = (term % range_magnitude) + global_range_min
elif term > global_range_max:
new_term = (((term - global_range_min) % range_magnitude)
+ global_range_min)
new_terms.append(new_term)
return Key(global_range_min, global_range_max, values=new_terms)
@property
def values(self) -> List[int]:
"""Get the key's values."""
return self.__values
@property
def globalminval(self) -> int:
"""Get absolute minimum value of key."""
return self.__globalminval
@property
def globalmaxval(self) -> int:
"""Get absolute maximum value of key."""
return self.__globalmaxval
@property
def localminval(self) -> int:
"""Get the relative minimum value of key."""
return min(self.__values)
@property
def localmaxval(self) -> int:
"""Get the relative maximum value of key."""
return max(self.__values)
@property
def length(self) -> int:
"""Return length of key."""
return len(self.__values)
@staticmethod
def get_random_key(minval: int, maxval: int, length: int) -> List[int]:
"""Generate a random key.
Parameters
----------
minval: int
The absolute minimum value of the key
maxval: int
The absolute maximum value of the key
        length: int
            The desired length of the key
"""
return random.choices(range(minval, maxval + 1), k=length)
def __str__(self) -> str:
"""Return string representation of key."""
return f"Key({self.__values})"
def __repr__(self) -> str:
"""Return object representation of key."""
return repr(str(self))
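if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module):
    # build a random key, extend it, and remap it into a narrower range.
    key = Key(globalminval=1, globalmaxval=200, length=10)
    print(key)                              # e.g. Key([37, 122, ...])
    extended = key.get_extended_key(25)     # new terms wrap back into [1, 200]
    remapped = key.shift_to_range(50, 100)  # out-of-range terms fold into [50, 100]
    print(extended.length, remapped.localminval, remapped.localmaxval)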
|
the-stack_106_20541
|
# 2021 - Douglas Diniz - www.manualdocodigo.com.br
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from hexdata import HexData
from selections import Selections
class HexEditor_p(QtWidgets.QWidget):
def __init__(self, parent):
super(HexEditor_p, self).__init__(parent)
self._scroll = parent
self.BYTES_PER_LINE = 16
self.NUMBER_OF_LINES = 15
self.FONT_SIZE = 12
self._cursorTimer = QtCore.QTimer()
self.data = HexData()
self.data.setData(bytearray(os.urandom(1024 * 20)))
self.setFont(QtGui.QFont("Courier", self.FONT_SIZE))
self.setFocusPolicy(Qt.StrongFocus)
self.penStandard = QtGui.QPen(self.palette().color(QtGui.QPalette.WindowText))
self._charWidth = self.fontMetrics().width("9")
self._charHeight = self.fontMetrics().height()
self.addr_xpos = 0
self.addr_width = self.numHexChars(len(self.data)) * self._charWidth + self._charWidth
self.hex_xpos = self.addr_width
self.hex_width = (self.BYTES_PER_LINE * 3 + 1) * self._charWidth
self.ascii_xpos = self.addr_width + self.hex_width
self.ascii_width = (self.BYTES_PER_LINE + 2) * self._charWidth
self.widget_width = self.ascii_xpos + self.ascii_width
self.firstIndexToPaint = 0
self.lastIndexToPaint = 0
self._cursorIndexInData = 0
self._cursorHexPosition = 0
self._cursorXPositionInCanvas = 0
self._cursorYPositionInCanvas = 0
self._cursorBlink = False
# For the selection we have the place we clicked and the start and end of the selection
# We can drag up or down related to the clicked position, so we need to save the
# clicked position.
self.currentSelection = {"click": 0, "start": 0, "end": 0}
self.selections = Selections()
self.adjustEditorToData()
self.setCursorPosition(0)
self._cursorTimer.timeout.connect(self.updateCursor)
self._cursorTimer.setInterval(500)
self._cursorTimer.start()
parent.setFixedSize(
self.ascii_xpos
+ self.ascii_width
+ self.style().pixelMetric(QtWidgets.QStyle.PM_ScrollBarExtent),
self._charHeight * self.NUMBER_OF_LINES + self._charHeight // 2,
)
def setData(self, data):
if isinstance(data, (bytearray, bytes, QtCore.QByteArray)):
self.data.setData(data)
self.adjustEditorToData()
self.setCursorPosition(0)
self.update()
else:
print("The Data should be a bytearray or bytes")
def getData(self):
return self.data.getData()
def setNumberOfBytesPerLine(self, num):
self.BYTES_PER_LINE = num
def setNumberOfLines(self, num):
self.NUMBER_OF_LINES = num
def setFontSize(self, size):
if size < 8:
self.FONT_SIZE = 8
elif size > 72:
self.FONT_SIZE = 72
else:
self.FONT_SIZE = size
def adjustEditorToData(self):
self.setMinimumHeight(
(((len(self.data) - 1) // self.BYTES_PER_LINE) * self._charHeight)
+ self._charHeight
+ self._charHeight // 2
)
def updateCursor(self):
self._cursorBlink = not self._cursorBlink
self.update(
self._cursorXPositionInCanvas,
self._cursorYPositionInCanvas,
self._charWidth,
self._charHeight,
)
def clickedInAddressArea(self, point):
if point.x() > self.addr_xpos and point.x() < self.addr_xpos + self.addr_width:
return True
return False
def clickedInHexArea(self, point):
if point.x() > self.hex_xpos and point.x() < self.hex_xpos + self.hex_width:
return True
return False
def clickedInAsciiArea(self, point):
if point.x() > self.ascii_xpos and point.x() < self.ascii_xpos + self.ascii_width:
return True
return False
def setCursorPosition(self, address):
self.setCursorVariables(address * 2)
self.currentSelection["click"] = self._cursorIndexInData
self.currentSelection["start"] = self._cursorIndexInData
self.currentSelection["end"] = self._cursorIndexInData
self.update()
self.ensureCursorVisible()
def ensureCursorVisible(self):
self._scroll.ensureVisible(
self._cursorXPositionInCanvas,
self._cursorYPositionInCanvas,
50,
self.NUMBER_OF_LINES * self._charHeight // 2,
)
def mousePressEvent(self, e):
"""The mouse click event starts a new selection and update the cursor variables"""
self.update()
if self.clickedInHexArea(e.pos()):
self.setCursorVariables(self.mapPointToHexIndex(e.pos()))
self.currentSelection["click"] = self._cursorIndexInData
self.currentSelection["start"] = self._cursorIndexInData
self.currentSelection["end"] = self._cursorIndexInData
elif self.clickedInAddressArea(e.pos()):
lineStartAddr = self.mapPointToLineStartPos(e.pos())
if lineStartAddr >= len(self.data):
lineStartAddr = len(self.data) - len(self.data) % self.BYTES_PER_LINE - 1
self.setCursorVariables(lineStartAddr * 2)
self.currentSelection["click"] = lineStartAddr
self.currentSelection["start"] = lineStartAddr
lineEndAddr = lineStartAddr + self.BYTES_PER_LINE - 1
if lineEndAddr >= len(self.data):
lineEndAddr = len(self.data) - 1
self.currentSelection["end"] = lineEndAddr
elif self.clickedInAsciiArea(e.pos()):
self.setCursorVariables(self.mapPointToDataIndex(e.pos()) * 2)
self.currentSelection["click"] = self._cursorIndexInData
self.currentSelection["start"] = self._cursorIndexInData
self.currentSelection["end"] = self._cursorIndexInData
def mouseMoveEvent(self, e):
"""This method is called when we drag the mouse over the widget canvas.
This way the user can select a block of bytes.
        So we use the mouse location to calculate the start and end points of the selection."""
self.update()
if self.mapPointToDataIndex(e.pos()) >= 0:
cursorPos = self.mapPointToDataIndex(e.pos())
if cursorPos >= self.currentSelection["click"]:
self.currentSelection["start"] = self.currentSelection["click"]
self.currentSelection["end"] = cursorPos
else:
self.currentSelection["start"] = cursorPos
self.currentSelection["end"] = self.currentSelection["click"]
self.setCursorVariables(self.currentSelection["start"] * 2)
elif self.mapPointToLineStartPos(e.pos()) >= 0:
lineAddrSelected = self.mapPointToLineStartPos(e.pos())
if lineAddrSelected >= self.currentSelection["click"]:
self.currentSelection["start"] = self.currentSelection["click"]
lineEndAddr = lineAddrSelected + self.BYTES_PER_LINE - 1
if lineEndAddr >= len(self.data):
lineEndAddr = len(self.data) - 1
self.currentSelection["end"] = lineEndAddr
else:
self.currentSelection["start"] = lineAddrSelected
self.currentSelection["end"] = self.currentSelection["click"]
self.setCursorVariables(self.currentSelection["start"] * 2)
def setCursorVariables(self, hexIndex):
self._cursorIndexInData = hexIndex // 2
if self._cursorIndexInData >= len(self.data):
self._cursorIndexInData = len(self.data) - 1
self._cursorHexPosition = hexIndex
if self._cursorHexPosition >= len(self.data) * 2:
self._cursorHexPosition = len(self.data) * 2 - 2
self._cursorYPositionInCanvas = (
(self._cursorHexPosition // (2 * self.BYTES_PER_LINE)) * self._charHeight
+ self._charHeight
+ 2
)
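        # Each byte occupies three character cells ("xx "), so nibble n within the line maps
        # to column (n // 2) * 3 + (n % 2); add the hex area offset plus one leading cell.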
x = self._cursorHexPosition % (2 * self.BYTES_PER_LINE)
self._cursorXPositionInCanvas = (
(((x // 2) * 3) + (x % 2)) * self._charWidth + self.hex_xpos + self._charWidth
)
def mapPointToHexIndex(self, point):
if (
point.x() > self.hex_xpos
and point.x() < self.hex_xpos + self.hex_width - self._charWidth
):
x = (point.x() - self.hex_xpos) // self._charWidth
if x % 3 == 2:
x = (x // 3) * 2 + 1
else:
x = (x // 3) * 2
y = (point.y() // self._charHeight) * self.BYTES_PER_LINE * 2
else:
return -1
return x + y
def resetCurrentSelection(self, pos):
"""Reset the current selection, point all the variabels to a single position"""
if pos < 0:
pos = 0
self.currentSelection["click"] = pos
self.currentSelection["start"] = pos
self.currentSelection["end"] = pos
def mapPointToDataIndex(self, point):
if (
point.x() > self.hex_xpos
and point.x() < self.hex_xpos + self.hex_width - self._charWidth
):
x = ((point.x() - self.hex_xpos) // self._charWidth) // 3
y = (point.y() // self._charHeight) * self.BYTES_PER_LINE
elif (
point.x() > self.ascii_xpos
and point.x() < self.ascii_xpos + self.ascii_width - self._charWidth
):
x = ((point.x() - self.ascii_xpos) // self._charWidth) - 1
y = (point.y() // self._charHeight) * self.BYTES_PER_LINE
else:
return -1
dataIndex = x + y
if dataIndex >= len(self.data):
dataIndex = len(self.data) - 1
return dataIndex
def mapPointToLineStartPos(self, point):
if point.x() > self.addr_xpos and point.x() < self.hex_xpos:
x = (point.x() - self.hex_xpos) // self._charWidth
y = (point.y() // self._charHeight) * self.BYTES_PER_LINE
else:
return -1
return y
def keyPressEvent(self, e):
key = e.text()
if (key >= "0" and key <= "9") or (key >= "a" and key <= "f"):
if len(self.data) > 0:
# If there is a block selection active, we need to start the changes
# from the beginning of the block.
if self.currentSelection["start"] != self.currentSelection["end"]:
selectionSize = (
self.currentSelection["end"] - self.currentSelection["start"] + 1
)
self.selections.add(
self.currentSelection["start"], self.currentSelection["end"]
)
self.setCursorVariables(self.currentSelection["start"] * 2)
self.data.replaceWithValue(self.currentSelection["start"], selectionSize, 0x0)
self.resetCurrentSelection(self.currentSelection["start"])
else:
self.selections.add(self._cursorIndexInData, self._cursorIndexInData)
byte = self.data[self._cursorIndexInData]
# print(f"{byte:02x}")
if self._cursorHexPosition % 2 == 1:
byte = (byte & 0xF0) | (int(key, 16) & 0xF)
else:
byte = (byte & 0xF) | ((int(key, 16) & 0xF) << 4)
# print(f"{byte:02x}")
self.replaceByte(self._cursorIndexInData, byte)
self.setCursorVariables(self._cursorHexPosition + 1)
if e.matches(QtGui.QKeySequence.Delete):
self.selections.add(self.currentSelection["start"], self.currentSelection["end"])
if self.currentSelection["start"] != self.currentSelection["end"]:
selectionSize = self.currentSelection["end"] - self.currentSelection["start"] + 1
self.data.remove(self.currentSelection["start"], selectionSize)
else:
self.data.remove(self.currentSelection["start"], 1)
self.update()
def replaceByte(self, index, byte):
self.data[index] = byte
def insert(self, pos, data):
self.data.insert(pos, data)
def paintEvent(self, e):
painter = QtGui.QPainter(self)
painter.setPen(Qt.gray)
painter.drawLine(self.ascii_xpos, e.rect().top(), self.ascii_xpos, self.height())
painter.drawLine(self.hex_xpos, e.rect().top(), self.hex_xpos, self.height())
painter.setPen(self.penStandard)
self.firstIndexToPaint = (
(e.rect().top() // self._charHeight) - self._charHeight
) * self.BYTES_PER_LINE
self.lastIndexToPaint = (
(e.rect().bottom() // self._charHeight) + self._charHeight
) * self.BYTES_PER_LINE
if self.firstIndexToPaint < 0:
self.firstIndexToPaint = 0
if self.lastIndexToPaint > len(self.data):
self.lastIndexToPaint = len(self.data)
# Address part
self.paintAddressArea(painter, e)
# Hex part
self.paintHexArea(painter, e)
# Latin1 part
self.paintLatin1Area(painter, e)
# Paint Cursor Line
self.paintCursor(painter, e)
def paintAddressArea(self, painter, e):
painter.fillRect(
QtCore.QRect(0, e.rect().top(), self.addr_width, self.height()),
QtGui.QColor(0xD4, 0xD4, 0xD4, 0xFF),
)
ypos = (
(self.firstIndexToPaint) / self.BYTES_PER_LINE
) * self._charHeight + self._charHeight
xpos = self._charWidth / 2
lineNum = self.firstIndexToPaint
while lineNum < self.lastIndexToPaint:
address = lineNum
if (
(
self.currentSelection["start"] >= address
and self.currentSelection["start"] < address + self.BYTES_PER_LINE
)
or (
self.currentSelection["end"] >= address
and self.currentSelection["end"] < address + self.BYTES_PER_LINE
)
or (
address >= self.currentSelection["start"]
and address < self.currentSelection["end"]
)
):
painter.setBackground(QtGui.QBrush(QtGui.QColor(0xFF, 0x00, 0x00, 0x80)))
painter.setBackgroundMode(Qt.OpaqueMode)
else:
painter.setBackgroundMode(Qt.TransparentMode)
painter.drawText(xpos, ypos, f"{address:0{self.numHexChars(len(self.data))}x}")
ypos += self._charHeight
lineNum += self.BYTES_PER_LINE
def paintHexArea(self, painter, e):
painter.fillRect(
QtCore.QRect(self.hex_xpos, e.rect().top(), self.hex_width, self.height()),
self.palette().color(QtGui.QPalette.Base),
)
ypos = (
(self.firstIndexToPaint) / self.BYTES_PER_LINE
) * self._charHeight + self._charHeight
lineNum = self.firstIndexToPaint
if self.currentSelection["start"] != self.currentSelection["end"]:
polygons = self.generateSelectionPolygonPoints()
painter.setBrush(QtGui.QBrush(QtGui.QColor(0, 0xFF, 0, 0x80)))
for polygon in polygons:
polygonQt = QtGui.QPolygonF()
for point in polygon:
polygonQt.append(QtCore.QPointF(point[0], point[1]))
painter.drawPolygon(polygonQt)
while lineNum < self.lastIndexToPaint:
xpos = self.hex_xpos
for i in range(lineNum, lineNum + self.BYTES_PER_LINE):
if i >= len(self.data):
break
hex = self.data[i]
if self.isInCursorLine(i, self._cursorIndexInData):
painter.fillRect(
QtCore.QRect(
xpos, ypos - self._charHeight + 4, self._charWidth * 3, self._charHeight
),
QtGui.QColor(0x6D, 0x9E, 0xFF, 0x20),
)
# Painting the current selection with a different color
if (
i >= self.currentSelection["start"]
and i <= self.currentSelection["end"]
and self.currentSelection["start"] != self.currentSelection["end"]
):
# painter.setBackground(QtGui.QBrush(QtGui.QColor(0x00, 0xff, 0x00, 0x30)))
# painter.setBackgroundMode(Qt.OpaqueMode)
painter.setBackgroundMode(Qt.TransparentMode)
elif self.selections.isSelected(i):
painter.setBackground(QtGui.QBrush(QtGui.QColor(0xFF, 0x00, 0x00, 0x30)))
painter.setBackgroundMode(Qt.OpaqueMode)
else:
painter.setBackgroundMode(Qt.TransparentMode)
painter.drawText(xpos, ypos, " ")
xpos += self._charWidth
if i == self._cursorIndexInData:
painter.setBackground(QtGui.QBrush(QtGui.QColor(0x6D, 0x9E, 0xFF, 0xFF)))
painter.setBackgroundMode(Qt.OpaqueMode)
painter.drawText(xpos, ypos, f"{hex:02x}")
xpos += self._charWidth * 2
ypos += self._charHeight
lineNum += self.BYTES_PER_LINE
def generateSelectionPolygonPoints(self):
points = []
startLine = self.currentSelection["start"] // self.BYTES_PER_LINE
endLine = self.currentSelection["end"] // self.BYTES_PER_LINE
posStartLine = self.currentSelection["start"] % self.BYTES_PER_LINE
posEndLine = self.currentSelection["end"] % self.BYTES_PER_LINE
start = self.dataPosToCanvasPoint(self.currentSelection["start"])
end = self.dataPosToCanvasPoint(self.currentSelection["end"])
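        # Three shapes cover every possible selection:
        #   1. start and end on the same line                -> a single rectangle
        #   2. end one line below start, ending left of the
        #      start column                                  -> two disjoint rectangles
        #   3. any other multi-line selection                -> one eight-point "staircase" polygon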
if startLine == endLine:
polygon = []
polygon.append([start[0], start[1]])
polygon.append([end[0] + self._charWidth * 3, start[1]])
polygon.append([end[0] + self._charWidth * 3, end[1] + self._charHeight])
polygon.append([start[0], end[1] + self._charHeight])
points.append(polygon)
elif endLine - startLine == 1 and posStartLine > posEndLine:
polygon1 = []
polygon1.append([start[0], start[1]])
polygon1.append([self.ascii_xpos - self._charWidth // 2, start[1]])
polygon1.append([self.ascii_xpos - self._charWidth // 2, start[1] + self._charHeight])
polygon1.append([start[0], start[1] + self._charHeight])
points.append(polygon1)
polygon2 = []
polygon2.append([self.hex_xpos + self._charWidth // 2, end[1]])
polygon2.append([end[0] + self._charWidth * 3, end[1]])
polygon2.append([end[0] + self._charWidth * 3, end[1] + self._charHeight])
polygon2.append([self.hex_xpos + self._charWidth // 2, end[1] + self._charHeight])
points.append(polygon2)
else:
polygon = []
polygon.append([start[0], start[1]])
polygon.append([self.ascii_xpos - self._charWidth // 2, start[1]])
polygon.append([self.ascii_xpos - self._charWidth // 2, end[1]])
polygon.append([end[0] + self._charWidth * 3, end[1]])
polygon.append([end[0] + self._charWidth * 3, end[1] + self._charHeight])
polygon.append([self.hex_xpos + self._charWidth // 2, end[1] + self._charHeight])
polygon.append([self.hex_xpos + self._charWidth // 2, start[1] + self._charHeight])
polygon.append([start[0], start[1] + self._charHeight])
points.append(polygon)
return points
def dataPosToCanvasPoint(self, pos):
x = (pos % self.BYTES_PER_LINE) * self._charWidth * 3 + self.hex_xpos
y = (pos // self.BYTES_PER_LINE) * self._charHeight
return [x + self._charWidth // 2, y + 3]
def dataPosToCanvasEnvelop(self, pos):
x = self.hex_xpos
y = (pos // self.BYTES_PER_LINE) * self._charHeight
return [x + self._charWidth // 2, y + 3]
def paintLatin1Area(self, painter, e):
painter.setBackgroundMode(Qt.TransparentMode)
painter.fillRect(
QtCore.QRect(self.ascii_xpos, e.rect().top(), self.ascii_width, self.height()),
QtGui.QColor(0xFF, 0xFB, 0xD0, 0xFF),
)
ypos = (
(self.firstIndexToPaint) / self.BYTES_PER_LINE
) * self._charHeight + self._charHeight
lineNum = self.firstIndexToPaint
while lineNum < self.lastIndexToPaint:
xpos = self.ascii_xpos + self._charWidth
for i in range(lineNum, lineNum + self.BYTES_PER_LINE):
if i >= len(self.data):
break
ch = self.data[i]
if ch < 0x20 or (ch > 0x7E and ch < 0xA0) or ch == 0xAD:
ch = "."
else:
ch = chr(ch)
if self.currentSelection["start"] <= i and self.currentSelection["end"] >= i:
painter.setBackground(QtGui.QBrush(QtGui.QColor(0xFF, 0x00, 0xFF, 0x80)))
painter.setBackgroundMode(Qt.OpaqueMode)
else:
painter.setBackgroundMode(Qt.TransparentMode)
painter.drawText(xpos, ypos, ch)
xpos += self._charWidth
ypos += self._charHeight
lineNum += self.BYTES_PER_LINE
def paintCursor(self, painter, e):
if self._cursorBlink:
painter.fillRect(
self._cursorXPositionInCanvas,
self._cursorYPositionInCanvas,
self._charWidth,
2,
self.palette().color(QtGui.QPalette.WindowText),
)
def isInCursorLine(self, pos, cursor):
lineStart = (cursor // self.BYTES_PER_LINE) * self.BYTES_PER_LINE
if pos >= lineStart and pos <= lineStart + self.BYTES_PER_LINE - 1:
return True
return False
def numHexChars(self, num):
numHexs = 0
while num:
num >>= 4
numHexs += 1
return numHexs
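# Minimal usage sketch (illustrative only, not part of the original module).  HexEditor_p
# calls parent.setFixedSize() and parent.ensureVisible() on construction, so it is meant
# to be hosted inside a QtWidgets.QScrollArea:
#
#   app = QtWidgets.QApplication([])
#   scroll = QtWidgets.QScrollArea()
#   editor = HexEditor_p(scroll)
#   scroll.setWidget(editor)
#   scroll.show()
#   app.exec_()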
|
the-stack_106_20543
|
import unittest
import time
from selenium import webdriver
from HtmlTestRunner import HTMLTestRunner
class My_Test(unittest.TestCase):
    ''' Baidu search test '''
def setUp(self):
self.driver = webdriver.Chrome()
self.driver.maximize_window()
self.driver.implicitly_wait(10)
self.base_url = 'http://www.baidu.com/'
def test_baidu(self):
driver = self.driver
driver.get(self.base_url+'/')
driver.find_element_by_id('kw').clear()
driver.find_element_by_id('kw').send_keys('美女')
driver.find_element_by_id('su').click()
time.sleep(2)
title = driver.title
self.assertEqual(title,'美女_百度搜索')
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
# Running the tests this way does not generate an HTML report by itself; for the
# simplest way to produce one, see:
# https://blog.csdn.net/weixin_38981172/article/details/82389416
|
the-stack_106_20544
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Qtum Core developers
# Copyright (c) 2020 The BCS Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks, wait_until
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, 1036)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
# node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
        # connect node1 (non-pruned) with node0 (pruned) and check if they can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
|
the-stack_106_20545
|
import time
import os
import json
from vcpeutils.csar_parser import CsarParser
from robot.api import logger
from datetime import datetime
import sys
from ONAPLibrary.PreloadSDNCKeywords import PreloadSDNCKeywords
from ONAPLibrary.RequestSOKeywords import RequestSOKeywords
from ONAPLibrary.BaseAAIKeywords import BaseAAIKeywords
from ONAPLibrary.UUIDKeywords import UUIDKeywords
class SoUtils:
def __init__(self):
# SO urls, note: do NOT add a '/' at the end of the url
self.so_nbi_port = '8080'
self.so_host = 'so.onap'
self.so_si_path = '/onap/so/infra/serviceInstantiation/v7/serviceInstances'
self.so_orch_path = '/onap/so/infra/orchestrationRequests/v6'
self.service_req_api_url = 'http://' + self.so_host + ':' + self.so_nbi_port
self.so_check_progress_api_url = 'http://' + self.so_host + ':' + self.so_nbi_port + self.so_orch_path + '/'
self.so_userpass = 'InfraPortalClient', 'password1$'
self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        # message router (MR) address and port
self.mr_ip_addr = 'mr.onap'
self.mr_ip_port = '3904'
# sdnc urls
self.sdnc_ip_addr = 'sdnc.onap'
self.sdnc_preloading_port = '8282'
self.sdnc_endpoint = 'http://' + self.sdnc_ip_addr + ':' + self.sdnc_preloading_port
self.sdnc_preload_vnf_url = '/restconf/operations/VNF-API:preload-vnf-topology-operation'
# properties
self.homing_solution = 'sniro' # value is either 'sniro' or 'oof'
self.customer_location_used_by_oof = {
"customerLatitude": "32.897480",
"customerLongitude": "-97.040443",
"customerName": "some_company"
}
self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
self.instance_name_prefix = {
'service': 'svc',
'network': 'net',
'vnf': 'vnf',
'vfmodule': 'vf'
}
# set the openstack cloud access credentials here
self.cloud = {
'--os-auth-url': 'http://10.12.25.2:5000',
'--os-username': 'kxi',
'--os-user-domain-id': 'default',
'--os-project-domain-id': 'default',
'--os-tenant-id': '09d8566ea45e43aa974cf447ed591d77',
'--os-region-name': 'RegionOne',
'--os-password': 'n3JhGMGuDzD8',
'--os-project-domain-name': 'Integration-SB-03',
'--os-identity-api-version': '3'
}
self.template_path = 'robot/assets/templates'
self.owning_entity_name = 'OE-Demonstration1'
self.project_name = 'Project-Demonstration'
self.owning_entity_id = '520cc603-a3c4-4ec2-9ef4-ca70facd79c0'
self.global_subscriber_id = 'Demonstration'
self.vgw_VfModuleModelInvariantUuid = '26d6a718-17b2-4ba8-8691-c44343b2ecd2'
self.so = RequestSOKeywords()
self.aai = BaseAAIKeywords()
self.uuid = UUIDKeywords()
@staticmethod
def add_req_info(req_details, instance_name, product_family_id=None):
req_details['requestInfo'] = {
'instanceName': instance_name,
'source': 'VID',
'suppressRollback': 'true',
'requestorId': 'vCPE-Robot'
}
if product_family_id:
req_details['requestInfo']['productFamilyId'] = product_family_id
@staticmethod
def add_related_instance(req_details, instance_id, instance_model):
instance = {"instanceId": instance_id, "modelInfo": instance_model}
if 'relatedInstanceList' not in req_details:
req_details['relatedInstanceList'] = [{"relatedInstance": instance}]
else:
req_details['relatedInstanceList'].append({"relatedInstance": instance})
def generate_vnf_or_network_request(self, instance_name, vnf_or_network_model, service_instance_id, service_model,
tenant_id, region_name):
req_details = {
'modelInfo': vnf_or_network_model,
'cloudConfiguration': {"lcpCloudRegionId": region_name,
"tenantId": tenant_id},
'requestParameters': {"userParams": []},
'platform': {"platformName": "Platform-Demonstration"}
}
self.add_req_info(req_details, instance_name, self.product_family_id)
self.add_related_instance(req_details, service_instance_id, service_model)
return {'requestDetails': req_details}
def generate_vfmodule_request(self, instance_name, vfmodule_model, service_instance_id,
service_model, vnf_instance_id, vnf_model, tenant_id, region_name):
req_details = {
'modelInfo': vfmodule_model,
'cloudConfiguration': {"lcpCloudRegionId": region_name,
"tenantId": tenant_id},
'requestParameters': {"usePreload": 'true'}
}
self.add_req_info(req_details, instance_name, self.product_family_id)
self.add_related_instance(req_details, service_instance_id, service_model)
self.add_related_instance(req_details, vnf_instance_id, vnf_model)
return {'requestDetails': req_details}
def generate_service_request(self, instance_name, model):
req_details = {
'modelInfo': model,
'subscriberInfo': {'globalSubscriberId': self.global_subscriber_id},
'requestParameters': {
"userParams": [],
"subscriptionServiceType": "vCPE",
"aLaCarte": 'true'
}
}
self.add_req_info(req_details, instance_name)
self.add_project_info(req_details)
self.add_owning_entity(req_details)
return {'requestDetails': req_details}
def add_project_info(self, req_details):
req_details['project'] = {'projectName': self.project_name}
def add_owning_entity(self, req_details):
req_details['owningEntity'] = {'owningEntityId': self.owning_entity_id,
'owningEntityName': self.owning_entity_name}
def generate_custom_service_request(self, instance_name, model, brg_mac, tenant_id, region_name):
brg_mac_enc = brg_mac.replace(':', '-')
req_details = {
'modelInfo': model,
'subscriberInfo': {'subscriberName': 'Kaneohe',
'globalSubscriberId': self.global_subscriber_id},
'cloudConfiguration': {"lcpCloudRegionId": region_name,
"tenantId": tenant_id},
'requestParameters': {
"userParams": [
{
'name': 'BRG_WAN_MAC_Address',
'value': brg_mac
},
{
'name': 'VfModuleNames',
'value': [
{
'VfModuleModelInvariantUuid': self.vgw_VfModuleModelInvariantUuid,
'VfModuleName': 'VGW2BRG-{0}'.format(brg_mac_enc)
}
]
},
{
"name": "Customer_Location",
"value": self.customer_location_used_by_oof
},
{
"name": "Homing_Solution",
"value": self.homing_solution
}
],
"subscriptionServiceType": "vCPE",
'aLaCarte': 'false'
}
}
self.add_req_info(req_details, instance_name, self.custom_product_family_id)
self.add_project_info(req_details)
self.add_owning_entity(req_details)
return {'requestDetails': req_details}
def create_custom_service(self, csar_file, brg_mac, tenant_id, region_name, name_suffix=None):
parser = CsarParser()
if not parser.parse_csar(csar_file):
return False
# yyyymmdd_hhmm
if not name_suffix:
name_suffix = '_' + datetime.now().strftime('%Y%m%d%H%M')
# create service
instance_name = '_'.join([self.instance_name_prefix['service'],
parser.svc_model['modelName'][0:10], name_suffix])
instance_name = instance_name.lower()
req = self.generate_custom_service_request(instance_name, parser.svc_model, brg_mac, tenant_id, region_name)
logger.info(json.dumps(req, indent=2, sort_keys=True))
logger.info('Creating custom service {0}.'.format(instance_name))
req_id, svc_instance_id = self.so.run_create_request(self.service_req_api_url, self.so_si_path,
json.dumps(req), auth=self.so_userpass)
done, resp = self.so.run_polling_get_request(self.so_check_progress_api_url, req_id, tries=50, interval=5,
auth=self.so_userpass)
return done
def create_entire_service(self, csar_file, vnf_template_file, preload_dict, region_name, tenant_id, ssh_key):
"""
:param csar_file:
:param vnf_template_file:
:param preload_dict:
:param region_name:
:param tenant_id
:param ssh_key
:return: service instance UUID
"""
name_suffix = str(self.uuid.generate_timestamp())
logger.info('\n----------------------------------------------------------------------------------')
logger.info('Start to create entire service defined in csar: {0}'.format(csar_file))
parser = CsarParser()
logger.info('Parsing csar ...')
if not parser.parse_csar(csar_file):
logger.error('Cannot parse csar: {0}'.format(csar_file))
return None
# Set Global timestamp for instancenames
global_timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
# create service
instance_name = '_'.join([self.instance_name_prefix['service'],
parser.svc_model['modelName'], global_timestamp, name_suffix])
instance_name = instance_name.lower()
instance_name = instance_name.replace(' ', '')
instance_name = instance_name.replace(':', '')
logger.info('Creating service instance: {0}.'.format(instance_name))
req = self.generate_service_request(instance_name, parser.svc_model)
logger.debug(json.dumps(req, indent=2, sort_keys=True))
req_id, svc_instance_id = self.so.run_create_request(self.service_req_api_url, self.so_si_path,
json.dumps(req), auth=self.so_userpass)
done, resp = self.so.run_polling_get_request(self.so_check_progress_api_url, req_id, tries=50, interval=5,
auth=self.so_userpass)
if not done:
return None
# wait for AAI to complete traversal
self.aai.wait_for_node_to_exist('service-instance', 'service-instance-id', svc_instance_id)
# create networks
for model in parser.net_models:
base_name = model['modelCustomizationName'].lower().replace('mux_vg', 'mux_gw')
network_name = '_'.join([self.instance_name_prefix['network'], base_name, name_suffix])
network_name = network_name.lower()
logger.info('Creating network: ' + network_name)
req = self.generate_vnf_or_network_request(network_name, model, svc_instance_id, parser.svc_model,
tenant_id, region_name)
logger.debug(json.dumps(req, indent=2, sort_keys=True))
url = '/'.join([self.so_si_path, svc_instance_id, 'networks'])
req_id, net_instance_id = self.so.run_create_request(self.service_req_api_url, url, json.dumps(req),
auth=self.so_userpass)
done, resp = self.so.run_polling_get_request(self.so_check_progress_api_url, req_id, tries=50, interval=5,
auth=self.so_userpass)
if not done:
return None
logger.info('Changing subnet name to ' + self.network_name_to_subnet_name(network_name))
self.set_network_name(network_name)
subnet_name_changed = False
for i in range(20):
time.sleep(3)
if self.set_subnet_name(network_name):
subnet_name_changed = True
break
if not subnet_name_changed:
logger.error('Failed to change subnet name for ' + network_name)
return None
vnf_model = None
vnf_instance_id = None
# create VNF
if len(parser.vnf_models) == 1:
vnf_model = parser.vnf_models[0]
vnf_instance_name = '_'.join([self.instance_name_prefix['vnf'],
vnf_model['modelCustomizationName'].split(' ')[0], name_suffix])
vnf_instance_name = vnf_instance_name.lower()
vnf_instance_name = vnf_instance_name.replace(' ', '')
vnf_instance_name = vnf_instance_name.replace(':', '')
logger.info('Creating VNF: ' + vnf_instance_name)
req = self.generate_vnf_or_network_request(vnf_instance_name, vnf_model, svc_instance_id, parser.svc_model,
tenant_id, region_name)
logger.debug(json.dumps(req, indent=2, sort_keys=True))
url = '/'.join([self.so_si_path, svc_instance_id, 'vnfs'])
req_id, vnf_instance_id = self.so.run_create_request(self.service_req_api_url, url, json.dumps(req),
auth=self.so_userpass)
done, resp = self.so.run_polling_get_request(self.so_check_progress_api_url, req_id, tries=50, interval=5,
auth=self.so_userpass)
if not done:
logger.error('Failed to create VNF {0}.'.format(vnf_instance_name))
return False
# wait for AAI to complete traversal
if not vnf_instance_id:
logger.error('No VNF instance ID returned!')
sys.exit()
self.aai.wait_for_node_to_exist('generic-vnf', 'vnf-id', vnf_instance_id)
# SDNC Preload
preloader = PreloadSDNCKeywords()
vfmodule_name = '_'.join(['vf',
parser.vfmodule_models[0]['modelCustomizationName'].split('..')[0].lower(),
name_suffix])
extra_preload = {
'pub_key': ssh_key,
'vnf_type': parser.vfmodule_models[0]['modelCustomizationName'],
'generic_vnf_type': parser.vfmodule_models[0]['modelCustomizationName'],
'service_type': svc_instance_id,
'generic_vnf_name': vnf_model['modelCustomizationName'],
'vnf_name': vfmodule_name,
'mr_ip_addr': self.mr_ip_addr,
'mr_ip_port': self.mr_ip_port,
'sdnc_oam_ip': self.sdnc_ip_addr,
'suffix': name_suffix,
'oam_onap_net': 'oam_network_2No2',
'oam_onap_subnet': 'oam_network_2No2',
'public_net': 'external',
'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
}
preload_dict.update(extra_preload)
preloader.preload_vfmodule(self.sdnc_endpoint, self.sdnc_preload_vnf_url, self.template_path, vnf_template_file,
preload_dict)
# create VF Module
if len(parser.vfmodule_models) == 1:
if not vnf_instance_id or not vnf_model:
logger.error('Invalid VNF instance ID or VNF model!')
sys.exit()
model = parser.vfmodule_models[0]
vfmodule_instance_name = '_'.join([self.instance_name_prefix['vfmodule'],
model['modelCustomizationName'].split('..')[0], name_suffix])
vfmodule_instance_name = vfmodule_instance_name.lower()
vfmodule_instance_name = vfmodule_instance_name.replace(' ', '')
vfmodule_instance_name = vfmodule_instance_name.replace(':', '')
logger.info('Creating VF Module: ' + vfmodule_instance_name)
req = self.generate_vfmodule_request(vfmodule_instance_name, model, svc_instance_id, parser.svc_model,
vnf_instance_id, vnf_model, tenant_id, region_name)
logger.debug(json.dumps(req, indent=2, sort_keys=True))
url = '/'.join([self.so_si_path, svc_instance_id, 'vnfs', vnf_instance_id, 'vfModules'])
req_id, vfmodule_instance_id = self.so.run_create_request(self.service_req_api_url, url, json.dumps(req),
auth=self.so_userpass)
done, resp = self.so.run_polling_get_request(self.so_check_progress_api_url, req_id, tries=50, interval=50,
auth=self.so_userpass)
if not done:
logger.error('Failed to create VF Module {0}.'.format(vfmodule_instance_name))
return None
return svc_instance_id
@staticmethod
def network_name_to_subnet_name(network_name):
"""
:param network_name: example: vcpe_net_cpe_signal_201711281221
:return: vcpe_net_cpe_signal_subnet_201711281221
"""
fields = network_name.split('_')
fields.insert(-1, 'subnet')
return '_'.join(fields)
def set_network_name(self, network_name):
param = ' '.join([k + ' ' + v for k, v in list(self.cloud.items())])
openstackcmd = 'openstack ' + param
cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
os.popen(cmd)
def set_subnet_name(self, network_name):
"""
Example: network_name = vcpe_net_cpe_signal_201711281221
set subnet name to vcpe_net_cpe_signal_subnet_201711281221
:return:
"""
param = ' '.join([k + ' ' + v for k, v in list(self.cloud.items())])
openstackcmd = 'openstack ' + param
# expected results: | subnets | subnet_id |
subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
subnet_id = subnet_info[2].strip()
subnet_name = self.network_name_to_subnet_name(network_name)
cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
os.popen(cmd)
logger.info("Subnet name set to: " + subnet_name)
return True
else:
logger.error("Can't get subnet info from network name: " + network_name)
return False
|
the-stack_106_20548
|
"""
Schema differencing support.
"""
import logging
import sqlalchemy
from sqlalchemy.types import Float
log = logging.getLogger(__name__)
def getDiffOfModelAgainstDatabase(metadata, engine, excludeTables=None):
"""
Return differences of model against database.
:return: object which will evaluate to :keyword:`True` if there \
are differences else :keyword:`False`.
"""
db_metadata = sqlalchemy.MetaData(engine, reflect=True)
# sqlite will include a dynamically generated 'sqlite_sequence' table if
# there are autoincrement sequences in the database; this should not be
# compared.
if engine.dialect.name == 'sqlite':
if 'sqlite_sequence' in db_metadata.tables:
db_metadata.remove(db_metadata.tables['sqlite_sequence'])
return SchemaDiff(metadata, db_metadata,
labelA='model',
labelB='database',
excludeTables=excludeTables)
def getDiffOfModelAgainstModel(metadataA, metadataB, excludeTables=None):
"""
Return differences of model against another model.
:return: object which will evaluate to :keyword:`True` if there \
are differences else :keyword:`False`.
"""
    return SchemaDiff(metadataA, metadataB, excludeTables=excludeTables)
class ColDiff(object):
"""
Container for differences in one :class:`~sqlalchemy.schema.Column`
between two :class:`~sqlalchemy.schema.Table` instances, ``A``
and ``B``.
.. attribute:: col_A
The :class:`~sqlalchemy.schema.Column` object for A.
.. attribute:: col_B
The :class:`~sqlalchemy.schema.Column` object for B.
.. attribute:: type_A
The most generic type of the :class:`~sqlalchemy.schema.Column`
object in A.
.. attribute:: type_B
The most generic type of the :class:`~sqlalchemy.schema.Column`
      object in B.
"""
diff = False
def __init__(self,col_A,col_B):
self.col_A = col_A
self.col_B = col_B
self.type_A = col_A.type
self.type_B = col_B.type
self.affinity_A = self.type_A._type_affinity
self.affinity_B = self.type_B._type_affinity
if self.affinity_A is not self.affinity_B:
self.diff = True
return
if isinstance(self.type_A,Float) or isinstance(self.type_B,Float):
if not (isinstance(self.type_A,Float) and isinstance(self.type_B,Float)):
self.diff=True
return
for attr in ('precision','scale','length'):
A = getattr(self.type_A,attr,None)
B = getattr(self.type_B,attr,None)
if not (A is None or B is None) and A!=B:
self.diff=True
return
def __nonzero__(self):
return self.diff
class TableDiff(object):
"""
Container for differences in one :class:`~sqlalchemy.schema.Table`
between two :class:`~sqlalchemy.schema.MetaData` instances, ``A``
and ``B``.
.. attribute:: columns_missing_from_A
A sequence of column names that were found in B but weren't in
A.
.. attribute:: columns_missing_from_B
A sequence of column names that were found in A but weren't in
B.
.. attribute:: columns_different
A dictionary containing information about columns that were
found to be different.
It maps column names to a :class:`ColDiff` objects describing the
differences found.
"""
__slots__ = (
'columns_missing_from_A',
'columns_missing_from_B',
'columns_different',
)
def __nonzero__(self):
return bool(
self.columns_missing_from_A or
self.columns_missing_from_B or
self.columns_different
)
class SchemaDiff(object):
"""
Compute the difference between two :class:`~sqlalchemy.schema.MetaData`
objects.
The string representation of a :class:`SchemaDiff` will summarise
the changes found between the two
:class:`~sqlalchemy.schema.MetaData` objects.
The length of a :class:`SchemaDiff` will give the number of
changes found, enabling it to be used much like a boolean in
expressions.
:param metadataA:
First :class:`~sqlalchemy.schema.MetaData` to compare.
:param metadataB:
Second :class:`~sqlalchemy.schema.MetaData` to compare.
:param labelA:
The label to use in messages about the first
:class:`~sqlalchemy.schema.MetaData`.
:param labelB:
The label to use in messages about the second
:class:`~sqlalchemy.schema.MetaData`.
:param excludeTables:
A sequence of table names to exclude.
.. attribute:: tables_missing_from_A
A sequence of table names that were found in B but weren't in
A.
.. attribute:: tables_missing_from_B
A sequence of table names that were found in A but weren't in
B.
.. attribute:: tables_different
A dictionary containing information about tables that were found
to be different.
It maps table names to a :class:`TableDiff` objects describing the
differences found.
"""
def __init__(self,
metadataA, metadataB,
labelA='metadataA',
labelB='metadataB',
excludeTables=None):
self.metadataA, self.metadataB = metadataA, metadataB
self.labelA, self.labelB = labelA, labelB
self.label_width = max(len(labelA),len(labelB))
excludeTables = set(excludeTables or [])
A_table_names = set(metadataA.tables.keys())
B_table_names = set(metadataB.tables.keys())
self.tables_missing_from_A = sorted(
B_table_names - A_table_names - excludeTables
)
self.tables_missing_from_B = sorted(
A_table_names - B_table_names - excludeTables
)
self.tables_different = {}
for table_name in A_table_names.intersection(B_table_names):
td = TableDiff()
A_table = metadataA.tables[table_name]
B_table = metadataB.tables[table_name]
A_column_names = set(A_table.columns.keys())
B_column_names = set(B_table.columns.keys())
td.columns_missing_from_A = sorted(
B_column_names - A_column_names
)
td.columns_missing_from_B = sorted(
A_column_names - B_column_names
)
td.columns_different = {}
for col_name in A_column_names.intersection(B_column_names):
cd = ColDiff(
A_table.columns.get(col_name),
B_table.columns.get(col_name)
)
if cd:
td.columns_different[col_name]=cd
# XXX - index and constraint differences should
# be checked for here
if td:
self.tables_different[table_name]=td
def __str__(self):
''' Summarize differences. '''
out = []
column_template =' %%%is: %%r' % self.label_width
for names,label in (
(self.tables_missing_from_A,self.labelA),
(self.tables_missing_from_B,self.labelB),
):
if names:
out.append(
' tables missing from %s: %s' % (
label,', '.join(sorted(names))
)
)
for name,td in sorted(self.tables_different.items()):
out.append(
' table with differences: %s' % name
)
for names,label in (
(td.columns_missing_from_A,self.labelA),
(td.columns_missing_from_B,self.labelB),
):
if names:
out.append(
' %s missing these columns: %s' % (
label,', '.join(sorted(names))
)
)
for name,cd in td.columns_different.items():
out.append(' column with differences: %s' % name)
out.append(column_template % (self.labelA,cd.col_A))
out.append(column_template % (self.labelB,cd.col_B))
if out:
out.insert(0, 'Schema diffs:')
return '\n'.join(out)
else:
return 'No schema diffs'
def __len__(self):
"""
Used in bool evaluation, return of 0 means no diffs.
"""
return (
len(self.tables_missing_from_A) +
len(self.tables_missing_from_B) +
len(self.tables_different)
)
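# Minimal usage sketch (illustrative only; `model_meta` and `engine` are placeholder
# names, not part of this module):
#
#   diff = getDiffOfModelAgainstDatabase(model_meta, engine,
#                                        excludeTables=['migrate_version'])
#   if len(diff):            # __len__ gives the number of differences found
#       log.warning(diff)    # __str__ summarises missing and differing tables/columns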
|
the-stack_106_20556
|
import math
import pdb
import torch
import torch.nn as nn
from torch.nn import functional as F
from .fully_connected import MLP
class TRMMHAttention(nn.Module):
def __init__(self, n_heads, d_model, dropout_p=0.):
super(TRMMHAttention, self).__init__()
self.d_model = d_model
self.n_heads = n_heads
self.d_k = d_model // n_heads
self.dropout_p = dropout_p
#instantiate single Q, K, and V weights and split them in multiple heads during forward (better parallelization on GPU)
        # this is better than creating n_heads weight matrices and running them in a loop
self.Q = nn.Linear(d_model, d_model)
self.K = nn.Linear(d_model, d_model)
self.V = nn.Linear(d_model, d_model)
#self.qkv = nn.Linear(3*d_model, d_model) #?
self.dropout = nn.Dropout(p=dropout_p)
self.out = nn.Linear(d_model, d_model)
def forward(self, in_1, in_2, attn_mask_1, attn_mask_2):
assert in_1.shape[0] == in_2.shape[0], 'Uncompatible batch sizes'
b_size = in_1.shape[0]
# split the result of linear operations in n_heads heads (b_size, seq_len, n_heads, d_k)
q = self.Q(in_1).view(b_size, -1, self.n_heads, self.d_k)
k = self.K(in_2).view(b_size, -1, self.n_heads, self.d_k)
v = self.V(in_2).view(b_size, -1, self.n_heads, self.d_k)
#transpose to get dimension (b_size, n_heads, seq_len, d_k)
        # this is because torch.matmul performs matrix multiplication between the last two dimensions (all the others are treated as batch dimensions)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
#build attn mask by projecting the set_1 mask on the columns and set_2 mask on the rows
attn_mask = attn_mask_1.unsqueeze(-1) * attn_mask_2.unsqueeze(-2)
outs, attn_scores = self.scaledDotProductAttention(q, k, v, attn_mask)
out_concat = outs.transpose(1, 2).contiguous().view(b_size, -1, self.d_model)
#attn_scores = attn_scores.transpose(1, 2)
return self.out(out_concat), attn_scores
#? detach it from the multi-head attn class? In case there is the need to pass an nn.Dropout() as parameter
def scaledDotProductAttention(self, query, key, value, attn_mask):
attn_logits = torch.matmul(query, torch.transpose(key, -2, -1))/ math.sqrt(key.shape[-1])
#mask padding and future words with a great negative value.
# DO NOT USE '-INF' BECAUSE IT WILL GENERATE NaN AFTER SOFTMAX FOR PADDING ROWS (filled with all 0's)
#take into account mixed precision and avoid overflow (-1e+4 can be represented on 16 bits)
_MASKING_VALUE = -1e+30 if attn_logits.dtype == torch.float32 else -1e+4
if attn_logits.dim() == 4:
#in case of multi-head attentions there is an additional dimension given by the n of heads
masked_attn_logits = attn_logits.masked_fill(attn_mask[:, None, :, :]==0, value=_MASKING_VALUE)
else:
masked_attn_logits = attn_logits.masked_fill(attn_mask==0, value=_MASKING_VALUE)
attn_scores = F.softmax(masked_attn_logits, dim=-1)
attn_scores = self.dropout(attn_scores)
out = torch.matmul(attn_scores, value)
return out, attn_scores
class TRMEncoderLayer(nn.Module):
def __init__(self, n_heads, d_model, dropout_p=0.):
super(TRMEncoderLayer, self).__init__()
self.mhead_attn = TRMMHAttention(n_heads=n_heads,
d_model=d_model,
dropout_p=dropout_p)
self.dropout_1 = nn.Dropout(p=dropout_p)
self.norm_1 = nn.LayerNorm(d_model)
self.mlp = MLP(in_features=d_model,
hidden_dim=d_model//2,
out_features=d_model,
dropout_p=dropout_p,
use_relu=True)
self.dropout_2 = nn.Dropout(p=dropout_p)
self.norm_2 = nn.LayerNorm(d_model)
def forward(self, inputs, attn_mask):
mhead_out, attn_scores = self.mhead_attn(inputs, inputs, attn_mask, attn_mask)
mid_out = self.norm_1(self.dropout_1(mhead_out) + inputs)
out = self.norm_2(self.dropout_2(self.mlp(mid_out)) + mid_out)
return out, attn_scores
class TRMDecoderLayer(nn.Module):
def __init__(self, n_heads, d_model, d_ff, dropout_p=0.):
super(TRMDecoderLayer, self).__init__()
self.mhead_self_attn = TRMMHAttention(n_heads=n_heads,
d_model=d_model,
dropout_p=dropout_p)
self.dropout_1 = nn.Dropout(p=dropout_p)
self.norm_1 = nn.LayerNorm(d_model)
self.mhead_enc_attn = TRMMHAttention(n_heads=n_heads,
d_model=d_model,
dropout_p=dropout_p)
self.dropout_2 = nn.Dropout(p=dropout_p)
self.norm_2 = nn.LayerNorm(d_model)
self.mlp = MLP(in_features=d_ff,
hidden_dim=d_ff//2,
out_features=d_ff,
dropout_p=dropout_p,
use_relu=True)
self.dropout_3 = nn.Dropout(p=dropout_p)
self.norm_3 = nn.LayerNorm(d_model)
    def forward(self, inputs, enc_outs, inputs_mask, enc_mask):
        # self-attention over the decoder inputs; TRMMHAttention expects one mask per input set
        s_mhead_out, s_attn_scores = self.mhead_self_attn(inputs, inputs, inputs_mask, inputs_mask)
        mid_out_1 = self.norm_1(self.dropout_1(s_mhead_out) + inputs)
        # encoder-decoder attention: queries come from the decoder stream, keys/values from the
        # encoder outputs; the joint mask is built inside TRMMHAttention from the two masks
        e_mhead_out, e_attn_scores = self.mhead_enc_attn(mid_out_1, enc_outs, inputs_mask, enc_mask)
        mid_out_2 = self.norm_2(self.dropout_2(e_mhead_out) + mid_out_1)
        out = self.norm_3(self.dropout_3(self.mlp(mid_out_2)) + mid_out_2)
        return out, s_attn_scores, e_attn_scores
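# Minimal usage sketch (illustrative only; assumes the relative import of MLP resolves):
#
#   layer = TRMEncoderLayer(n_heads=4, d_model=64, dropout_p=0.1)
#   x = torch.randn(2, 10, 64)        # (batch, seq_len, d_model)
#   pad_mask = torch.ones(2, 10)      # 1 = real token, 0 = padding
#   out, attn = layer(x, pad_mask)    # out: (2, 10, 64), attn: (2, 4, 10, 10)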
|
the-stack_106_20557
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class LockInformation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error_details': 'ErrorDetails',
'lock_duration_in_seconds': 'str',
'locked_by_app': 'str',
'locked_by_user': 'UserInfo',
'locked_until_date_time': 'str',
'lock_token': 'str',
'lock_type': 'str',
'use_scratch_pad': 'str'
}
attribute_map = {
'error_details': 'errorDetails',
'lock_duration_in_seconds': 'lockDurationInSeconds',
'locked_by_app': 'lockedByApp',
'locked_by_user': 'lockedByUser',
'locked_until_date_time': 'lockedUntilDateTime',
'lock_token': 'lockToken',
'lock_type': 'lockType',
'use_scratch_pad': 'useScratchPad'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""LockInformation - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._error_details = None
self._lock_duration_in_seconds = None
self._locked_by_app = None
self._locked_by_user = None
self._locked_until_date_time = None
self._lock_token = None
self._lock_type = None
self._use_scratch_pad = None
self.discriminator = None
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('lock_duration_in_seconds'), kwargs.get('lock_duration_in_seconds', None))
setattr(self, "_{}".format('locked_by_app'), kwargs.get('locked_by_app', None))
setattr(self, "_{}".format('locked_by_user'), kwargs.get('locked_by_user', None))
setattr(self, "_{}".format('locked_until_date_time'), kwargs.get('locked_until_date_time', None))
setattr(self, "_{}".format('lock_token'), kwargs.get('lock_token', None))
setattr(self, "_{}".format('lock_type'), kwargs.get('lock_type', None))
setattr(self, "_{}".format('use_scratch_pad'), kwargs.get('use_scratch_pad', None))
@property
def error_details(self):
"""Gets the error_details of this LockInformation. # noqa: E501
:return: The error_details of this LockInformation. # noqa: E501
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this LockInformation.
:param error_details: The error_details of this LockInformation. # noqa: E501
:type: ErrorDetails
"""
self._error_details = error_details
@property
def lock_duration_in_seconds(self):
"""Gets the lock_duration_in_seconds of this LockInformation. # noqa: E501
Sets the time, in seconds, until the lock expires when there is no activity on the envelope. If no value is entered, then the default value of 300 seconds is used. The maximum value is 1,800 seconds. The lock duration can be extended. # noqa: E501
:return: The lock_duration_in_seconds of this LockInformation. # noqa: E501
:rtype: str
"""
return self._lock_duration_in_seconds
@lock_duration_in_seconds.setter
def lock_duration_in_seconds(self, lock_duration_in_seconds):
"""Sets the lock_duration_in_seconds of this LockInformation.
Sets the time, in seconds, until the lock expires when there is no activity on the envelope. If no value is entered, then the default value of 300 seconds is used. The maximum value is 1,800 seconds. The lock duration can be extended. # noqa: E501
:param lock_duration_in_seconds: The lock_duration_in_seconds of this LockInformation. # noqa: E501
:type: str
"""
self._lock_duration_in_seconds = lock_duration_in_seconds
@property
def locked_by_app(self):
"""Gets the locked_by_app of this LockInformation. # noqa: E501
Specifies the friendly name of the application that is locking the envelope. # noqa: E501
:return: The locked_by_app of this LockInformation. # noqa: E501
:rtype: str
"""
return self._locked_by_app
@locked_by_app.setter
def locked_by_app(self, locked_by_app):
"""Sets the locked_by_app of this LockInformation.
Specifies the friendly name of the application that is locking the envelope. # noqa: E501
:param locked_by_app: The locked_by_app of this LockInformation. # noqa: E501
:type: str
"""
self._locked_by_app = locked_by_app
@property
def locked_by_user(self):
"""Gets the locked_by_user of this LockInformation. # noqa: E501
:return: The locked_by_user of this LockInformation. # noqa: E501
:rtype: UserInfo
"""
return self._locked_by_user
@locked_by_user.setter
def locked_by_user(self, locked_by_user):
"""Sets the locked_by_user of this LockInformation.
:param locked_by_user: The locked_by_user of this LockInformation. # noqa: E501
:type: UserInfo
"""
self._locked_by_user = locked_by_user
@property
def locked_until_date_time(self):
"""Gets the locked_until_date_time of this LockInformation. # noqa: E501
The datetime until the envelope lock expires. # noqa: E501
:return: The locked_until_date_time of this LockInformation. # noqa: E501
:rtype: str
"""
return self._locked_until_date_time
@locked_until_date_time.setter
def locked_until_date_time(self, locked_until_date_time):
"""Sets the locked_until_date_time of this LockInformation.
The datetime until the envelope lock expires. # noqa: E501
:param locked_until_date_time: The locked_until_date_time of this LockInformation. # noqa: E501
:type: str
"""
self._locked_until_date_time = locked_until_date_time
@property
def lock_token(self):
"""Gets the lock_token of this LockInformation. # noqa: E501
A unique identifier provided to the owner of the envelope lock. Used to prove ownership of the lock. # noqa: E501
:return: The lock_token of this LockInformation. # noqa: E501
:rtype: str
"""
return self._lock_token
@lock_token.setter
def lock_token(self, lock_token):
"""Sets the lock_token of this LockInformation.
A unique identifier provided to the owner of the envelope lock. Used to prove ownership of the lock. # noqa: E501
:param lock_token: The lock_token of this LockInformation. # noqa: E501
:type: str
"""
self._lock_token = lock_token
@property
def lock_type(self):
"""Gets the lock_type of this LockInformation. # noqa: E501
The type of envelope lock. Currently \"edit\" is the only supported type. # noqa: E501
:return: The lock_type of this LockInformation. # noqa: E501
:rtype: str
"""
return self._lock_type
@lock_type.setter
def lock_type(self, lock_type):
"""Sets the lock_type of this LockInformation.
The type of envelope lock. Currently \"edit\" is the only supported type. # noqa: E501
:param lock_type: The lock_type of this LockInformation. # noqa: E501
:type: str
"""
self._lock_type = lock_type
@property
def use_scratch_pad(self):
"""Gets the use_scratch_pad of this LockInformation. # noqa: E501
Reserved for future use. Indicates whether a scratchpad is used for editing information. # noqa: E501
:return: The use_scratch_pad of this LockInformation. # noqa: E501
:rtype: str
"""
return self._use_scratch_pad
@use_scratch_pad.setter
def use_scratch_pad(self, use_scratch_pad):
"""Sets the use_scratch_pad of this LockInformation.
Reserved for future use. Indicates whether a scratchpad is used for editing information. # noqa: E501
:param use_scratch_pad: The use_scratch_pad of this LockInformation. # noqa: E501
:type: str
"""
self._use_scratch_pad = use_scratch_pad
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LockInformation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LockInformation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, LockInformation):
return True
return self.to_dict() != other.to_dict()
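# Hypothetical usage sketch (illustrative only): exercises the constructor and
# to_dict() defined above. The attribute values are made-up examples and the
# docusign_esign package imported at the top of this file must be installed.
if __name__ == "__main__":
    lock = LockInformation(lock_duration_in_seconds="300",
                           lock_type="edit",
                           locked_by_app="example-app")
    print(lock.to_dict())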
|
the-stack_106_20560
|
"""setup for the dlib project
Copyright (C) 2015 Ehsan Azar ([email protected])
License: Boost Software License See LICENSE.txt for the full license.
This file basically just uses CMake to compile the dlib python bindings project
located in the tools/python folder and then puts the outputs into standard
python packages.
To build the dlib:
python setup.py build
To build and install:
python setup.py install
To package the wheel (after pip installing twine and wheel):
python setup.py bdist_wheel
To upload the binary wheel to PyPi
twine upload dist/*.whl
To upload the source distribution to PyPi
python setup.py sdist
twine upload dist/dlib-*.tar.gz
To exclude/include certain options in the cmake config use --yes and --no:
for example:
--yes USE_AVX_INSTRUCTIONS: will set -DUSE_AVX_INSTRUCTIONS=yes
--no USE_AVX_INSTRUCTIONS: will set -DUSE_AVX_INSTRUCTIONS=no
Additional options:
--compiler-flags: pass flags onto the compiler, e.g. --compiler-flags "-Os -Wall" passes -Os -Wall onto GCC.
-G: Set the CMake generator. E.g. -G "Visual Studio 14 2015"
--clean: delete any previous build folders and rebuild. You should do this if you change any build options
by setting --compiler-flags or --yes or --no since last time you ran a build to make sure the changes
take effect.
--set: set arbitrary options e.g. --set CUDA_HOST_COMPILER=/usr/bin/gcc-6.4.0
"""
import os
import re
import sys
import errno
import stat
import shutil
import platform
import subprocess
import multiprocessing
from distutils import log
from math import ceil,floor
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
def get_extra_cmake_options():
"""read --clean, --yes, --no, --set, --compiler-flags, and -G options from the command line and add them as cmake switches.
"""
_cmake_extra_options = []
_clean_build_folder = False
opt_key = None
argv = [arg for arg in sys.argv] # take a copy
# parse command line options and consume those we care about
for arg in argv:
if opt_key == 'compiler-flags':
_cmake_extra_options.append('-DCMAKE_CXX_FLAGS={arg}'.format(arg=arg.strip()))
elif opt_key == 'G':
_cmake_extra_options += ['-G', arg.strip()]
elif opt_key == 'yes':
_cmake_extra_options.append('-D{arg}=yes'.format(arg=arg.strip()))
elif opt_key == 'no':
_cmake_extra_options.append('-D{arg}=no'.format(arg=arg.strip()))
elif opt_key == 'set':
_cmake_extra_options.append('-D{arg}'.format(arg=arg.strip()))
if opt_key:
sys.argv.remove(arg)
opt_key = None
continue
if arg == '--clean':
_clean_build_folder = True
sys.argv.remove(arg)
continue
if arg in ['--yes', '--no', '--set', '--compiler-flags']:
opt_key = arg[2:].lower()
sys.argv.remove(arg)
continue
if arg in ['-G']:
opt_key = arg[1:]
sys.argv.remove(arg)
continue
return _cmake_extra_options, _clean_build_folder
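# Illustrative example: a command line such as
#   python setup.py build --yes DLIB_USE_CUDA --no USE_AVX_INSTRUCTIONS --set CMAKE_PREFIX_PATH=/opt/foo
# is consumed above and turned into the cmake switches
#   ['-DDLIB_USE_CUDA=yes', '-DUSE_AVX_INSTRUCTIONS=no', '-DCMAKE_PREFIX_PATH=/opt/foo'],
# leaving only the script name and 'build' in sys.argv for setuptools. The
# option names here are examples only.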
cmake_extra_options,clean_build_folder = get_extra_cmake_options()
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
def rmtree(name):
"""remove a directory and its subdirectories.
"""
def remove_read_only(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
if os.path.exists(name):
log.info('Removing old directory {}'.format(name))
shutil.rmtree(name, ignore_errors=False, onerror=remove_read_only)
class CMakeBuild(build_ext):
def get_cmake_version(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("\n*******************************************************************\n" +
" CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions) +
"\n*******************************************************************\n")
return re.search(r'version\s*([\d.]+)', out.decode()).group(1)
def run(self):
cmake_version = self.get_cmake_version()
if platform.system() == "Windows":
if LooseVersion(cmake_version) < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cmake_args += cmake_extra_options
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
# Do a parallel build
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
# Do a parallel build
build_args += ['--', '-j'+str(num_available_cpu_cores(2))]
build_folder = os.path.abspath(self.build_temp)
if clean_build_folder:
rmtree(build_folder)
if not os.path.exists(build_folder):
os.makedirs(build_folder)
cmake_setup = ['cmake', ext.sourcedir] + cmake_args
cmake_build = ['cmake', '--build', '.'] + build_args
print("Building extension for Python {}".format(sys.version.split('\n',1)[0]))
print("Invoking CMake setup: '{}'".format(' '.join(cmake_setup)))
sys.stdout.flush()
subprocess.check_call(cmake_setup, cwd=build_folder)
print("Invoking CMake build: '{}'".format(' '.join(cmake_build)))
sys.stdout.flush()
subprocess.check_call(cmake_build, cwd=build_folder)
def num_available_cpu_cores(ram_per_build_process_in_gb):
if 'TRAVIS' in os.environ and os.environ['TRAVIS']=='true':
# When building on travis-ci, just use 2 cores since travis-ci limits
# you to that regardless of what the hardware might suggest.
return 2
try:
mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
mem_gib = mem_bytes/(1024.**3)
num_cores = multiprocessing.cpu_count()
# make sure we have enough ram for each build process.
mem_cores = int(floor(mem_gib/float(ram_per_build_process_in_gb)+0.5));
# We are limited either by RAM or CPU cores. So pick the limiting amount
# and return that.
return max(min(num_cores, mem_cores), 1)
except ValueError:
return 2 # just assume 2 if we can't get the os to tell us the right answer.
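# Worked example: on a machine with 16 CPU cores and 24 GiB of RAM,
# num_available_cpu_cores(2) gives mem_cores = floor(24/2 + 0.5) = 12, so the
# build above runs with -j12 (limited by RAM rather than by core count).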
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = '--ignore docs --ignore dlib'
def run_tests(self):
import shlex
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
def read_version_from_cmakelists(cmake_file):
"""Read version information
"""
    major = re.findall(r"set\(CPACK_PACKAGE_VERSION_MAJOR.*\"(.*)\"", open(cmake_file).read())[0]
    minor = re.findall(r"set\(CPACK_PACKAGE_VERSION_MINOR.*\"(.*)\"", open(cmake_file).read())[0]
    patch = re.findall(r"set\(CPACK_PACKAGE_VERSION_PATCH.*\"(.*)\"", open(cmake_file).read())[0]
return major + '.' + minor + '.' + patch
def read_entire_file(fname):
"""Read text out of a file relative to setup.py.
"""
return open(os.path.join(fname)).read()
setup(
name='dlib',
version=read_version_from_cmakelists('dlib/CMakeLists.txt'),
description='A toolkit for making real world machine learning and data analysis applications',
long_description='See http://dlib.net for documentation.',
author='Davis King',
author_email='[email protected]',
url='https://github.com/davisking/dlib',
license='Boost Software License',
ext_modules=[CMakeExtension('dlib','tools/python')],
cmdclass=dict(build_ext=CMakeBuild, test=PyTest),
zip_safe=False,
tests_require=['pytest'],
packages=['dlib'],
keywords=['dlib', 'Computer Vision', 'Machine Learning'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft',
'Operating System :: Microsoft :: Windows',
'Programming Language :: C++',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Software Development',
],
)
|
the-stack_106_20563
|
# Copyright 2020 Facebook, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch.fx
import torch
from flexflow.core.flexflow_type import ActiMode, AggrMode, PoolType, DataType, LossType, MetricsType, OpType, enum_to_int
class Node(object):
def __init__(self, name, inedges):
self.name = name
self.inedges = inedges
pass
class ModuleNode(Node):
def __init__(self, name, inedges, module):
super(ModuleNode, self).__init__(name, inedges)
self.module = module
class FunctionNode(Node):
def __init__(self, name, inedges, function):
super(FunctionNode, self).__init__(name, inedges)
self.function = function
class OutputNode(Node):
def __init__(self, name, inedges):
super(OutputNode, self).__init__(name, inedges)
class InputNode(Node):
def __init__(self, name):
super(InputNode, self).__init__(name, None)
def __symbolic_trace(model):
assert isinstance(model, torch.nn.Module), "model must be a torch.nn.Module"
traced = torch.fx.symbolic_trace(model)
modules_by_name = dict()
for name, module in model.named_modules():
modules_by_name[name] = module
graph = list()
for node in traced.graph.nodes:
if node.op == "call_module":
            assert node.target in modules_by_name, "cannot find module {} in model".format(node.target)
graph.append(ModuleNode(node.name, node.args, modules_by_name[node.target]))
elif node.op == "placeholder":
graph.append(InputNode(node.name))
elif node.op == "get_attr":
pass
elif node.op == "call_function" or node.op == "call_method":
graph.append(FunctionNode(node.name, node.args, node.target))
elif node.op == "output":
graph.append(OutputNode(node.name, node.args))
else:
assert False, "Encounter unhandled operator type: {}".format(node.op)
return graph
def parse_input(op_str, node):
assert node.inedges == None, "wrong format"
op_str = op_str + str(enum_to_int(OpType, OpType.INPUT)) + "\n"
return op_str
def parse_output(op_str, node):
#FIXME assume there is 1 output
assert len(node.inedges) == 1, "wrong format"
op_str = op_str + str(enum_to_int(OpType, OpType.OUTPUT)) + "\n"
return op_str
def parse_add(op_str, node):
assert len(node.inedges) == 2, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.ADD)) + "\n"
return op_str
def parse_concat(op_str, node):
#FIXME assume it is a merge
op_str = op_str + str(enum_to_int(OpType, OpType.CONCAT)) + ", "
op_str = op_str + str(node.inedges[1]) + "\n"
return op_str
def parse_flat(op_str, node):
#assert len(node.inedges) == 1, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.FLAT)) + "\n"
return op_str
def parse_linear(op_str, node):
assert len(node.inedges) == 1, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.LINEAR)) + ", "
op_str = op_str + str(node.module.out_features) + ", "
op_str = op_str + str(enum_to_int(ActiMode, ActiMode.AC_MODE_NONE)) + ", "
    if node.module.bias is not None:
op_str = op_str + "1\n"
else:
op_str = op_str + "0\n"
return op_str
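# Illustrative output format: for a node named "fc1" fed by "flat1" that wraps
# torch.nn.Linear(512, 10, bias=True), torch_to_flexflow below emits a line of
# the form
#   fc1, flat1:, <LINEAR op code>, 10, <AC_MODE_NONE code>, 1
# where the placeholders stand for the integers returned by enum_to_int.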
def parse_conv2d(op_str, node):
assert len(node.inedges) == 1, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.CONV2D)) + ", "
op_str = op_str + str(node.module.out_channels) + ", "
op_str = op_str + str(node.module.kernel_size[0]) + ", "
op_str = op_str + str(node.module.kernel_size[1]) + ", "
op_str = op_str + str(node.module.stride[0]) + ", "
op_str = op_str + str(node.module.stride[1]) + ", "
    op_str = op_str + str(node.module.padding[0]) + ", "
op_str = op_str + str(node.module.padding[1]) + ", "
op_str = op_str + str(enum_to_int(ActiMode, ActiMode.AC_MODE_NONE)) + ", "
op_str = op_str + str(node.module.groups) + ", "
    if node.module.bias is not None:
op_str = op_str + "1\n"
else:
op_str = op_str + "0\n"
return op_str
def parse_pool2d(op_str, node, pool_type):
assert len(node.inedges) == 1, "wrong number of inputs"
#FIXME MaxPool2d supports ceil_mode
op_str = op_str + str(enum_to_int(OpType, OpType.POOL2D)) + ", "
op_str = op_str + str(node.module.kernel_size) + ", "
op_str = op_str + str(node.module.stride) + ", "
op_str = op_str + str(node.module.padding) + ", "
op_str = op_str + str(enum_to_int(PoolType, pool_type)) + ", "
op_str = op_str + str(enum_to_int(ActiMode, ActiMode.AC_MODE_NONE)) + "\n"
return op_str
def parse_batchnorm2d(op_str, node):
assert len(node.inedges) == 1, "wrong number of inputs"
# FIXME BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) args are not in FF
op_str = op_str + str(enum_to_int(OpType, OpType.BATCH_NORM)) + "\n"
return op_str
def parse_dropout(op_str, node):
assert len(node.inedges) == 1, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.DROPOUT)) + ", "
op_str = op_str + str(node.module.p) + "\n"
return op_str
def parse_relu(op_str, node):
assert len(node.inedges) == 1, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.RELU)) + "\n"
return op_str
def parse_sigmoid(op_str, node):
assert len(node.inedges) == 1, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.SIGMOID)) + "\n"
return op_str
def parse_tanh(op_str, node):
assert len(node.inedges) == 1, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.TANH)) + "\n"
return op_str
def parse_elu(op_str, node):
assert len(node.inedges) == 1, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.ELU)) + "\n"
return op_str
def parse_softmax(op_str, node):
assert len(node.inedges) == 1, "wrong number of inputs"
op_str = op_str + str(enum_to_int(OpType, OpType.SOFTMAX)) + "\n"
return op_str
def torch_to_flexflow(model, filename):
graph = __symbolic_trace(model)
out_file = open(filename, "w")
for node in graph:
# op name
op_str = node.name + ", "
# op inedges
#input
if node.inedges == None:
pass
#others
else:
inedges = node.inedges[0]
# print(inedges, type(inedges))
if type(inedges) == list:
pass
elif type(inedges) == tuple:
pass
elif type(inedges) == torch.fx.immutable_collections.immutable_list:
pass
else:
inedges = [inedges]
for inedge in inedges:
op_str = op_str + inedge.name + ":"
op_str = op_str + ", "
#op type
if type(node) == InputNode:
op_str = parse_input(op_str, node)
if type(node) == OutputNode:
op_str = parse_output(op_str, node)
if type(node) == FunctionNode:
function_name = str(node.function)
if function_name.find('add') >= 0:
op_str = parse_add(op_str, node)
elif function_name.find('cat') >= 0:
op_str = parse_concat(op_str, node)
elif function_name.find('flatten') >= 0:
op_str = parse_flat(op_str, node)
elif function_name.find('relu') >= 0:
op_str = parse_relu(op_str, node)
else:
                # Unrecognized type
                assert False, "Unrecognized built-in function: {}".format(function_name)
if type(node) == ModuleNode:
assert len(node.inedges) == 1, "wrong format"
if type(node.module) == torch.nn.modules.linear.Linear:
op_str = parse_linear(op_str, node)
elif type(node.module) == torch.nn.modules.conv.Conv2d:
op_str = parse_conv2d(op_str, node)
elif type(node.module) == torch.nn.modules.pooling.MaxPool2d:
op_str = parse_pool2d(op_str, node, PoolType.POOL_MAX)
elif type(node.module) == torch.nn.modules.pooling.AvgPool2d:
op_str = parse_pool2d(op_str, node, PoolType.POOL_AVG)
elif type(node.module) == torch.nn.modules.batchnorm.BatchNorm2d:
op_str = parse_batchnorm2d(op_str, node)
elif type(node.module) == torch.nn.modules.dropout.Dropout:
op_str = parse_dropout(op_str, node)
elif type(node.module) == torch.nn.modules.flatten.Flatten:
op_str = parse_flat(op_str, node)
elif type(node.module) == torch.nn.modules.activation.ReLU:
op_str = parse_relu(op_str, node)
elif type(node.module) == torch.nn.modules.activation.Sigmoid:
op_str = parse_sigmoid(op_str, node)
elif type(node.module) == torch.nn.modules.activation.Tanh:
op_str = parse_tanh(op_str, node)
elif type(node.module) == torch.nn.modules.activation.ELU:
op_str = parse_elu(op_str, node)
elif type(node.module) == torch.nn.modules.activation.Softmax:
op_str = parse_softmax(op_str, node)
else:
print(node.module)
assert 0, "unknown op"
print(op_str)
out_file.write(op_str)
out_file.close()
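# Hypothetical smoke test (illustrative only): exports a tiny model built from
# layer types handled above. It assumes the flexflow package providing the
# enum helpers imported at the top of this file is installed; the output
# filename is just an example.
if __name__ == "__main__":
    class TinyNet(torch.nn.Module):
        def __init__(self):
            super(TinyNet, self).__init__()
            self.fc1 = torch.nn.Linear(8, 4)
            self.relu = torch.nn.ReLU()
            self.fc2 = torch.nn.Linear(4, 2)

        def forward(self, x):
            return self.fc2(self.relu(self.fc1(x)))

    torch_to_flexflow(TinyNet(), "tinynet.ff")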
|
the-stack_106_20564
|
"""Gravitational Search Algorithm.
"""
import numpy as np
import opytimizer.math.general as g
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.exception as e
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class GSA(Optimizer):
"""A GSA class, inherited from Optimizer.
This is the designed class to define GSA-related
variables and methods.
References:
E. Rashedi, H. Nezamabadi-Pour and S. Saryazdi. GSA: a gravitational search algorithm.
Information Sciences (2009).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> GSA.')
# Overrides its parent class with the receiving params
super(GSA, self).__init__()
# Initial gravity value
self.G = 2.467
# Builds the class
self.build(params)
logger.info('Class overrided.')
@property
def G(self):
"""float: Initial gravity.
"""
return self._G
@G.setter
def G(self, G):
if not isinstance(G, (float, int)):
raise e.TypeError('`G` should be a float or integer')
if G < 0:
raise e.ValueError('`G` should be >= 0')
self._G = G
@property
def velocity(self):
"""np.array: Array of velocities.
"""
return self._velocity
@velocity.setter
def velocity(self, velocity):
if not isinstance(velocity, np.ndarray):
raise e.TypeError('`velocity` should be a numpy array')
self._velocity = velocity
def compile(self, space):
"""Compiles additional information that is used by this optimizer.
Args:
space (Space): A Space object containing meta-information.
"""
# Arrays of velocities
self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))
def _calculate_mass(self, agents):
"""Calculates agents' mass (eq. 16).
Args:
agents (list): List of agents.
Returns:
The agents' mass.
"""
# Gathers the best and worst agents
best, worst = agents[0].fit, agents[-1].fit
# Calculates agents' masses using equation 15
mass = [(agent.fit - worst) / (best - worst + c.EPSILON) for agent in agents]
# Normalizes agents' masses
norm_mass = mass / (np.sum(mass) + c.EPSILON)
return norm_mass
def _calculate_force(self, agents, mass, gravity):
"""Calculates agents' force (eq. 7-9).
Args:
agents (list): List of agents.
mass (np.array): An array of agents' mass.
gravity (float): Current gravity value.
Returns:
The attraction force between all agents.
"""
# Calculates the force
force = [[gravity * (mass[i] * mass[j]) / (g.euclidean_distance(agents[i].position, agents[j].position) + c.EPSILON)
* (agents[j].position - agents[i].position) for j in range(len(agents))] for i in range(len(agents))]
# Transforms the force into an array
force = np.asarray(force)
# Applies a stochastic trait to the force
force = np.sum(r.generate_uniform_random_number() * force, axis=1)
return force
def update(self, space, iteration):
"""Wraps Gravitational Search Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
iteration (int): Current iteration.
"""
# Sorts agents
space.agents.sort(key=lambda x: x.fit)
# Calculates the current gravity
gravity = self.G / (iteration + 1)
# Calculates agents' mass
mass = self._calculate_mass(space.agents)
# Calculates agents' attraction force
force = self._calculate_force(space.agents, mass, gravity)
# Iterates through all agents
for i, agent in enumerate(space.agents):
# Calculates the acceleration (eq. 10)
acceleration = force[i] / (mass[i] + c.EPSILON)
# Updates current agent velocity (eq. 11)
r1 = r.generate_uniform_random_number()
self.velocity[i] = r1 * self.velocity[i] + acceleration
# Updates current agent position (eq. 12)
agent.position += self.velocity[i]
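# Hypothetical usage sketch (comments only, illustrative): wiring GSA into an
# optimization run. Class names and signatures below depend on the installed
# opytimizer version and are assumptions, not part of this module:
#
#   from opytimizer import Opytimizer
#   from opytimizer.core import Function
#   from opytimizer.spaces import SearchSpace
#
#   space = SearchSpace(n_agents=20, n_variables=2,
#                       lower_bound=[-10, -10], upper_bound=[10, 10])
#   optimizer = GSA(params={'G': 2.467})
#   Opytimizer(space, optimizer, Function(lambda x: (x ** 2).sum())).start(n_iterations=100)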
|
the-stack_106_20565
|
"""Platform for the Daikin AC."""
import asyncio
from datetime import timedelta
import logging
from socket import timeout
import async_timeout
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOSTS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import Throttle
from . import config_flow  # noqa: F401 pylint: disable=unused-import
from .const import KEY_HOST
REQUIREMENTS = ['pydaikin==1.1.0']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'daikin'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
COMPONENT_TYPES = ['climate', 'sensor']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(
CONF_HOSTS, default=[]
): vol.All(cv.ensure_list, [cv.string]),
})
}, extra=vol.ALLOW_EXTRA)
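# Illustrative configuration.yaml entry matching CONFIG_SCHEMA above (the host
# addresses are placeholders):
#
#   daikin:
#     hosts:
#       - 192.168.1.50
#       - 192.168.1.51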
async def async_setup(hass, config):
"""Establish connection with Daikin."""
if DOMAIN not in config:
return True
hosts = config[DOMAIN].get(CONF_HOSTS)
if not hosts:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={'source': SOURCE_IMPORT}))
for host in hosts:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': SOURCE_IMPORT},
data={
KEY_HOST: host,
}))
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Establish connection with Daikin."""
conf = entry.data
daikin_api = await daikin_api_setup(hass, conf[KEY_HOST])
if not daikin_api:
return False
hass.data.setdefault(DOMAIN, {}).update({entry.entry_id: daikin_api})
await asyncio.wait([
hass.config_entries.async_forward_entry_setup(entry, component)
for component in COMPONENT_TYPES
])
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
await asyncio.wait([
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in COMPONENT_TYPES
])
hass.data[DOMAIN].pop(config_entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return True
async def daikin_api_setup(hass, host):
"""Create a Daikin instance only once."""
from pydaikin.appliance import Appliance
session = hass.helpers.aiohttp_client.async_get_clientsession()
try:
with async_timeout.timeout(10):
device = Appliance(host, session)
await device.init()
except asyncio.TimeoutError:
_LOGGER.error("Connection to Daikin could not be established")
return None
except Exception: # pylint: disable=broad-except
_LOGGER.error("Unexpected error creating device")
return None
api = DaikinApi(device)
return api
class DaikinApi:
"""Keep the Daikin instance in one place and centralize the update."""
def __init__(self, device):
"""Initialize the Daikin Handle."""
self.device = device
self.name = device.values['name']
self.ip_address = device.ip
self._available = True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self, **kwargs):
"""Pull the latest data from Daikin."""
try:
await self.device.update_status()
self._available = True
except timeout:
_LOGGER.warning(
"Connection failed for %s", self.ip_address
)
self._available = False
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def mac(self):
"""Return mac-address of device."""
return self.device.values.get(CONNECTION_NETWORK_MAC)
@property
def device_info(self):
"""Return a device description for device registry."""
info = self.device.values
return {
'connections': {(CONNECTION_NETWORK_MAC, self.mac)},
            'identifiers': {(DOMAIN, self.mac)},
'manufacturer': 'Daikin',
'model': info.get('model'),
'name': info.get('name'),
'sw_version': info.get('ver').replace('_', '.'),
}
|
the-stack_106_20566
|
class IntcodeMemory(object):
    def __init__(self, init_data=None):
        self.data = {}
        for address, value in enumerate(init_data or []):
            self.data[address] = value
def __repr__(self):
return repr(self.data)
def __setitem__(self, address, value):
self.data[address] = value
def _load_memory_value(self, address):
if address not in self.data:
self.data[address] = 0
return self.data[address]
def __getitem__(self, key):
if isinstance(key, int):
return self._load_memory_value(key)
elif isinstance(key, slice):
indices = key.indices(len(self.data))
indice_range = range(*indices)
slice_result = [self._load_memory_value(x) for x in indice_range]
return slice_result
def __delitem__(self, index):
if index in self.data:
del self.data[index]
class IntcodeInstruction(object):
def __init__(self, method, param_count=0):
self.method = method
self.param_count = param_count
def __repr__(self):
return f"Instruction({self.method.__name__}, param_count={self.param_count})"
class IntcodeComputer(object):
def __init__(self, program_code=None, input_array=None, output_array=None):
self._running = False # Whether or not the program is running.
self._should_halt = False
self._head_jump = None
self.is_terminated = False # True, if opcode 99 was encountered.
self.original_code = None # Last program code loaded into the computer.
self.program = None # Currently loaded/running program.
self.input_array = input_array if input_array is not None else []
self.output_array = output_array if output_array is not None else []
self.head = 0 # Program execution position.
self.relative_base = 0
self.instructions = {
1: IntcodeInstruction(self.add, param_count=3),
2: IntcodeInstruction(self.multiply, param_count=3),
3: IntcodeInstruction(self.read, param_count=1),
4: IntcodeInstruction(self.write, param_count=1),
5: IntcodeInstruction(self.jump_if_true, param_count=2),
6: IntcodeInstruction(self.jump_if_false, param_count=2),
7: IntcodeInstruction(self.less_than, param_count=3),
8: IntcodeInstruction(self.equals, param_count=3),
9: IntcodeInstruction(self.adjust_relative_base, param_count=1),
99: IntcodeInstruction(self.terminate),
}
if program_code is not None:
self.load_program(program_code)
def load_program(self, program_code=None):
if program_code is not None:
self.original_code = program_code
if self.original_code is not None:
parsed_code = [int(num.strip()) for num in program_code.split(",")]
self.program = IntcodeMemory(parsed_code)
def get_parameters(self, *values, literals=[]):
parameters = []
for index, (value, mode) in enumerate(zip(values, self._param_modes)):
if index in literals:
if mode == 0 or mode == 1:
parameters.append(value)
else:
parameters.append(value + self.relative_base)
else:
if mode == 0: # position mode
parameters.append(self.program[value])
elif mode == 1: # immediate mode
parameters.append(value)
elif mode == 2: # relative mode
parameters.append(self.program[value + self.relative_base])
return tuple(parameters) if len(parameters) > 1 else parameters[0]
def write_value_to_program(self, index, value):
self.program[index] = value
# 1
def add(self, arg1, arg2, position):
value1, value2, value3 = self.get_parameters(arg1, arg2, position, literals=[2])
self.write_value_to_program(value3, value1 + value2)
# 2
def multiply(self, arg1, arg2, position):
value1, value2, value3 = self.get_parameters(arg1, arg2, position, literals=[2])
self.write_value_to_program(value3, value1 * value2)
# 3
def read(self, position):
if len(self.input_array) > 0:
value = self.input_array.pop(0)
param = self.get_parameters(position, literals=[0])
self.program[param] = value
self._should_halt = False
else:
self._should_halt = True
# 4
def write(self, position):
output = self.get_parameters(position)
self.output_array.append(output)
# 5
def jump_if_true(self, value, jump_position):
value, jump_position = self.get_parameters(value, jump_position)
if value != 0:
self._head_jump = jump_position
# 6
def jump_if_false(self, value, jump_position):
value, jump_position = self.get_parameters(value, jump_position)
if value == 0:
self._head_jump = jump_position
# 7
def less_than(self, arg1, arg2, position):
value1, value2, value3 = self.get_parameters(arg1, arg2, position, literals=[2])
self.program[value3] = 1 if value1 < value2 else 0
# 8
def equals(self, arg1, arg2, position):
value1, value2, value3 = self.get_parameters(arg1, arg2, position, literals=[2])
self.program[value3] = 1 if value1 == value2 else 0
# 9
def adjust_relative_base(self, arg1):
adjustment_value = self.get_parameters(arg1)
self.relative_base += adjustment_value
# 99
def terminate(self):
self._running = False
self.is_terminated = True
def _parse_instruction_head(self):
head = f"{self.program[self.head]:05}"
return int(head[-2:]), list(map(int, head[2::-1]))
def run(self):
self._running = True
self._should_halt = False
while self._running:
opcode, self._param_modes = self._parse_instruction_head()
instruction = self.instructions[opcode]
if instruction.param_count > 0:
param_start_index = self.head + 1
param_end_index = self.head + instruction.param_count + 1
method_args = self.program[param_start_index:param_end_index]
instruction.method(*method_args)
else:
instruction.method()
if self._should_halt:
self._running = False
else:
self.head = (
self.head + instruction.param_count + 1
if self._head_jump is None
else self._head_jump
)
self._head_jump = None
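# Hypothetical self-check (illustrative only), using two small, well-known
# Intcode test programs; it relies solely on the classes defined above.
if __name__ == "__main__":
    # "1,0,0,0,99" adds the values at positions 0 and 0 (1 + 1) into position 0.
    computer = IntcodeComputer("1,0,0,0,99")
    computer.run()
    assert computer.program[0] == 2

    # "3,0,4,0,99" reads one input value and echoes it to the output array.
    echo = IntcodeComputer("3,0,4,0,99", input_array=[42])
    echo.run()
    assert echo.output_array == [42]
    print("Intcode self-checks passed")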
|
the-stack_106_20567
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NavCoinTestFramework
from test_framework.util import *
BLOCK_REWARD = 50
class WalletTest (NavCoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
print(self.options.tmpdir)
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,2,0)
self.is_network_split = False
def run_test(self):
self.nodes[0].staking(False)
self.nodes[1].staking(False)
self.nodes[2].staking(False)
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print("Mining blocks...")
slow_gen(self.nodes[0], 1)
self.sync_all()
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 59800000)
assert_equal(walletinfo['balance'], 0)
slow_gen(self.nodes[1], 56)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 59800000)
assert_equal(self.nodes[1].getbalance(), 2550)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 51)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 21 NAV from 0 to 2 using sendtoaddress call.
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
slow_gen(self.nodes[0], 1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 10 blocks (so node0 can recover the fee)
slow_gen(self.nodes[1], 10)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
slow_gen(self.nodes[1], 1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(str(self.nodes[2].getbalance()), "59800043.99980000")
assert_equal(str(self.nodes[2].getbalance("from1")), "59800022.99980000")
# Send 10 NAV normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10)
slow_gen(self.nodes[2], 1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('59800033.99980000'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 NAV with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", "", True)
slow_gen(self.nodes[2], 1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 10 NAV
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
slow_gen(self.nodes[2], 1)
self.sync_all()
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 NAV with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
slow_gen(self.nodes[2], 1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
self.nodes[3].staking(False)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
slow_gen(self.nodes[1], 1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_navcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,2,0)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
slow_gen(self.nodes[1], 1) #mine a block, tx should not be in there
self.sync_all()
# We need to adjust the balance since new block/s got confirmed
node_2_bal += BLOCK_REWARD
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
slow_gen(self.nodes[1], 1)
self.sync_all()
# We need to adjust the balance since new block/s got confirmed
# And we sent 2 NAV to it
node_2_bal += BLOCK_REWARD + 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
block_count = self.nodes[0].getblockcount()
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_navcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [['-staking=0'], ['-staking=0'], ['-staking=0']])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,2,0)
slow_gen(self.nodes[0], 2)
self.sync_all()
block_count_new = self.nodes[0].getblockcount()
# We need to adjust the balance since new block/s got confirmed
# And we sent 2 NAV to it
node_2_bal += (block_count_new - block_count) * BLOCK_REWARD + 2
        #tx should be added to balance because after restarting the nodes the tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
my_function_failed = False
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
my_function_failed = True
assert(my_function_failed)
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
slow_gen(self.nodes[0], 1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
cbAddr = self.nodes[1].getnewaddress()
blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
self.sync_all()
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(cbTxId)
# check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = slow_gen(self.nodes[0], 2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label, s)
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
# maintenance = [
# '-rescan',
# '-reindex',
# '-zapwallettxes=1',
# '-zapwallettxes=2',
# # disabled until issue is fixed: https://github.com/navcoin/navcoin/issues/7463
# # '-salvagewallet',
# ]
# for m in maintenance:
# print("check " + m)
# stop_nodes(self.nodes)
# wait_navcoinds()
# self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
# while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# # reindex will leave rpc warm up "early"; Wait for it to finish
# time.sleep(0.1)
# assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
#
# # Exercise listsinceblock with the last two blocks
# coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
# assert_equal(coinbase_tx_1["lastblock"], blocks[1])
# assert_equal(len(coinbase_tx_1["transactions"]), 1)
# assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
# assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
if __name__ == '__main__':
WalletTest().main()
|
the-stack_106_20568
|
'''
Write a Python program to find the occurrences and positions of a substring
within a string.
'''
import re
sub=input()
n=input()
for i in re.finditer(pattern=sub,string=n):
print(i.start(),n[i.start():i.end()])
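# Illustrative run: with sub = "ain" and n = "the rain in spain", the loop
# above prints
#   5 ain
#   14 ain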
|
the-stack_106_20570
|
# -*- coding: utf-8 -*-
from irc3 import dec
import venusian
import functools
def plugin(wrapped):
"""register a class as server plugin"""
setattr(wrapped, '__irc3_plugin__', False)
setattr(wrapped, '__irc3d_plugin__', True)
return wrapped
class event(dec.event):
"""same as :class:`~irc3.dec.event` but for servers"""
def __init__(self, regexp, *args, **kwargs):
kwargs.setdefault('venusian_category', 'irc3d.rfc1459')
regexp = getattr(regexp, 'server', None) or regexp
super(event, self).__init__(regexp, *args, **kwargs)
def extend(func):
"""same as :func:`~irc3.dec.extend` but for servers"""
def callback(context, name, ob):
obj = context.context
if info.scope == 'class':
@functools.wraps(func)
def f(self, *args, **kwargs):
plugin = obj.get_plugin(ob)
return getattr(plugin, func.__name__)(*args, **kwargs)
setattr(obj, func.__name__, f.__get__(obj, obj.__class__))
else:
setattr(obj, func.__name__, func.__get__(obj, obj.__class__))
info = venusian.attach(func, callback, category='irc3d.extend')
return func
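# Hypothetical usage sketch (comments only, illustrative): how the decorators
# above are typically combined in a server plugin. The rfc regexp and the
# handler signature are assumptions that depend on the irc3d version in use:
#
#   from irc3 import rfc
#
#   @plugin
#   class MyServerPlugin:
#
#       def __init__(self, context):
#           self.context = context
#
#       @event(rfc.PRIVMSG)
#       def on_privmsg(self, **kwargs):
#           ...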
|
the-stack_106_20572
|
#!/usr/bin/env python
# /export/covey1/CMIP5/Precipitation/DiurnalCycle/HistoricalRuns/compositeDiurnalStatisticsWrapped.py
# This modification of ./compositeDiurnalStatistics.py will have the PMP Parser "wrapped" around it,
# so that it can be executed with input parameters in the Unix command line, for example:
# ---> python compositeDiurnalStatisticsWrapped.py -t "sample_data_%(variable)_%(model).nc" -m 7
# These are the models with CMIP5 historical run output at 3h frequency, which this script is designed to process:
# 'ACCESS1-0', 'ACCESS1-3', 'bcc-csm1-1', 'bcc-csm1-1-m', 'BNU-ESM',
# 'CCSM4', 'CMCC-CM', 'CNRM-CM5', 'EC-EARTH',
# 'FGOALS-g2', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R',
# 'GISS-E2-H', 'inmcm4', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR',
# 'MIROC4h', 'MIROC5', 'MIROC-ESM', 'MIROC-ESM-CHEM'
from __future__ import print_function, division
import cdms2
import genutil
import MV2
import os
import glob
import cdtime
import cdp
import multiprocessing as mp
from pcmdi_metrics.diurnal.common import monthname_d, P, populateStringConstructor, INPUT
def main():
def compute(params):
fileName = params.fileName
month = params.args.month
monthname = params.monthname
varbname = params.varname
template = populateStringConstructor(args.filename_template, args)
template.variable = varbname
# Units on output (*may be converted below from the units of input*)
outunits = 'mm/d'
startime = 1.5 # GMT value for starting time-of-day
dataname = params.args.model
if dataname is None or dataname.find("*") != -1:
# model not passed or passed as *
reverted = template.reverse(os.path.basename(fileName))
print("REVERYING", reverted, dataname)
dataname = reverted["model"]
if dataname not in args.skip:
try:
print('Data source:', dataname)
print('Opening %s ...' % fileName)
f = cdms2.open(fileName)
# Composite-mean and composite-s.d diurnal cycle for month and year(s):
iYear = 0
for year in range(args.firstyear, args.lastyear + 1):
print('Year %s:' % year)
startTime = cdtime.comptime(year, month)
# Last possible second to get all tpoints
finishtime = startTime.add(
1, cdtime.Month).add(-1, cdtime.Minute)
print('Reading %s from %s for time interval %s to %s ...' % (varbname, fileName, startTime,
finishtime))
# Transient variable stores data for current year's month.
tvarb = f(varbname, time=(startTime, finishtime))
# *HARD-CODES conversion from kg/m2/sec to mm/day.
tvarb *= 86400
print('Shape:', tvarb.shape)
# The following tasks need to be done only once, extracting
# metadata from first-year file:
if year == args.firstyear:
tc = tvarb.getTime().asComponentTime()
print("DATA FROM:", tc[0], "to", tc[-1])
day1 = cdtime.comptime(tc[0].year, tc[0].month)
day1 = tc[0]
firstday = tvarb(
time=(
day1,
day1.add(
1.,
cdtime.Day),
"con"))
dimensions = firstday.shape
print(' Shape = ', dimensions)
# Number of time points in the selected month for one year
N = dimensions[0]
nlats = dimensions[1]
nlons = dimensions[2]
deltaH = 24. / N
dayspermo = tvarb.shape[0] // N
print(' %d timepoints per day, %d hr intervals between timepoints' % (N, deltaH))
comptime = firstday.getTime()
modellons = tvarb.getLongitude()
modellats = tvarb.getLatitude()
# Longitude values are needed later to compute Local Solar
# Times.
lons = modellons[:]
print(' Creating temporary storage and output fields ...')
# Sorts tvarb into separate GMTs for one year
tvslice = MV2.zeros((N, dayspermo, nlats, nlons))
# Concatenates tvslice over all years
concatenation = MV2.zeros(
(N, dayspermo * nYears, nlats, nlons))
LSTs = MV2.zeros((N, nlats, nlons))
for iGMT in range(N):
hour = iGMT * deltaH + startime
print(' Computing Local Standard Times for GMT %5.2f ...' % hour)
for j in range(nlats):
for k in range(nlons):
LSTs[iGMT, j, k] = (hour + lons[k] / 15) % 24
for iGMT in range(N):
hour = iGMT * deltaH + startime
print(' Choosing timepoints with GMT %5.2f ...' % hour)
print("days per mo :", dayspermo)
# Transient-variable slice: every Nth tpoint gets all of
# the current GMT's tpoints for current year:
tvslice[iGMT] = tvarb[iGMT::N]
concatenation[iGMT, iYear *
dayspermo: (iYear +
1) *
dayspermo] = tvslice[iGMT]
iYear += 1
f.close()
# For each GMT, take mean and standard deviation over all years for
# the chosen month:
avgvalues = MV2.zeros((N, nlats, nlons))
stdvalues = MV2.zeros((N, nlats, nlons))
for iGMT in range(N):
hour = iGMT * deltaH + startime
print('Computing mean and standard deviation over all GMT %5.2f timepoints ...' % hour)
# Assumes first dimension of input ("axis#0") is time
avgvalues[iGMT] = MV2.average(concatenation[iGMT], axis=0)
stdvalues[iGMT] = genutil.statistics.std(concatenation[iGMT])
avgvalues.id = 'diurnalmean'
stdvalues.id = 'diurnalstd'
LSTs.id = 'LST'
avgvalues.units = outunits
# Standard deviation has same units as mean (not so for
# higher-moment stats).
stdvalues.units = outunits
LSTs.units = 'hr'
LSTs.longname = 'Local Solar Time'
avgvalues.setAxis(0, comptime)
avgvalues.setAxis(1, modellats)
avgvalues.setAxis(2, modellons)
stdvalues.setAxis(0, comptime)
stdvalues.setAxis(1, modellats)
stdvalues.setAxis(2, modellons)
LSTs.setAxis(0, comptime)
LSTs.setAxis(1, modellats)
LSTs.setAxis(2, modellons)
                avgoutfile = '%s_%s_%s_%s-%s_diurnal_avg.nc' % (
                    varbname, dataname, monthname, str(args.firstyear), str(args.lastyear))
                stdoutfile = '%s_%s_%s_%s-%s_diurnal_std.nc' % (
                    varbname, dataname, monthname, str(args.firstyear), str(args.lastyear))
LSToutfile = ('%s_%s_LocalSolarTimes.nc' % (varbname, dataname))
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
                f = cdms2.open(os.path.join(args.results_dir, avgoutfile), 'w')
                g = cdms2.open(os.path.join(args.results_dir, stdoutfile), 'w')
                h = cdms2.open(os.path.join(args.results_dir, LSToutfile), 'w')
f.write(avgvalues)
g.write(stdvalues)
h.write(LSTs)
f.close()
g.close()
h.close()
except Exception as err:
print("Failed for model %s with erro: %s" % (dataname, err))
print('done')
args = P.get_parameter()
month = args.month # noqa: F841
monthname = monthname_d[args.month] # noqa: F841
# -------------------------------------HARD-CODED INPUT (add to command line later?):
# These models have been processed already (or tried and found wanting,
# e.g. problematic time coordinates):
skipMe = args.skip # noqa: F841
# Choose only one ensemble member per model, with the following ensemble-member code (for definitions, see
# http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf):
# NOTE--These models do not supply 3hr data from the 'r1i1p1' ensemble member,
# but do supply it from other ensemble members:
# bcc-csm1-1 (3hr data is from r2i1p1)
# CCSM4 (3hr data is from r6i1p1)
# GFDL-CM3 (3hr data is from r2i1p1, r3i1p1, r4i1p1, r5i1p1)
# GISS-E2-H (3hr data is from r6i1p1, r6i1p3)
# GISS-E2-R (3hr data is from r6i1p2)
varbname = 'pr'
# Note that CMIP5 specifications designate (01:30, 04:30, 07:30, ..., 22:30) GMT for 3hr flux fields, but
# *WARNING* some GMT timepoints are actually (0, 3, 6,..., 21) in submitted CMIP5 data, despite character strings in
# file names (and time axis metadata) to the contrary. See CMIP5 documentation and errata! Overrides to
# correct these problems are given below:
    # startGMT = '0:0:0.0' # Include 00Z as a possible starting time, to accommodate (0, 3, 6,..., 21)GMT in the input
# data.
# startime = -1.5 # Subtract 1.5h from (0, 3, 6,..., 21)GMT input data. This is needed for BNU-ESM, CCSM4 and
# CNRM-CM5.
    # startime = -3.0 # Subtract 3.0h from (0, 3, 6,..., 21)GMT input
    # data. This is needed for CMCC-CM.
# -------------------------------------
nYears = args.lastyear - args.firstyear + 1
template = populateStringConstructor(args.filename_template, args)
template.variable = varbname
print("TEMPLATE:", template())
fileList = glob.glob(os.path.join(args.modpath, template()))
print("FILES:", fileList)
params = [INPUT(args, name, template) for name in fileList]
print("PARAMS:", params)
cdp.cdp_run.multiprocess(compute, params, num_workers=args.num_workers)
# Good practice to place contents of script under this check
if __name__ == '__main__':
# Related to script being installed as executable
mp.freeze_support()
main()
|
the-stack_106_20573
|
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
import json
import logging
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .activations import gelu_new, swish
from .configuration_openai import OpenAIGPTConfig
from .file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_callable,
replace_return_docstrings,
)
from .modeling_outputs import BaseModelOutput, CausalLMOutput
from .modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
logger = logging.getLogger(__name__)
_CONFIG_FOR_DOC = "OpenAIGPTConfig"
_TOKENIZER_FOR_DOC = "OpenAIGPTTokenizer"
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openai-gpt",
# See all OpenAI GPT models at https://huggingface.co/models?filter=openai-gpt
]
def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
if ".ckpt" in openai_checkpoint_folder_path:
openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
logger.info("Loading weights from {}".format(openai_checkpoint_folder_path))
with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
names = json.load(names_handle)
with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
shapes = json.load(shapes_handle)
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + "/params_{}.npy".format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "w":
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu_new}
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
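    # Shape sketch for split_heads/merge_heads (illustrative sizes, not from the original source):
    #   x: (batch, seq_len, n_embd) -> view -> (batch, seq_len, n_head, n_embd // n_head)
    #   -> permute -> (batch, n_head, seq_len, head_dim); keys use k=True and come out as
    #   (batch, n_head, head_dim, seq_len) so q @ k is already (seq_len, seq_len) per head.
    # merge_heads applies the inverse permute/view to recover (batch, seq_len, n_embd).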
def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
nx = config.n_embd
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
attn_outputs = self.attn(
x, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions,
)
a = attn_outputs[0]
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
outputs = [h] + attn_outputs[1:]
return outputs
class OpenAIGPTPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
load_tf_weights = load_tf_weights_in_openai_gpt
base_model_prefix = "transformer"
authorized_missing_keys = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class OpenAIGPTDoubleHeadsModelOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Language modeling loss.
mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
Multiple choice classification loss.
lm_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
lm_loss: Optional[torch.FloatTensor] = None
mc_loss: Optional[torch.FloatTensor] = None
lm_logits: torch.FloatTensor = None
mc_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
OPENAI_GPT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.register_buffer("position_ids", torch.arange(config.n_positions))
self.init_weights()
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, new_embeddings):
self.tokens_embed = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
            # Code is different from when we had a single embedding matrix for position and token embeddings
position_ids = self.position_ids[None, : input_shape[-1]]
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
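            # Toy illustration (1-D values only; the real tensor is 4-D as described above):
            # a padding mask [1, 1, 0] becomes additive scores [0.0, 0.0, -10000.0].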
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions=output_attentions)
hidden_states = outputs[0]
if output_attentions:
all_attentions = all_attentions + (outputs[1],)
hidden_states = hidden_states.view(*output_shape)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions,
)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=CausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
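            # Toy illustration of the shift (assumed 4-token sequence):
            # logits at positions [0, 1, 2] are scored against labels [1, 2, 3],
            # so each position is trained to predict the following token.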
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutput(
loss=loss,
logits=lm_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
    the classification head takes as input the hidden state at a specified classification token index in the input sequence.
""",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=OpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
labels=None,
mc_labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs
):
r"""
mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input)
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1]``.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`)
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`, defaults to :obj:`None`)
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Return:
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTDoubleHeadsModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt', return_dict=True)
tokenizer.add_special_tokens({'cls_token': '[CLS]'}) # Add a [CLS] to the vocabulary (we should train it also!)
model.resize_token_embeddings(len(tokenizer))
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
mc_token_ids = torch.tensor([input_ids.size(-1)-1, input_ids.size(-1)-1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_logits = outputs.lm_logits
mc_logits = outputs.mc_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if "lm_labels" in kwargs:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("lm_labels")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
        lm_loss, mc_loss = None, None
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
        if labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_loss is not None:
output = (mc_loss,) + output
return ((lm_loss,) + output) if lm_loss is not None else output
return OpenAIGPTDoubleHeadsModelOutput(
lm_loss=lm_loss,
mc_loss=mc_loss,
lm_logits=lm_logits,
mc_logits=mc_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
|
the-stack_106_20575
|
"""Persistence details for Model Classes"""
from __future__ import unicode_literals # isort:skip
from future import standard_library # isort:skip
standard_library.install_aliases() # noqa: E402
from io import StringIO
import json
import os
from flask import current_app
from sqlalchemy import exc
from ..database import db
from ..date_tools import FHIR_datetime
from ..dict_tools import dict_match
from ..models.identifier import Identifier
from ..trace import trace
def require(obj, attr, serial_form):
"""Validation function to assure required attribute is defined"""
if attr not in serial_form:
raise ValueError(
"missing lookup_field {} in serial form of {}".format(
attr, obj))
class ModelPersistence(object):
"""Adapter class to handle persistence of model tables"""
VERSION = '0.2'
def __init__(
self, model_class, lookup_field='id', sequence_name=None,
target_dir=None):
"""Initialize adapter for given model class"""
self.model = model_class
self.lookup_field = lookup_field
self.sequence_name = sequence_name
self.target_dir = target_dir
def persistence_filename(self):
"""Returns the configured persistence file
Looks for a config variable named `PERSISTENCE_DIR`,
which should define a path relative to the `portal/config`
directory such as `eproms`.
:returns: full path to persistence file
"""
scope = self.model.__name__ if self.model else 'site_persistence_file'
# product level config file - use presence of env var or config setting
persistence_dir = current_app.config.get("PERSISTENCE_DIR")
filename = os.path.join(
os.path.dirname(__file__), persistence_dir, '{scope}.json'.format(
scope=scope))
if self.target_dir:
# Blindly attempt to use target dir if named
filename = os.path.join(
self.target_dir, '{scope}.json'.format(scope=scope))
elif not os.path.exists(filename):
raise ValueError(
'File not found: {} Check value of environment variable `PERSISTENCE_DIR` '
'Should be a relative path from portal root.'.format(filename))
return filename
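    # For example (hypothetical values, following the docstring above): with
    # PERSISTENCE_DIR = 'eproms' and a model class named Organization, this
    # resolves to a path like portal/config/eproms/Organization.json, unless a
    # target_dir was given, in which case <target_dir>/Organization.json is used.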
@staticmethod
def _log(msg):
msg = msg.encode('utf-8')
current_app.logger.info(str(msg))
trace(str(msg))
def __header__(self, data):
data['resourceType'] = 'Bundle'
data['id'] = 'SitePersistence v{}'.format(self.VERSION)
data['meta'] = {'fhir_comments': [
"export of dynamic site data from host",
"{}".format(current_app.config.get('SERVER_NAME'))],
'lastUpdated': FHIR_datetime.now()}
data['type'] = 'document'
return data
def __read__(self):
self.filename = self.persistence_filename()
with open(self.filename, 'r') as f:
try:
data = json.load(f)
except ValueError as e:
msg = "Ill formed JSON in {}".format(self.filename)
self._log(msg)
raise ValueError(e, msg)
self.__verify_header__(data)
return data
def __iter__(self):
"""Iterate over objects in persistence file"""
data = self.__read__()
for o in data['entry']:
if not o.get('resourceType') == self.model.__name__:
# Hard code exception for resourceType: Patient being a User
if (o.get('resourceType') == 'Patient'
and self.model.__name__ == 'User'):
pass
else:
raise ValueError(
"Import {} error, Found unexpected '{}' resource".format(
self.model.__name__, o.get('resourceType')))
yield o
def __write__(self, data):
self.filename = self.persistence_filename()
if data:
with open(self.filename, 'w') as f:
f.write(json.dumps(data, indent=2, sort_keys=True,
separators=(',', ': ')))
self._log("Wrote site persistence to `{}`".format(self.filename))
def __verify_header__(self, data):
"""Make sure header conforms to what we're looking for"""
if data.get('resourceType') != 'Bundle':
raise ValueError("expected 'Bundle' resourceType not found")
if data.get('id') != 'SitePersistence v{}'.format(self.VERSION):
raise ValueError("unexpected SitePersistence version {}".format(
data.get('id')))
def export(self):
d = self.__header__({})
d['entry'] = self.serialize()
self.__write__(data=d)
def import_(self, keep_unmentioned):
objs_seen = []
for o in self:
result = self.update(o)
db.session.commit()
if hasattr(result, 'id'):
objs_seen.append(result.id)
index_field = 'id'
else:
objs_seen.append(getattr(result, self.lookup_field))
index_field = self.lookup_field
# Delete any not named
if not keep_unmentioned:
query = self.model.query.filter(
~getattr(self.model, index_field).in_(
objs_seen)) if objs_seen else []
for obj in query:
current_app.logger.info(
"Deleting {} not mentioned in "
"persistence file".format(obj))
if query:
query.delete(synchronize_session=False)
self.update_sequence()
trace("Import of {} complete".format(self.model.__name__))
@property
def query(self):
"""Return ready query to obtain objects for persistence"""
return self.model.query
def require_lookup_field(self, obj, serial_form):
"""Validate and return serial form of object"""
if isinstance(self.lookup_field, tuple):
for attr in self.lookup_field:
require(obj, attr, serial_form)
else:
require(obj, self.lookup_field, serial_form)
return serial_form
def serialize(self):
if hasattr(self.model, 'as_fhir'):
serialize = 'as_fhir'
else:
serialize = 'as_json'
results = []
if isinstance(self.lookup_field, tuple):
order_col = tuple(
self.model.__table__.c[field].asc() for field in
self.lookup_field)
for item in self.query.order_by(*order_col).all():
serial_form = self.require_lookup_field(
item, getattr(item, serialize)())
results.append(serial_form)
else:
order_col = (
self.model.__table__.c[self.lookup_field].asc()
if self.lookup_field != "identifier" else "id")
for item in self.query.order_by(order_col).all():
serial_form = self.require_lookup_field(
item, getattr(item, serialize)())
results.append(serial_form)
return results
def lookup_existing(self, new_obj, new_data):
match, field_description = None, None
if self.lookup_field == 'id':
field_description = str(new_obj.id)
match = (
self.model.query.get(new_obj.id)
if new_obj.id is not None else None)
elif self.lookup_field == 'identifier':
ids = new_data.get('identifier')
if len(ids) == 1:
id = Identifier.from_fhir(ids[0]).add_if_not_found()
field_description = str(id)
match = self.model.find_by_identifier(id) if id else None
elif len(ids) > 1:
raise ValueError(
"Multiple identifiers for {} "
"don't know which to match on".format(new_data))
elif isinstance(self.lookup_field, tuple):
# Composite key case
args = {k: new_data.get(k) for k in self.lookup_field}
field_description = str(args)
match = self.model.query.filter_by(**args).first()
else:
args = {self.lookup_field: new_data[self.lookup_field]}
field_description = getattr(new_obj, self.lookup_field)
match = self.model.query.filter_by(**args).first()
return match, field_description
def update(self, new_data):
if hasattr(self.model, 'from_fhir'):
from_method = self.model.from_fhir
update = 'update_from_fhir'
serialize = 'as_fhir'
else:
from_method = self.model.from_json
update = 'update_from_json'
serialize = 'as_json'
merged = None
# Generate an empty but complete serialized form of the object
# so that regardless of shorthand in the persistence file (say
# ignoring empty fields), a valid representation is available.
empty_instance = self.model()
complete_form = getattr(empty_instance, serialize)()
# Now overwrite any values present in persistence version
complete_form.update(new_data)
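        # For example (hypothetical model whose empty serialization is
        # {'name': None, 'value': 0}): a persistence entry {'name': 'x'} is
        # expanded to {'name': 'x', 'value': 0} before lookup and merge below.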
new_obj = from_method(complete_form)
existing, id_description = self.lookup_existing(
new_obj=new_obj, new_data=complete_form)
if existing:
details = StringIO()
serialized_existing = getattr(existing, serialize)()
# Several differences test false positive prior to the merge,
# so merge first to get an accurate delta
merged = getattr(existing, update)(complete_form)
if not dict_match(
getattr(merged, serialize)(),
serialized_existing, details):
self._log(
"{type} {id} collision on import. {details}".format(
type=self.model.__name__,
id=id_description,
details=details.getvalue()))
if hasattr(merged, 'invalidation_hook'):
merged.invalidation_hook()
else:
self._log("{type} {id} not found - importing".format(
type=self.model.__name__,
id=id_description))
db.session.add(new_obj)
if hasattr(new_obj, 'invalidation_hook'):
new_obj.invalidation_hook()
return merged or new_obj
def update_sequence(self):
""" Bump sequence numbers if necessary
As the import/update methods don't use the sequences, best
to manually set it to a value greater than the current max,
to avoid unique constraint violations in the future.
"""
if not self.sequence_name:
return
max_known = db.engine.execute(
"SELECT MAX(id) FROM {table}".format(
table=self.model.__tablename__)).fetchone()[0]
if max_known:
db.engine.execute(
"SELECT SETVAL('{}', {})".format(
self.sequence_name, max_known))
|
the-stack_106_20577
|
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for config.json and Prow configuration."""
import unittest
import collections
import json
import os
import re
import config_sort
import env_gc
import yaml
# pylint: disable=too-many-public-methods, too-many-branches, too-many-locals, too-many-statements
class JobTest(unittest.TestCase):
excludes = [
'BUILD', # For bazel
'config.json', # For --json mode
'validOwners.json', # Contains a list of current sigs; sigs are allowed to own jobs
'config_sort.py', # Tool script to sort config.json
'config_test.py', # Script for testing config.json and Prow config.
'env_gc.py', # Tool script to garbage collect unused .env files.
'move_extract.py',
]
# also exclude .pyc
excludes.extend(e + 'c' for e in excludes if e.endswith('.py'))
yaml_suffix = {
'jenkins/job-configs/bootstrap-maintenance.yaml' : 'suffix',
'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-pull-json.yaml' : 'jsonsuffix',
'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-security-pull.yaml' : 'suffix',
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci.yaml' : 'suffix',
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-commit.yaml' : 'commit-suffix',
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-repo.yaml' : 'repo-suffix',
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-soak.yaml' : 'soak-suffix',
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-dockerpush.yaml' : 'dockerpush-suffix'
}
prow_config = '../prow/config.yaml'
realjobs = {}
prowjobs = []
@property
def jobs(self):
"""[(job, job_path)] sequence"""
for path, _, filenames in os.walk(config_sort.test_infra('jobs')):
for job in [f for f in filenames if f not in self.excludes]:
job_path = os.path.join(path, job)
yield job, job_path
def test_config_is_sorted(self):
"""Test jobs/config.json, prow/config.yaml and boskos/resources.json are sorted."""
with open(config_sort.test_infra('jobs/config.json')) as fp:
original = fp.read()
expect = config_sort.sorted_job_config().getvalue()
if original != expect:
self.fail('jobs/config.json is not sorted, please run '
'`bazel run //jobs:config_sort`')
with open(config_sort.test_infra('prow/config.yaml')) as fp:
original = fp.read()
expect = config_sort.sorted_prow_config().getvalue()
if original != expect:
self.fail('prow/config.yaml is not sorted, please run '
'`bazel run //jobs:config_sort`')
with open(config_sort.test_infra('boskos/resources.json')) as fp:
original = fp.read()
expect = config_sort.sorted_boskos_config().getvalue()
if original != expect:
self.fail('boskos/resources.json is not sorted, please run '
'`bazel run //jobs:config_sort`')
def test_orphaned_env(self):
orphans = env_gc.find_orphans()
if orphans:
self.fail('the following .env files are not referenced ' +
'in config.json, please run `bazel run //jobs:env_gc: ' +
' '.join(orphans))
def test_bootstrap_maintenance_yaml(self):
def check(job, name):
job_name = 'maintenance-%s' % name
self.assertIn('frequency', job)
self.assertIn('repo-name', job)
self.assertIn('.', job['repo-name']) # Has domain
self.assertGreater(job['timeout'], 0)
return job_name
self.check_bootstrap_yaml('jenkins/job-configs/bootstrap-maintenance.yaml', check)
def test_bootstrap_pull_json_yaml(self):
def check(job, name):
job_name = 'pull-%s' % name
self.assertIn('max-total', job)
self.assertIn('repo-name', job)
self.assertIn('.', job['repo-name']) # Has domain
self.assertIn('timeout', job)
self.assertNotIn('json', job)
self.assertGreater(job['timeout'], 0)
return job_name
self.check_bootstrap_yaml(
'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-pull-json.yaml', check)
def test_bootstrap_security_pull(self):
def check(job, name):
job_name = 'pull-%s' % name
self.assertIn('max-total', job)
self.assertIn('repo-name', job)
self.assertIn('.', job['repo-name']) # Has domain
self.assertIn('timeout', job)
self.assertNotIn('json', job)
self.assertGreater(job['timeout'], 0)
return job_name
self.check_bootstrap_yaml(
'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-security-pull.yaml', check)
def test_bootstrap_security_match(self):
json_jobs = self.load_bootstrap_yaml(
'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-pull-json.yaml')
sec_jobs = self.load_bootstrap_yaml(
'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-security-pull.yaml')
for name, job in sec_jobs.iteritems():
self.assertIn(name, json_jobs)
job2 = json_jobs[name]
for attr in job:
if attr == 'repo-name':
continue
self.assertEquals(job[attr], job2[attr])
def test_bootstrap_ci_yaml(self):
def check(job, name):
job_name = 'ci-%s' % name
self.assertIn('frequency', job)
self.assertIn('trigger-job', job)
self.assertNotIn('branch', job)
self.assertNotIn('json', job)
self.assertGreater(job['timeout'], 0, job_name)
self.assertGreaterEqual(job['jenkins-timeout'], job['timeout']+100, job_name)
return job_name
self.check_bootstrap_yaml(
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci.yaml',
check)
def test_bootstrap_ci_commit_yaml(self):
def check(job, name):
job_name = 'ci-%s' % name
self.assertIn('branch', job)
self.assertIn('commit-frequency', job)
self.assertIn('giturl', job)
self.assertIn('repo-name', job)
self.assertIn('timeout', job)
self.assertNotIn('use-logexporter', job)
self.assertGreater(job['timeout'], 0, job)
return job_name
self.check_bootstrap_yaml(
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-commit.yaml',
check)
def test_bootstrap_ci_repo_yaml(self):
def check(job, name):
job_name = 'ci-%s' % name
self.assertIn('branch', job)
self.assertIn('frequency', job)
self.assertIn('repo-name', job)
self.assertIn('timeout', job)
self.assertNotIn('json', job)
self.assertNotIn('use-logexporter', job)
self.assertGreater(job['timeout'], 0, name)
return job_name
self.check_bootstrap_yaml(
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-repo.yaml',
check)
def test_bootstrap_ci_soak_yaml(self):
def check(job, name):
job_name = 'ci-%s' % name
self.assertIn('blocker', job)
self.assertIn('frequency', job)
self.assertIn('scan', job)
self.assertNotIn('repo-name', job)
self.assertNotIn('branch', job)
self.assertIn('timeout', job)
self.assertIn('soak-repos', job)
self.assertNotIn('use-logexporter', job)
self.assertGreater(job['timeout'], 0, name)
return job_name
self.check_bootstrap_yaml(
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-soak.yaml',
check)
def test_bootstrap_ci_dockerpush(self):
def check(job, name):
job_name = 'ci-%s' % name
self.assertIn('branch', job)
self.assertIn('frequency', job)
self.assertIn('repo-name', job)
self.assertIn('timeout', job)
self.assertNotIn('use-logexporter', job)
self.assertGreater(job['timeout'], 0, name)
return job_name
self.check_bootstrap_yaml(
'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-dockerpush.yaml',
check)
def check_job_template(self, tmpl):
builders = tmpl.get('builders')
if not isinstance(builders, list):
self.fail(tmpl)
self.assertEquals(1, len(builders), builders)
shell = builders[0]
if not isinstance(shell, dict):
self.fail(tmpl)
self.assertEquals(1, len(shell), tmpl)
if 'raw' in shell:
self.assertEquals('maintenance-all-{suffix}', tmpl['name'])
return
cmd = shell.get('shell')
if not isinstance(cmd, basestring):
self.fail(tmpl)
self.assertIn('--service-account=', cmd)
self.assertIn('--upload=', cmd)
if 'kubernetes-security' in cmd:
self.assertIn('--upload=\'gs://kubernetes-security-jenkins/pr-logs\'', cmd)
elif '${{PULL_REFS}}' in cmd:
self.assertIn('--upload=\'gs://kubernetes-jenkins/pr-logs\'', cmd)
else:
self.assertIn('--upload=\'gs://kubernetes-jenkins/logs\'', cmd)
def add_prow_job(self, job):
name = job.get('name')
real_job = {}
real_job['name'] = name
if 'spec' in job:
spec = job.get('spec')
for container in spec.get('containers'):
if 'args' in container:
for arg in container.get('args'):
match = re.match(r'--timeout=(\d+)', arg)
if match:
real_job['timeout'] = match.group(1)
if 'pull-' not in name and name in self.realjobs and name not in self.prowjobs:
            self.fail('CI job %s exists in both Jenkins and Prow config!' % name)
if name not in self.realjobs:
self.realjobs[name] = real_job
self.prowjobs.append(name)
if 'run_after_success' in job:
for sub in job.get('run_after_success'):
self.add_prow_job(sub)
def load_prow_yaml(self, path):
with open(os.path.join(
os.path.dirname(__file__), path)) as fp:
doc = yaml.safe_load(fp)
if 'periodics' not in doc:
self.fail('No periodics in prow config!')
if 'presubmits' not in doc:
self.fail('No presubmits in prow config!')
for item in doc.get('periodics'):
self.add_prow_job(item)
if 'postsubmits' not in doc:
self.fail('No postsubmits in prow config!')
presubmits = doc.get('presubmits')
postsubmits = doc.get('postsubmits')
for _repo, joblist in presubmits.items() + postsubmits.items():
for job in joblist:
self.add_prow_job(job)
def load_bootstrap_yaml(self, path):
with open(config_sort.test_infra(path)) as fp:
doc = yaml.safe_load(fp)
project = None
defined_templates = set()
for item in doc:
if not isinstance(item, dict):
continue
if isinstance(item.get('job-template'), dict):
defined_templates.add(item['job-template']['name'])
self.check_job_template(item['job-template'])
if not isinstance(item.get('project'), dict):
continue
project = item['project']
self.assertIn('bootstrap-', project.get('name'))
break
else:
self.fail('Could not find bootstrap-pull-jobs project')
self.assertIn('jobs', project)
used_templates = {j for j in project['jobs']}
msg = '\nMissing templates: %s\nUnused templates: %s' % (
','.join(used_templates - defined_templates),
','.join(defined_templates - used_templates))
self.assertEquals(defined_templates, used_templates, msg)
self.assertIn(path, self.yaml_suffix)
jobs = project.get(self.yaml_suffix[path])
if not jobs or not isinstance(jobs, list):
self.fail('Could not find suffix list in %s' % (project))
real_jobs = {}
for job in jobs:
# Things to check on all bootstrap jobs
if not isinstance(job, dict):
self.fail('suffix items should be dicts: %s' % jobs)
self.assertEquals(1, len(job), job)
name = job.keys()[0]
real_job = job[name]
self.assertNotIn(name, real_jobs, 'duplicate job: %s' % name)
real_jobs[name] = real_job
real_name = real_job.get('job-name', 'unset-%s' % name)
if real_name not in self.realjobs:
self.realjobs[real_name] = real_job
return real_jobs
def check_bootstrap_yaml(self, path, check):
for name, real_job in self.load_bootstrap_yaml(path).iteritems():
# Things to check on all bootstrap jobs
for key, value in real_job.items():
if not isinstance(value, (basestring, int)):
self.fail('Jobs may not contain child objects %s: %s' % (
key, value))
if '{' in str(value):
self.fail('Jobs may not contain {expansions} - %s: %s' % (
key, value)) # Use simple strings
# Things to check on specific flavors.
job_name = check(real_job, name)
self.assertTrue(job_name)
self.assertEquals(job_name, real_job.get('job-name'))
def get_real_bootstrap_job(self, job):
key = os.path.splitext(job.strip())[0]
if not key in self.realjobs:
for yamlf in self.yaml_suffix:
self.load_bootstrap_yaml(yamlf)
self.load_prow_yaml(self.prow_config)
self.assertIn(key, sorted(self.realjobs)) # sorted for clearer error message
return self.realjobs.get(key)
def test_valid_timeout(self):
"""All jobs set a timeout less than 120m or set DOCKER_TIMEOUT."""
default_timeout = 60
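        # Example of the leeway rule enforced below (illustrative numbers):
        # a job passing kubetest --timeout=90m needs a container timeout of at
        # least 110 (90 + 20), otherwise it is flagged as 'insufficient kubetest leeway'.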
bad_jobs = set()
with open(config_sort.test_infra('jobs/config.json')) as fp:
config = json.loads(fp.read())
for job, job_path in self.jobs:
job_name = job.rsplit('.', 1)[0]
modern = config.get(job_name, {}).get('scenario') in [
'kubernetes_e2e',
'kubernetes_kops_aws',
]
valids = [
'kubernetes-e2e-',
'kubernetes-kubemark-',
'kubernetes-soak-',
'kubernetes-federation-e2e-',
'kops-e2e-',
]
if not re.search('|'.join(valids), job):
continue
with open(job_path) as fp:
lines = list(l for l in fp if not l.startswith('#'))
container_timeout = default_timeout
kubetest_timeout = None
for line in lines: # Validate old pattern no longer used
if line.startswith('### Reporting'):
bad_jobs.add(job)
if '{rc}' in line:
bad_jobs.add(job)
self.assertFalse(job.endswith('.sh'), job)
self.assertTrue(modern, job)
realjob = self.get_real_bootstrap_job(job)
self.assertTrue(realjob)
self.assertIn('timeout', realjob, job)
container_timeout = int(realjob['timeout'])
for line in lines:
if 'DOCKER_TIMEOUT=' in line:
self.fail('Set container timeout in prow and/or bootstrap yaml: %s' % job)
if 'KUBEKINS_TIMEOUT=' in line:
self.fail(
'Set kubetest --timeout in config.json, not KUBEKINS_TIMEOUT: %s'
% job
)
for arg in config[job_name]['args']:
if arg == '--timeout=None':
bad_jobs.add(('Must specify a timeout', job, arg))
mat = re.match(r'--timeout=(\d+)m', arg)
if not mat:
continue
kubetest_timeout = int(mat.group(1))
if kubetest_timeout is None:
self.fail('Missing timeout: %s' % job)
if kubetest_timeout > container_timeout:
bad_jobs.add((job, kubetest_timeout, container_timeout))
elif kubetest_timeout + 20 > container_timeout:
bad_jobs.add((
'insufficient kubetest leeway',
job, kubetest_timeout, container_timeout
))
if bad_jobs:
self.fail('\n'.join(str(s) for s in bad_jobs))
def test_valid_job_config_json(self):
"""Validate jobs/config.json."""
self.load_prow_yaml(self.prow_config)
config = config_sort.test_infra('jobs/config.json')
owners = config_sort.test_infra('jobs/validOwners.json')
with open(config) as fp, open(owners) as ownfp:
config = json.loads(fp.read())
valid_owners = json.loads(ownfp.read())
for job in config:
            # ownership assertions
self.assertIn('sigOwners', config[job], job)
self.assertIsInstance(config[job]['sigOwners'], list, job)
self.assertTrue(config[job]['sigOwners'], job) # non-empty
owners = config[job]['sigOwners']
for owner in owners:
self.assertIsInstance(owner, basestring, job)
self.assertIn(owner, valid_owners, job)
# env assertions
self.assertTrue('scenario' in config[job], job)
scenario = config_sort.test_infra('scenarios/%s.py' % config[job]['scenario'])
self.assertTrue(os.path.isfile(scenario), job)
self.assertTrue(os.access(scenario, os.X_OK|os.R_OK), job)
args = config[job].get('args', [])
use_shared_build_in_args = False
extract_in_args = False
build_in_args = False
for arg in args:
if arg.startswith('--use-shared-build'):
use_shared_build_in_args = True
elif arg.startswith('--build'):
build_in_args = True
elif arg.startswith('--extract'):
extract_in_args = True
match = re.match(r'--env-file=([^\"]+)\.env', arg)
if match:
path = config_sort.test_infra('%s.env' % match.group(1))
self.assertTrue(
os.path.isfile(path),
'%s does not exist for %s' % (path, job))
elif 'kops' not in job:
match = re.match(r'--cluster=([^\"]+)', arg)
if match:
cluster = match.group(1)
self.assertLessEqual(
len(cluster), 20,
'Job %r, --cluster should be 20 chars or fewer' % job
)
# these args should not be combined:
# --use-shared-build and (--build or --extract)
self.assertFalse(use_shared_build_in_args and build_in_args)
self.assertFalse(use_shared_build_in_args and extract_in_args)
if config[job]['scenario'] == 'kubernetes_e2e':
if job in self.prowjobs:
for arg in args:
# --mode=local is default now
self.assertNotIn('--mode', args, job)
else:
self.assertIn('--mode=docker', args, job)
for arg in args:
if "--env=" in arg:
self._check_env(job, arg.split("=", 1)[1])
if '--provider=gke' in args:
self.assertTrue('--deployment=gke' in args,
'%s must use --deployment=gke' % job)
self.assertFalse(any('--gcp-master-image' in a for a in args),
'%s cannot use --gcp-master-image on GKE' % job)
self.assertFalse(any('--gcp-nodes' in a for a in args),
'%s cannot use --gcp-nodes on GKE' % job)
if '--deployment=gke' in args:
self.assertTrue(any('--gcp-node-image' in a for a in args), job)
self.assertNotIn('--charts-tests', args) # Use --charts
if any('--check_version_skew' in a for a in args):
self.fail('Use --check-version-skew, not --check_version_skew in %s' % job)
if '--check-leaked-resources=true' in args:
self.fail('Use --check-leaked-resources (no value) in %s' % job)
                if '--check-leaked-resources=false' in args:
self.fail(
'Remove --check-leaked-resources=false (default value) from %s' % job)
if (
'--env-file=jobs/pull-kubernetes-e2e.env' in args
and '--check-leaked-resources' in args):
self.fail('PR job %s should not check for resource leaks' % job)
# Consider deleting any job with --check-leaked-resources=false
if (
'--provider=gce' not in args
and '--provider=gke' not in args
and '--check-leaked-resources' in args
and 'generated' not in config[job].get('tags', [])):
self.fail('Only GCP jobs can --check-leaked-resources, not %s' % job)
if '--mode=local' in args:
self.fail('--mode=local is default now, drop that for %s' % job)
extracts = [a for a in args if '--extract=' in a]
shared_builds = [a for a in args if '--use-shared-build' in a]
node_e2e = [a for a in args if '--deployment=node' in a]
if shared_builds and extracts:
self.fail(('e2e jobs cannot have --use-shared-build'
' and --extract: %s %s') % (job, args))
elif not extracts and not shared_builds and not node_e2e:
self.fail(('e2e job needs --extract or'
' --use-shared-build: %s %s') % (job, args))
if shared_builds or node_e2e:
expected = 0
elif any(s in job for s in [
'upgrade', 'skew', 'downgrade', 'rollback',
'ci-kubernetes-e2e-gce-canary',
]):
expected = 2
else:
expected = 1
if len(extracts) != expected:
self.fail('Wrong number of --extract args (%d != %d) in %s' % (
len(extracts), expected, job))
has_image_family = any(
[x for x in args if x.startswith('--image-family')])
has_image_project = any(
[x for x in args if x.startswith('--image-project')])
docker_mode = any(
[x for x in args if x.startswith('--mode=docker')])
if (
(has_image_family or has_image_project)
and docker_mode):
self.fail('--image-family / --image-project is not '
'supported in docker mode: %s' % job)
if has_image_family != has_image_project:
                    self.fail('--image-family and --image-project must be '
                              'both set or unset: %s' % job)
if job.startswith('pull-kubernetes-'):
self.assertIn('--cluster=', args)
if 'gke' in job:
stage = 'gs://kubernetes-release-dev/ci'
suffix = True
elif 'kubeadm' in job:
# kubeadm-based jobs use out-of-band .deb artifacts,
# not the --stage flag.
continue
else:
stage = 'gs://kubernetes-release-pull/ci/%s' % job
suffix = False
if not shared_builds:
self.assertIn('--stage=%s' % stage, args)
                self.assertEqual(
suffix,
any('--stage-suffix=' in a for a in args),
('--stage-suffix=', suffix, job, args))
def test_valid_env(self):
for job, job_path in self.jobs:
with open(job_path) as fp:
data = fp.read()
if 'kops' in job: # TODO(fejta): update this one too
continue
self.assertNotIn(
'JENKINS_USE_LOCAL_BINARIES=',
data,
'Send --extract=local to config.json, not JENKINS_USE_LOCAL_BINARIES in %s' % job)
self.assertNotIn(
'JENKINS_USE_EXISTING_BINARIES=',
data,
'Send --extract=local to config.json, not JENKINS_USE_EXISTING_BINARIES in %s' % job) # pylint: disable=line-too-long
def test_only_jobs(self):
"""Ensure that everything in jobs/ is a valid job name and script."""
for job, job_path in self.jobs:
# Jobs should have simple names: letters, numbers, -, .
            self.assertTrue(re.match(r'[.0-9a-z_-]+\.(sh|env)$', job), job)
# Jobs should point to a real, executable file
# Note: it is easy to forget to chmod +x
self.assertTrue(os.path.isfile(job_path), job_path)
self.assertFalse(os.path.islink(job_path), job_path)
if job.endswith('.sh'):
self.assertTrue(os.access(job_path, os.X_OK|os.R_OK), job_path)
else:
self.assertTrue(os.access(job_path, os.R_OK), job_path)
def test_all_project_are_unique(self):
# pylint: disable=line-too-long
allowed_list = {
# The cos image validation jobs intentionally share projects.
'ci-kubernetes-e2e-gce-cosdev-k8sdev-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosdev-k8sdev-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosdev-k8sdev-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosdev-k8sstable1-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosdev-k8sstable1-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosdev-k8sstable1-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosdev-k8sbeta-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosdev-k8sbeta-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosdev-k8sbeta-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sdev-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sdev-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sdev-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sbeta-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sbeta-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sbeta-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sstable1-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sstable1-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sstable1-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sstable2-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sstable2-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sstable2-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sstable3-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sstable3-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosbeta-k8sstable3-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sdev-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sdev-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sdev-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sbeta-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sbeta-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sbeta-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sstable1-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sstable1-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sstable1-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sstable2-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sstable2-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sstable2-slow.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sstable3-default.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sstable3-serial.env': 'ci-kubernetes-e2e-gce-cos*',
'ci-kubernetes-e2e-gce-cosstable1-k8sstable3-slow.env': 'ci-kubernetes-e2e-gce-cos*',
# The ubuntu image validation jobs intentionally share projects.
'ci-kubernetes-e2e-gce-ubuntudev-k8sdev-default.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntudev-k8sdev-serial.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntudev-k8sdev-slow.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntudev-k8sbeta-default.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntudev-k8sbeta-serial.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntudev-k8sbeta-slow.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntudev-k8sstable1-default.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntudev-k8sstable1-serial.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntudev-k8sstable1-slow.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntustable1-k8sdev-default.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntustable1-k8sdev-serial.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntustable1-k8sdev-slow.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable1-default.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable1-serial.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable1-slow.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable2-default.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable2-serial.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable2-slow.env': 'ci-kubernetes-e2e-gce-ubuntu*',
'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-alphafeatures.env': 'ci-kubernetes-e2e-gke-ubuntu*',
'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-autoscaling.env': 'ci-kubernetes-e2e-gke-ubuntu*',
'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-default.env': 'ci-kubernetes-e2e-gke-ubuntu*',
'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-flaky.env': 'ci-kubernetes-e2e-gke-ubuntu*',
'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-ingress.env': 'ci-kubernetes-e2e-gke-ubuntu*',
'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-reboot.env': 'ci-kubernetes-e2e-gke-ubuntu*',
'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-serial.env': 'ci-kubernetes-e2e-gke-ubuntu*',
'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-slow.env': 'ci-kubernetes-e2e-gke-ubuntu*',
'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-updown.env': 'ci-kubernetes-e2e-gke-ubuntu*',
# The 1.5 and 1.6 scalability jobs intentionally share projects.
'ci-kubernetes-e2e-gce-scalability-release-1-7.env': 'ci-kubernetes-e2e-gce-scalability-release-*',
'ci-kubernetes-e2e-gce-scalability-release-1-6.env': 'ci-kubernetes-e2e-gce-scalability-release-*',
'ci-kubernetes-e2e-gci-gce-scalability-release-1-7.env': 'ci-kubernetes-e2e-gci-gce-scalability-release-*',
'ci-kubernetes-e2e-gci-gce-scalability-release-1-6.env': 'ci-kubernetes-e2e-gci-gce-scalability-release-*',
'ci-kubernetes-e2e-gce-scalability.env': 'ci-kubernetes-e2e-gce-scalability-*',
'ci-kubernetes-e2e-gce-scalability-canary.env': 'ci-kubernetes-e2e-gce-scalability-*',
# TODO(fejta): remove these (found while migrating jobs)
'ci-kubernetes-kubemark-100-gce.env': 'ci-kubernetes-kubemark-*',
'ci-kubernetes-kubemark-5-gce.env': 'ci-kubernetes-kubemark-*',
'ci-kubernetes-kubemark-high-density-100-gce.env': 'ci-kubernetes-kubemark-*',
'ci-kubernetes-kubemark-gce-scale.env': 'ci-kubernetes-scale-*',
'pull-kubernetes-kubemark-e2e-gce-big.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gce-large-manual-up.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gce-large-manual-down.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gce-large-correctness.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gce-large-performance.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gce-scale-correctness.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gce-scale-performance.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gke-large-correctness.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gke-large-performance.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gke-large-deploy.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gke-large-teardown.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-e2e-gke-scale-correctness.env': 'ci-kubernetes-scale-*',
'ci-kubernetes-federation-build.sh': 'ci-kubernetes-federation-*',
'ci-kubernetes-e2e-gce-federation.env': 'ci-kubernetes-federation-*',
'pull-kubernetes-federation-e2e-gce.env': 'pull-kubernetes-federation-e2e-gce-*',
'ci-kubernetes-pull-gce-federation-deploy.env': 'pull-kubernetes-federation-e2e-gce-*',
'pull-kubernetes-federation-e2e-gce-canary.env': 'pull-kubernetes-federation-e2e-gce-*',
'ci-kubernetes-pull-gce-federation-deploy-canary.env': 'pull-kubernetes-federation-e2e-gce-*',
'pull-kubernetes-e2e-gce.env': 'pull-kubernetes-e2e-gce-*',
'pull-kubernetes-e2e-gce-canary.env': 'pull-kubernetes-e2e-gce-*',
'ci-kubernetes-e2e-gce.env': 'ci-kubernetes-e2e-gce-*',
'ci-kubernetes-e2e-gce-canary.env': 'ci-kubernetes-e2e-gce-*',
}
for soak_prefix in [
'ci-kubernetes-soak-gce-1.5',
'ci-kubernetes-soak-gce-1-7',
'ci-kubernetes-soak-gce-1.4',
'ci-kubernetes-soak-gce-1.6',
'ci-kubernetes-soak-gce-2',
'ci-kubernetes-soak-gce',
'ci-kubernetes-soak-gci-gce-1.5',
'ci-kubernetes-soak-gce-gci',
'ci-kubernetes-soak-gke-gci',
'ci-kubernetes-soak-gce-federation',
'ci-kubernetes-soak-gci-gce-1.4',
'ci-kubernetes-soak-gci-gce-1.6',
'ci-kubernetes-soak-gci-gce-1-7',
'ci-kubernetes-soak-cos-docker-validation',
'ci-kubernetes-soak-gke',
]:
allowed_list['%s-deploy.env' % soak_prefix] = '%s-*' % soak_prefix
allowed_list['%s-test.env' % soak_prefix] = '%s-*' % soak_prefix
# pylint: enable=line-too-long
projects = collections.defaultdict(set)
boskos = []
with open(config_sort.test_infra('boskos/resources.json')) as fp:
for rtype in json.loads(fp.read()):
if rtype['type'] == 'gce-project' or rtype['type'] == 'gke-project':
for name in rtype['names']:
boskos.append(name)
with open(config_sort.test_infra('jobs/config.json')) as fp:
job_config = json.load(fp)
for job, job_path in self.jobs:
with open(job_path) as fp:
lines = list(fp)
project = ''
for line in lines:
line = line.strip()
if not line.startswith('PROJECT='):
continue
if '-soak-' in job: # Soak jobs have deploy/test pairs
job = job.replace('-test', '-*').replace('-deploy', '-*')
if job.startswith('ci-kubernetes-node-'):
job = 'ci-kubernetes-node-*'
if not line.startswith('#') and job.endswith('.sh'):
self.assertIn('export', line, line)
if job.endswith('.sh'):
project = re.search(r'PROJECT="([^"]+)"', line).group(1)
else:
project = re.search(r'PROJECT=([^"]+)', line).group(1)
if project in boskos:
self.fail('Project %s cannot be in boskos/resources.json!' % project)
cfg = job_config.get(job.rsplit('.', 1)[0], {})
if not project and cfg.get('scenario') == 'kubernetes_e2e':
for arg in cfg.get('args', []):
if not arg.startswith('--gcp-project='):
continue
project = arg.split('=', 1)[1]
if project:
projects[project].add(allowed_list.get(job, job))
duplicates = [(p, j) for p, j in projects.items() if len(j) > 1]
if duplicates:
self.fail('Jobs duplicate projects:\n %s' % (
'\n '.join('%s: %s' % t for t in duplicates)))
def test_jobs_do_not_source_shell(self):
for job, job_path in self.jobs:
if job.startswith('pull-'):
continue # No clean way to determine version
with open(job_path) as fp:
script = fp.read()
self.assertFalse(re.search(r'\Wsource ', script), job)
self.assertNotIn('\n. ', script, job)
def test_all_bash_jobs_have_errexit(self):
options = {
'errexit',
'nounset',
'pipefail',
}
for job, job_path in self.jobs:
if not job.endswith('.sh'):
continue
with open(job_path) as fp:
lines = list(fp)
for option in options:
expected = 'set -o %s\n' % option
self.assertIn(
expected, lines,
'%s not found in %s' % (expected, job_path))
def _check_env(self, job, setting):
if not re.match(r'[0-9A-Z_]+=[^\n]*', setting):
self.fail('[%r]: Env %r: need to follow FOO=BAR pattern' % (job, setting))
if '#' in setting:
self.fail('[%r]: Env %r: No inline comments' % (job, setting))
if '"' in setting or '\'' in setting:
self.fail('[%r]: Env %r: No quote in env' % (job, setting))
if '$' in setting:
self.fail('[%r]: Env %r: Please resolve variables in env' % (job, setting))
if '{' in setting or '}' in setting:
self.fail('[%r]: Env %r: { and } are not allowed in env' % (job, setting))
# also test for https://github.com/kubernetes/test-infra/issues/2829
# TODO(fejta): sort this list
black = [
('CHARTS_TEST=', '--charts-tests'),
('CLUSTER_IP_RANGE=', '--test_args=--cluster-ip-range=FOO'),
('CLOUDSDK_BUCKET=', '--gcp-cloud-sdk=gs://foo'),
('CLUSTER_NAME=', '--cluster=FOO'),
('E2E_CLEAN_START=', '--test_args=--clean-start=true'),
('E2E_DOWN=', '--down=true|false'),
('E2E_MIN_STARTUP_PODS=', '--test_args=--minStartupPods=FOO'),
('E2E_NAME=', '--cluster=whatever'),
('E2E_PUBLISH_PATH=', '--publish=gs://FOO'),
('E2E_REPORT_DIR=', '--test_args=--report-dir=FOO'),
('E2E_REPORT_PREFIX=', '--test_args=--report-prefix=FOO'),
('E2E_TEST=', '--test=true|false'),
('E2E_UPGRADE_TEST=', '--upgrade_args=FOO'),
('E2E_UP=', '--up=true|false'),
('E2E_OPT=', 'Send kubetest the flags directly'),
('FAIL_ON_GCP_RESOURCE_LEAK=', '--check-leaked-resources=true|false'),
('FEDERATION_DOWN=', '--down=true|false'),
('FEDERATION_UP=', '--up=true|false'),
('GINKGO_TEST_ARGS=', '--test_args=FOO'),
('GINKGO_UPGRADE_TEST_ARGS=', '--upgrade_args=FOO'),
('JENKINS_FEDERATION_PREFIX=', '--stage=gs://FOO'),
('JENKINS_GCI_PATCH_K8S=', 'Unused, see --extract docs'),
('JENKINS_PUBLISHED_VERSION=', '--extract=V'),
('JENKINS_PUBLISHED_SKEW_VERSION=', '--extract=V'),
('JENKINS_USE_SKEW_KUBECTL=', 'SKEW_KUBECTL=y'),
('JENKINS_USE_SKEW_TESTS=', '--skew'),
('JENKINS_SOAK_MODE', '--soak'),
('JENKINS_SOAK_PREFIX', '--stage=gs://FOO'),
('JENKINS_USE_EXISTING_BINARIES=', '--extract=local'),
('JENKINS_USE_LOCAL_BINARIES=', '--extract=none'),
('JENKINS_USE_SERVER_VERSION=', '--extract=gke'),
('JENKINS_USE_GCI_VERSION=', '--extract=gci/FAMILY'),
('JENKINS_USE_GCI_HEAD_IMAGE_FAMILY=', '--extract=gci/FAMILY'),
('KUBE_GKE_NETWORK=', '--gcp-network=FOO'),
('KUBE_GCE_NETWORK=', '--gcp-network=FOO'),
('KUBE_GCE_ZONE=', '--gcp-zone=FOO'),
('KUBEKINS_TIMEOUT=', '--timeout=XXm'),
('KUBEMARK_TEST_ARGS=', '--test_args=FOO'),
('KUBEMARK_TESTS=', '--test_args=--ginkgo.focus=FOO'),
('KUBEMARK_MASTER_SIZE=', '--kubemark-master-size=FOO'),
('KUBEMARK_NUM_NODES=', '--kubemark-nodes=FOO'),
('KUBERNETES_PROVIDER=', '--provider=FOO'),
('PERF_TESTS=', '--perf'),
('PROJECT=', '--gcp-project=FOO'),
('SKEW_KUBECTL=', '--test_args=--kubectl-path=FOO'),
('USE_KUBEMARK=', '--kubemark'),
('ZONE=', '--gcp-zone=FOO'),
]
for env, fix in black:
if 'kops' in job and env in [
'JENKINS_PUBLISHED_VERSION=',
'JENKINS_USE_LOCAL_BINARIES=',
'GINKGO_TEST_ARGS=',
'KUBERNETES_PROVIDER=',
]:
                continue # TODO(fejta): migrate kops jobs
if setting.startswith(env):
self.fail('[%s]: Env %s: Convert %s to use %s in jobs/config.json' % (
job, setting, env, fix))
def test_envs_no_export(self):
for job, job_path in self.jobs:
if not job.endswith('.env'):
continue
with open(job_path) as fp:
lines = list(fp)
for line in lines:
line = line.strip()
self.assertFalse(line.endswith('\\'))
if not line:
continue
if line.startswith('#'):
continue
self._check_env(job, line)
def test_no_bad_vars_in_jobs(self):
"""Searches for jobs that contain ${{VAR}}"""
for job, job_path in self.jobs:
with open(job_path) as fp:
script = fp.read()
bad_vars = re.findall(r'(\${{.+}})', script)
if bad_vars:
self.fail('Job %s contains bad bash variables: %s' % (job, ' '.join(bad_vars)))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_20580
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from basecls.configs import RegNetConfig
_cfg = dict(
model=dict(
name="regnety_080",
),
)
class Cfg(RegNetConfig):
def __init__(self, values_or_file=None, **kwargs):
super().__init__(_cfg)
self.merge(values_or_file, **kwargs)
|
the-stack_106_20582
|
"""
Given a char array representing tasks CPU need to do.
It contains capital letters A to Z where different letters represent different tasks.
Tasks could be done without original order. Each task could be done in one interval.
For each interval, CPU could finish one task or just be idle.
However, there is a non-negative cooling interval n that means between two same tasks,
there must be at least n intervals that CPU are doing different tasks or just be idle.
You need to return the least number of intervals the CPU will take to finish all the given tasks.
Example 1:
Input: tasks = ["A","A","A","B","B","B"], n = 2
Output: 8
Explanation: A -> B -> idle -> A -> B -> idle -> A -> B.
Note:
The number of tasks is in the range [1, 10000].
The integer n is in the range [0, 100].
"""
import collections
# V0
# pattern :
# =============================================================================
# -> task_time = (max_mission_count - 1) * (n + 1) + (number_of_max_mission)
# =============================================================================
#
# -> Example 1) :
# -> AAAABBBBCCD, n=3
# => THE EXPECTED tuned missions is like : ABXXABXXABXXAB
# -> (4 - 1) * (3 + 1) + 2 = 14
# -> 4 is the "how many missions the max mission has" (AAAA or BBBB)
# -> 3 is n
# -> 2 is "how many mission have max mission count" -> A and B. so it's 2
# -> in sum,
# -> (4 - 1) * (3 + 1) is for ABXXABXXABXX
# -> and 2 is for AB
#
# -> Example 2) :
# -> AAABBB, n = 2
# -> THE EXPECTED tuned missions is like : ABXABXAB
# -> (3 - 1) * (2 + 1) + (2) = 8
class Solution(object):
def leastInterval(self, tasks, n):
count = collections.Counter(tasks)
        most = max(count.values())
num_most = len([i for i, v in count.items() if v == most])
time = (most - 1) * (n + 1) + num_most
return max(time, len(tasks))
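# Illustrative check (added for clarity, not part of the original solutions):
# the V0 formula reproduces the two worked examples above.
assert Solution().leastInterval(["A", "A", "A", "B", "B", "B"], 2) == 8
assert Solution().leastInterval(list("AAAABBBBCCD"), 3) == 14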
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/81947087
# NOTE : However, there is a non-negative cooling interval n that means between two same tasks, there must be at least n intervals that CPU are doing different tasks or just be idle.
# IDEA : MISSION_TIME = ( # of most mission -1 ) * (n+1) + (# of how many missions with same mission count)
# -> ANS = max(MISSION_TIME , tasks) (since every mission need to be run at least once)
class Solution(object):
def leastInterval(self, tasks, n):
"""
:type tasks: List[str]
:type n: int
:rtype: int
"""
count = collections.Counter(tasks)
most = count.most_common()[0][1]
num_most = len([i for i, v in count.items() if v == most])
time = (most - 1) * (n + 1) + num_most
        return max(time, len(tasks)) # len(tasks) is a lower bound: every task takes at least one interval
# V1'
# https://www.jiuzhang.com/solution/task-scheduler/#tag-highlight-lang-python
class Solution:
"""
@param tasks: the given char array representing tasks CPU need to do
@param n: the non-negative cooling interval
@return: the least number of intervals the CPU will take to finish all the given tasks
"""
def leastInterval(self, tasks, n):
# write your code here
d = collections.Counter(tasks)
counts = list(d.values())
longest = max(counts)
ans = (longest - 1) * (n + 1) + counts.count(longest)
return max(len(tasks), ans)
# V2
# Time: O(n)
# Space: O(26) = O(1)
from collections import Counter
class Solution(object):
def leastInterval(self, tasks, n):
"""
:type tasks: List[str]
:type n: int
:rtype: int
"""
counter = Counter(tasks)
_, max_count = counter.most_common(1)[0]
result = (max_count-1) * (n+1)
for count in counter.values():
if count == max_count:
result += 1
return max(result, len(tasks))
|
the-stack_106_20583
|
import traceback
from celery import shared_task
from celery.utils.log import get_task_logger
# NOTE: do not import `models` to avoid recursive imports
logger = get_task_logger(__name__)
def _safe_execution(func, *args, **kwargs):
"""Execute a task and return any tracebacks that occur as a string."""
try:
func(*args, **kwargs)
return ''
except Exception as exc:
logger.exception(f'Internal error run `{func.__name__}`: {exc}')
return traceback.format_exc()
def _run_with_failure_reason(model, func, *args, **kwargs):
"""Run a function that will update the model's `failure_reason`."""
from .models.mixins import Status
model.status = Status.RUNNING
model.save(update_fields=['status'])
model.failure_reason = _safe_execution(func, *args, **kwargs)
if model.failure_reason:
model.status = Status.FAILED
else:
model.status = Status.SUCCEEDED
model.save(update_fields=['failure_reason', 'status'])
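# The concrete tasks below all follow the same shape. A new task would look like
# the sketch in this comment (model/function names here are hypothetical and not
# part of the original module):
#
#     @shared_task(time_limit=86400)
#     def task_process_widget(widget_id):
#         from .models.widgets import Widget
#         from .models.widgets.etl import process_widget
#         widget = Widget.objects.get(id=widget_id)
#         _run_with_failure_reason(widget, process_widget, widget_id)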
@shared_task(time_limit=86400)
def task_read_image_file(file_id):
from .models.imagery import ImageFile
from .models.imagery.etl import read_image_file
image_file = ImageFile.objects.get(id=file_id)
_run_with_failure_reason(image_file, read_image_file, file_id)
@shared_task(time_limit=86400)
def task_read_geometry_archive(archive_id):
from .models.geometry.etl import GeometryArchive, read_geometry_archive
archive = GeometryArchive.objects.get(id=archive_id)
_run_with_failure_reason(archive, read_geometry_archive, archive_id)
@shared_task(time_limit=86400)
def task_populate_raster_entry(raster_id):
from .models.imagery import RasterEntry
from .models.imagery.etl import populate_raster_entry
raster_entry = RasterEntry.objects.get(id=raster_id)
_run_with_failure_reason(raster_entry, populate_raster_entry, raster_id)
@shared_task(time_limit=86400)
def task_populate_raster_footprint(raster_id):
from .models.imagery import RasterEntry
from .models.imagery.etl import populate_raster_footprint
raster_entry = RasterEntry.objects.get(id=raster_id)
_run_with_failure_reason(raster_entry, populate_raster_footprint, raster_id)
@shared_task(time_limit=86400)
def task_populate_raster_outline(raster_id):
from .models.imagery import RasterEntry
from .models.imagery.etl import populate_raster_outline
raster_entry = RasterEntry.objects.get(id=raster_id)
_run_with_failure_reason(raster_entry, populate_raster_outline, raster_id)
@shared_task(time_limit=86400)
def task_load_kwcoco_dataset(kwcoco_dataset_id):
from .models.imagery import KWCOCOArchive
from .models.imagery.kwcoco_etl import load_kwcoco_dataset
ds_entry = KWCOCOArchive.objects.get(id=kwcoco_dataset_id)
_run_with_failure_reason(ds_entry, load_kwcoco_dataset, kwcoco_dataset_id)
@shared_task(time_limit=86400)
def task_read_fmv_file(file_id):
from .models.fmv import FMVFile
from .models.fmv.etl import read_fmv_file
fmv_file = FMVFile.objects.get(id=file_id)
_run_with_failure_reason(fmv_file, read_fmv_file, file_id)
@shared_task(time_limit=86400)
def task_convert_to_cog(conv_id):
from .models.imagery import ConvertedImageFile
from .models.imagery.subsample import convert_to_cog
cog = ConvertedImageFile.objects.get(id=conv_id)
_run_with_failure_reason(cog, convert_to_cog, conv_id)
@shared_task(time_limit=86400)
def task_populate_subsampled_image(subsampled_id):
from .models.imagery import SubsampledImage
from .models.imagery.subsample import populate_subsampled_image
cog = SubsampledImage.objects.get(id=subsampled_id)
_run_with_failure_reason(cog, populate_subsampled_image, subsampled_id)
@shared_task(time_limit=86400)
def task_checksum_file_post_save(checksumfile_id):
from .models.common import ChecksumFile
obj = ChecksumFile.objects.get(id=checksumfile_id)
_run_with_failure_reason(obj, obj.post_save_job)
@shared_task(time_limit=86400)
def task_read_point_cloud_file(pc_file_id):
from .models.threed.etl import read_point_cloud_file
from .models.threed.point_cloud import PointCloudFile
pc_file = PointCloudFile.objects.get(id=pc_file_id)
_run_with_failure_reason(pc_file, read_point_cloud_file, pc_file_id)
|
the-stack_106_20585
|
from datetime import datetime, timedelta
import jwt
import pytest
from flask import testing
from werkzeug.datastructures import Headers
# pylint: disable=wrong-import-position
# We need to override the database
from yeti.common.config import yeti_config
yeti_config.arangodb.database = yeti_config.arangodb.database + '__tests'
from yeti.auth.local import user_management
# Async jobs
from yeti.core import asyncjob
from yeti.core.entities.entity import Entity
from yeti.core.entities.malware import Malware
from yeti.core.indicators.indicator import Indicator
from yeti.core.indicators.regex import Regex
from yeti.core.indicators.yara import Yara
# Make sure we are not deleting the user's database when running tests
from yeti.core.model.arango import db
# Settings
from yeti.core.model.settings.setting import Setting
from yeti.core.model.settings.vocabs import Vocabs
from yeti.core.model.user import User
from yeti.core.observables.hostname import Hostname
from yeti.core.observables.ip import IP
from yeti.core.observables.observable import Observable
from yeti.core.observables.tag import Tag
from yeti.core.observables.url import URL
from yeti.core.relationships import Relationship
from yeti.webapp import app
class FastDummyFeed(asyncjob.AsyncJob):
def execute(self):
return 5
class SlowDummyFeed(asyncjob.AsyncJob):
def execute(self):
import time
time.sleep(3)
return 10
asyncjob.functions['FastDummyFeed'] = FastDummyFeed
asyncjob.functions['SlowDummyFeed'] = SlowDummyFeed
@pytest.fixture
def populate_feeds():
return [
FastDummyFeed(),
SlowDummyFeed()
]
@pytest.fixture
def clean_db():
# pylint: disable=protected-access
# We need to access the collections to make sure they are in the cache
Entity._get_collection()
Indicator._get_collection()
Malware._get_collection()
Observable._get_collection()
Hostname._get_collection()
Tag._get_collection()
Vocabs._get_collection()
Relationship._get_collection()
User._get_collection()
db.clear()
@pytest.fixture
def populate_settings():
v = Setting(name='malware-label-ov', type='vocab').save()
v.set_vocab(sorted([
'adware',
'backdoor'
]))
return [v]
@pytest.fixture
def populate_hostnames():
hostnames = []
for num in range(10):
hostname = Hostname.get_or_create(value='asd{0:d}.com'.format(num))
hostnames.append(hostname)
return hostnames
@pytest.fixture
def populate_urls():
urls = []
for num in range(10):
url = URL.get_or_create(value='http://asd{0:d}.com'.format(num))
urls.append(url)
return urls
@pytest.fixture
def populate_ips():
ips = []
for num in range(10):
ip = IP.get_or_create(value='127.0.0.{0:d}'.format(num))
ips.append(ip)
return ips
@pytest.fixture
def populate_malware():
malware = []
m1 = Malware(name='Gootkit', labels=['banker']).save()
malware.append(m1)
m2 = Malware(name='Sofacy', labels=['apt']).save()
malware.append(m2)
m3 = Malware(name='Zeus', labels=['trojan']).save()
malware.append(m3)
return malware
@pytest.fixture
def populate_malware_large():
malware = []
for i in range(100):
malware.append(Malware(name=f'Malware{i:03}', labels=['trojan']).save())
return malware
@pytest.fixture
def populate_regex():
r1 = Regex(
name='Zeus C2',
labels=['malicious-activity'],
description='This is how C2 URLs for Zeus usually end.',
pattern=r'gate\.php$',
valid_from='2016-01-01T00:00:00Z',
valid_until='2017-01-01T00:00:00Z',
kill_chain_phases=[
{
'kill_chain_name': 'lockheed-martin-cyber-kill-chain',
'phase_name': 'reconnaissance'
}
]
).save()
r2 = Regex(
name='AppData',
labels=['persistence'],
description='AppData directory',
pattern=r'Roaming\\AppData\\\w+$',
valid_from='2016-01-01T00:00:00Z',
valid_until='2017-01-01T00:00:00Z',
kill_chain_phases=[
{
'kill_chain_name': 'lockheed-martin-cyber-kill-chain',
'phase_name': 'reconnaissance'
}
]
).save()
return [r1, r2]
TEST_RULE = """rule yeti_rule
{
meta:
description = "Test rule"
strings:
$MZ = { 4D 5A }
condition:
$MZ
}"""
@pytest.fixture
def populate_yara_rules():
y = Yara(
name='MZ',
labels=['binary-data'],
description='This is how PEs usually start with.',
pattern=TEST_RULE,
valid_from='2016-01-01T00:00:00Z',
valid_until='2017-01-01T00:00:00Z',
kill_chain_phases=[
{
'kill_chain_name': 'lockheed-martin-cyber-kill-chain',
'phase_name': 'reconnaissance'
}
]
).save()
return [y]
# Users and authentication
@pytest.fixture
def populate_users():
admin = User(email='[email protected]', admin=True).save()
user_management.set_password(admin, 'admin')
admin.save()
user = User(email='[email protected]',).save()
user_management.set_password(user, 'user')
user.save()
return [admin, user]
# Prepare authenticated Flask testing client
app.testing = True
class AuthenticatedFlaskClient(testing.FlaskClient):
token = None
def open(self, *args, **kwargs):
api_key_headers = Headers({
'Authorization': f'Bearer: {self.token}'
})
headers = kwargs.pop('headers', Headers())
headers.extend(api_key_headers)
kwargs['headers'] = headers
return super().open(*args, **kwargs)
# pylint: disable=unused-argument,redefined-outer-name
@pytest.fixture
def authenticated_client(populate_users):
token = jwt.encode({
'sub': populate_users[0].email,
'iat': populate_users[0].last_password_change + timedelta(seconds=1),
'exp': datetime.utcnow() + timedelta(minutes=30),
}, yeti_config.core.secret_key).decode('UTF-8')
AuthenticatedFlaskClient.token = token
app.test_client_class = AuthenticatedFlaskClient
return app.test_client()
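# Illustrative sketch of how these fixtures compose in a test (not part of this
# conftest; the endpoint below is hypothetical):
#
#     def test_list_malware(clean_db, populate_malware, authenticated_client):
#         response = authenticated_client.get('/api/malware/')
#         assert response.status_code == 200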
|
the-stack_106_20586
|
from window import Window
from dialogs.dialog import FormDialog
from dialogs.text_widget import TextWidget
from dialogs.wlist import ListWidget
from utils import center_rect
from color import color_name
import config
class ColorDialog(FormDialog):
def __init__(self):
super().__init__(Window(center_rect(60, 20)), ['Close'])
self._current_pair = 0
self._pair_list = ListWidget(self.subwin(2, 2, 18, 14))
self._pair_list.set_title('Palette')
self.add_widget(self._pair_list)
self.sample = TextWidget(self.subwin(22, 3, 10, 1))
self.sample.set_color(0)
self.sample.set_text('Sample')
self.sample.disable_border()
self.add_widget(self.sample)
for i in range(1, 32):
name = color_name(i)
self._pair_list.add_item(name)
self._pair_list.listen('selection_changed', self._set_current_pair)
from config import get_app
self._query = get_app().query_colors
self._foreground_list = ListWidget(self.subwin(35, 2, 10, 10))
self._foreground_list.set_title('Fore')
self._foreground_list.listen('selection_changed', self._change_fore)
for name in get_app().get_color_names():
self._foreground_list.add_item(name)
self.add_widget(self._foreground_list)
self._background_list = ListWidget(self.subwin(47, 2, 10, 10))
self._background_list.set_title('Back')
self._background_list.listen('selection_changed', self._change_back)
for name in get_app().get_color_names():
self._background_list.add_item(name)
self.add_widget(self._background_list)
self._set_current_pair()
self.set_focus(self._pair_list)
def _set_current_pair(self):
self._current_pair = self._pair_list.get_selection()[1] + 1
pair = self._query(self._current_pair)
self._foreground_list.set_selection(pair[0])
self._background_list.set_selection(pair[1])
self.sample.set_color(self._current_pair)
def _change_fore(self):
c = self._foreground_list.get_selection()[1]
config.set_value(f'fg{self._current_pair}', c)
config.get_app().update_color(self._current_pair)
def _change_back(self):
c = self._background_list.get_selection()[1]
config.set_value(f'bg{self._current_pair}', c)
config.get_app().update_color(self._current_pair)
|
the-stack_106_20589
|
from flask_wtf import FlaskForm
from wtforms import FieldList, FormField, HiddenField, SelectField, \
StringField, SubmitField, TextAreaField, ValidationError
from wtforms.validators import DataRequired, Optional
class GroupForm(FlaskForm):
"""Subform for groups"""
group_id = HiddenField('Gruppen-ID', validators=[DataRequired()])
group_name = HiddenField('Gruppe', validators=[Optional()])
class RoleForm(FlaskForm):
"""Subform for roles"""
role_id = HiddenField('Rollen-ID', validators=[DataRequired()])
role_name = HiddenField('Rolle', validators=[Optional()])
class UserForm(FlaskForm):
"""Main form for User GUI"""
name = StringField('Name', validators=[DataRequired()])
description = TextAreaField('Beschreibung', validators=[DataRequired()])
groups = FieldList(FormField(GroupForm))
group = SelectField(
'Zugeordnete Gruppen', coerce=int, validators=[Optional()]
)
roles = FieldList(FormField(RoleForm))
role = SelectField(
'Zugeordnete Rollen', coerce=int, validators=[Optional()]
)
submit = SubmitField('Speichern')
def __init__(self, config_models, **kwargs):
"""Constructor
:param ConfigModels config_models: Helper for ORM models
"""
self.config_models = config_models
self.User = self.config_models.model('user')
# store any provided user object
self.obj = kwargs.get('obj')
super(UserForm, self).__init__(**kwargs)
def validate_name(self, field):
"""Validate uniqueness of name"""
# check if user name exists
session = self.config_models.session()
query = session.query(self.User).filter_by(name=field.data)
if self.obj:
# ignore current user
query = query.filter(self.User.id != self.obj.id)
user = query.first()
session.close()
if user is not None:
raise ValidationError(
'Ein Benutzer mit diesem Namen ist bereits vorhanden'
)
|
the-stack_106_20590
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.common import service_client
class DataProcessingClient(service_client.ServiceClient):
def _request_and_check_resp(self, request_func, uri, resp_status):
"""Make a request using specified request_func and check response
status code.
It returns a ResponseBody.
"""
resp, body = request_func(uri)
self.expected_success(resp_status, resp.status)
return service_client.ResponseBody(resp, body)
def _request_and_check_resp_data(self, request_func, uri, resp_status):
"""Make a request using specified request_func and check response
status code.
It returns pair: resp and response data.
"""
resp, body = request_func(uri)
self.expected_success(resp_status, resp.status)
return resp, body
def _request_check_and_parse_resp(self, request_func, uri, resp_status,
resource_name, *args, **kwargs):
"""Make a request using specified request_func, check response status
code and parse response body.
It returns a ResponseBody.
"""
headers = {'Content-Type': 'application/json'}
resp, body = request_func(uri, headers=headers, *args, **kwargs)
self.expected_success(resp_status, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body[resource_name])
def _request_check_and_parse_resp_list(self, request_func, uri,
resp_status, resource_name,
*args, **kwargs):
"""Make a request using specified request_func, check response status
code and parse response body.
It returns a ResponseBodyList.
"""
headers = {'Content-Type': 'application/json'}
resp, body = request_func(uri, headers=headers, *args, **kwargs)
self.expected_success(resp_status, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body[resource_name])
def list_node_group_templates(self):
"""List all node group templates for a user."""
uri = 'node-group-templates'
return self._request_check_and_parse_resp_list(self.get, uri,
200,
'node_group_templates')
def get_node_group_template(self, tmpl_id):
"""Returns the details of a single node group template."""
uri = 'node-group-templates/%s' % tmpl_id
return self._request_check_and_parse_resp(self.get, uri,
200, 'node_group_template')
def create_node_group_template(self, name, plugin_name, hadoop_version,
node_processes, flavor_id,
node_configs=None, **kwargs):
"""Creates node group template with specified params.
It supports passing additional params using kwargs and returns created
object.
"""
uri = 'node-group-templates'
body = kwargs.copy()
body.update({
'name': name,
'plugin_name': plugin_name,
'hadoop_version': hadoop_version,
'node_processes': node_processes,
'flavor_id': flavor_id,
'node_configs': node_configs or dict(),
})
return self._request_check_and_parse_resp(self.post, uri, 202,
'node_group_template',
body=json.dumps(body))
def delete_node_group_template(self, tmpl_id):
"""Deletes the specified node group template by id."""
uri = 'node-group-templates/%s' % tmpl_id
return self._request_and_check_resp(self.delete, uri, 204)
def list_plugins(self):
"""List all enabled plugins."""
uri = 'plugins'
return self._request_check_and_parse_resp_list(self.get,
uri, 200, 'plugins')
def get_plugin(self, plugin_name, plugin_version=None):
"""Returns the details of a single plugin."""
uri = 'plugins/%s' % plugin_name
if plugin_version:
uri += '/%s' % plugin_version
return self._request_check_and_parse_resp(self.get, uri, 200, 'plugin')
def list_cluster_templates(self):
"""List all cluster templates for a user."""
uri = 'cluster-templates'
return self._request_check_and_parse_resp_list(self.get, uri,
200,
'cluster_templates')
def get_cluster_template(self, tmpl_id):
"""Returns the details of a single cluster template."""
uri = 'cluster-templates/%s' % tmpl_id
return self._request_check_and_parse_resp(self.get,
uri, 200, 'cluster_template')
def create_cluster_template(self, name, plugin_name, hadoop_version,
node_groups, cluster_configs=None,
**kwargs):
"""Creates cluster template with specified params.
It supports passing additional params using kwargs and returns created
object.
"""
uri = 'cluster-templates'
body = kwargs.copy()
body.update({
'name': name,
'plugin_name': plugin_name,
'hadoop_version': hadoop_version,
'node_groups': node_groups,
'cluster_configs': cluster_configs or dict(),
})
return self._request_check_and_parse_resp(self.post, uri, 202,
'cluster_template',
body=json.dumps(body))
def delete_cluster_template(self, tmpl_id):
"""Deletes the specified cluster template by id."""
uri = 'cluster-templates/%s' % tmpl_id
return self._request_and_check_resp(self.delete, uri, 204)
def list_data_sources(self):
"""List all data sources for a user."""
uri = 'data-sources'
return self._request_check_and_parse_resp_list(self.get,
uri, 200,
'data_sources')
def get_data_source(self, source_id):
"""Returns the details of a single data source."""
uri = 'data-sources/%s' % source_id
return self._request_check_and_parse_resp(self.get,
uri, 200, 'data_source')
def create_data_source(self, name, data_source_type, url, **kwargs):
"""Creates data source with specified params.
It supports passing additional params using kwargs and returns created
object.
"""
uri = 'data-sources'
body = kwargs.copy()
body.update({
'name': name,
'type': data_source_type,
'url': url
})
return self._request_check_and_parse_resp(self.post, uri,
202, 'data_source',
body=json.dumps(body))
def delete_data_source(self, source_id):
"""Deletes the specified data source by id."""
uri = 'data-sources/%s' % source_id
return self._request_and_check_resp(self.delete, uri, 204)
def list_job_binary_internals(self):
"""List all job binary internals for a user."""
uri = 'job-binary-internals'
return self._request_check_and_parse_resp_list(self.get,
uri, 200, 'binaries')
def get_job_binary_internal(self, job_binary_id):
"""Returns the details of a single job binary internal."""
uri = 'job-binary-internals/%s' % job_binary_id
return self._request_check_and_parse_resp(self.get, uri,
200, 'job_binary_internal')
def create_job_binary_internal(self, name, data):
"""Creates job binary internal with specified params."""
uri = 'job-binary-internals/%s' % name
return self._request_check_and_parse_resp(self.put, uri, 202,
'job_binary_internal', data)
def delete_job_binary_internal(self, job_binary_id):
"""Deletes the specified job binary internal by id."""
uri = 'job-binary-internals/%s' % job_binary_id
return self._request_and_check_resp(self.delete, uri, 204)
def get_job_binary_internal_data(self, job_binary_id):
"""Returns data of a single job binary internal."""
uri = 'job-binary-internals/%s/data' % job_binary_id
return self._request_and_check_resp_data(self.get, uri, 200)
def list_job_binaries(self):
"""List all job binaries for a user."""
uri = 'job-binaries'
return self._request_check_and_parse_resp_list(self.get,
uri, 200, 'binaries')
def get_job_binary(self, job_binary_id):
"""Returns the details of a single job binary."""
uri = 'job-binaries/%s' % job_binary_id
return self._request_check_and_parse_resp(self.get,
uri, 200, 'job_binary')
def create_job_binary(self, name, url, extra=None, **kwargs):
"""Creates job binary with specified params.
It supports passing additional params using kwargs and returns created
object.
"""
uri = 'job-binaries'
body = kwargs.copy()
body.update({
'name': name,
'url': url,
'extra': extra or dict(),
})
return self._request_check_and_parse_resp(self.post, uri,
202, 'job_binary',
body=json.dumps(body))
def delete_job_binary(self, job_binary_id):
"""Deletes the specified job binary by id."""
uri = 'job-binaries/%s' % job_binary_id
return self._request_and_check_resp(self.delete, uri, 204)
def get_job_binary_data(self, job_binary_id):
"""Returns data of a single job binary."""
uri = 'job-binaries/%s/data' % job_binary_id
return self._request_and_check_resp_data(self.get, uri, 200)
def list_jobs(self):
"""List all jobs for a user."""
uri = 'jobs'
return self._request_check_and_parse_resp_list(self.get,
uri, 200, 'jobs')
def get_job(self, job_id):
"""Returns the details of a single job."""
uri = 'jobs/%s' % job_id
return self._request_check_and_parse_resp(self.get, uri, 200, 'job')
def create_job(self, name, job_type, mains, libs=None, **kwargs):
"""Creates job with specified params.
It supports passing additional params using kwargs and returns created
object.
"""
uri = 'jobs'
body = kwargs.copy()
body.update({
'name': name,
'type': job_type,
'mains': mains,
'libs': libs or list(),
})
return self._request_check_and_parse_resp(self.post, uri, 202,
'job', body=json.dumps(body))
def delete_job(self, job_id):
"""Deletes the specified job by id."""
uri = 'jobs/%s' % job_id
return self._request_and_check_resp(self.delete, uri, 204)
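# Illustrative call flow (a sketch, not part of tempest): assuming `client` is an
# already-instantiated DataProcessingClient, the argument values below are
# hypothetical placeholders.
#
#     template = client.create_node_group_template(
#         'worker-template', 'vanilla', '2.7.1', ['datanode', 'nodemanager'], '42')
#     client.get_node_group_template(template['id'])
#     client.delete_node_group_template(template['id'])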
|
the-stack_106_20592
|
from collections import deque
from itertools import combinations
def add_list_to_dict(target_dict, key, value):
if key in target_dict.keys():
target_dict[key].append(value)
else:
target_dict[key] = [value]
class Node:
def __init__(self, player, terminal, eu=0):
self.children = {}
self.player = player
self.terminal = terminal
self.private_cards = []
self.history = []
self.information = ((), ()) # (private card, history)
self.pi = 0
self.pi_mi = 0 # pi_-i
self.pi_i = 0 # pi_i
self.true_pi_mi = 0 # pi_-i following current average strategy profile
self.eu = eu
self.cv = 0
self.cfr = {} # counter-factual regret of not taking action a at history h(not information I)
self.pi_i_sum = 0 # denominator of average strategy
self.pi_sigma_sum = {} # numerator of average strategy
self.num_updates = 0
def expand_child_node(self, action, next_player, terminal, utility=0, private_cards=None):
next_node = Node(next_player, terminal, utility)
self.children[action] = next_node
self.cfr[action] = 0
self.pi_sigma_sum[action] = 0
next_node.private_cards = self.private_cards if private_cards is None else private_cards
next_node.history = self.history + [action] if self.player != -1 else self.history
next_node.information = (next_node.private_cards[next_player], tuple(next_node.history))
return next_node
class Card:
def __init__(self, rank, suit=None):
self.rank = rank
self.suit = suit
def __str__(self):
if self.suit is None:
return str(self.rank)
else:
return str(self.rank) + str(self.suit)
class KuhnPoker:
def __init__(self):
self.num_players = 2
self.deck = [i for i in range(3)]
self.information_sets = {player: {} for player in range(-1, self.num_players)}
self.root = self._build_game_tree()
def _build_game_tree(self):
stack = deque()
next_player = -1
root = Node(next_player, False)
add_list_to_dict(self.information_sets[next_player], root.information, root)
for hand_0 in combinations(self.deck, 1):
for hand_1 in combinations(self.deck, 1):
if set(hand_0) & set(hand_1):
continue
private_cards = [hand_0, hand_1, ()] # p1, p2, chance player
next_player = 0
node = root.expand_child_node(str(*hand_0) + ',' + str(*hand_1), next_player, False, private_cards=private_cards)
add_list_to_dict(self.information_sets[next_player], node.information, node)
stack.append(node)
for action in ['check', 'bet']: # player 0 actions
next_player = 1
node = node.expand_child_node(action, next_player, False)
add_list_to_dict(self.information_sets[next_player], node.information, node)
stack.append(node)
if action == 'check':
for action in ['check', 'bet']: # player 1 actions
if action == 'check':
utility = self._compute_utility(action, next_player, hand_0, hand_1)
next_player = -1
node = node.expand_child_node(action, next_player, True, utility)
add_list_to_dict(self.information_sets[next_player], node.information, node)
node = stack.pop()
if action == 'bet':
next_player = 0
node = node.expand_child_node(action, next_player, False)
add_list_to_dict(self.information_sets[next_player], node.information, node)
stack.append(node)
for action in ['fold', 'call']: # player 0 actions
utility = self._compute_utility(action, next_player, hand_0, hand_1)
next_player = -1
node = node.expand_child_node(action, next_player, True, utility)
add_list_to_dict(self.information_sets[next_player], node.information, node)
node = stack.pop()
if action == 'bet':
stack.append(node)
for action in ['fold', 'call']: # player 1 actions
utility = self._compute_utility(action, next_player, hand_0, hand_1)
next_player = -1
node = node.expand_child_node(action, next_player, True, utility)
add_list_to_dict(self.information_sets[next_player], node.information, node)
node = stack.pop()
return root
def get_nash_equilibrium(self, node: Node, strategy_profile=None):
"""
        Nash equilibrium (the alpha = 0 member of Kuhn poker's one-parameter
        equilibrium family, i.e. the first player never opens with a bet)
"""
if node.terminal:
return strategy_profile
if strategy_profile is None:
strategy_profile = {player: {} for player in range(-1, self.num_players)}
if node.information not in strategy_profile[node.player]:
hand = node.information[0]
if len(hand) == 0:
strategy_profile[node.player][node.information] = {action: 1 / len(node.children) for action in node.children}
else:
card = hand[0]
for action, child in node.children.items():
if node.player == 0:
if action == "bet":
p = 0
elif action == "check":
p = 1
elif action == "call":
if card == 0:
p = 0
elif card == 1:
p = 1/3
else:
p = 1
else:
if card == 0:
p = 1
elif card == 1:
p = 2/3
else:
p = 0
else:
if action == "bet":
if card == 0:
p = 1/3
elif card == 1:
p = 0
else:
p = 1
elif action == "check":
if card == 0:
p = 2/3
elif card == 1:
p = 1
else:
p = 0
elif action == "call":
if card == 0:
p = 0
elif card == 1:
p = 1/3
else:
p = 1
else:
if card == 0:
p = 1
elif card == 1:
p = 2/3
else:
p = 0
if node.information not in strategy_profile[node.player]:
strategy_profile[node.player][node.information] = {}
strategy_profile[node.player][node.information][action] = p
for child in node.children.values():
strategy_profile = self.get_nash_equilibrium(child, strategy_profile=strategy_profile)
return strategy_profile
def _compute_utility(self, action, player, hand_0, hand_1):
card_0, card_1 = hand_0[0], hand_1[0]
is_win = card_0 > card_1
if action == "fold":
utility = 1 if player == 1 else -1
elif action == "check":
utility = 1 if is_win else -1
elif action == "call":
utility = 2 if is_win else -2
else:
utility = 0
return utility
if __name__ == "__main__":
kuhn_poker = KuhnPoker()
|
the-stack_106_20593
|
# Lib
import logging
import numpy as np
import pandas as pd
from ..utils.progress_bar import * # checks environment and imports tqdm appropriately.
from collections import Counter
from pathlib import Path
import pickle
# App
from ..files import Manifest, get_sample_sheet, create_sample_sheet
from ..models import Channel, MethylationDataset, ArrayType
from ..utils import ensure_directory_exists, is_file_like
from .postprocess import (
calculate_beta_value,
calculate_m_value,
calculate_copy_number,
consolidate_values_for_sheet,
consolidate_control_snp,
one_sample_control_snp,
consolidate_mouse_probes,
merge_batches,
)
from .preprocess import preprocess_noob
from .raw_dataset import get_raw_datasets
from .p_value_probe_detection import _pval_sesame_preprocess
__all__ = ['SampleDataContainer', 'get_manifest', 'run_pipeline', 'consolidate_values_for_sheet']
LOGGER = logging.getLogger(__name__)
def get_manifest(raw_datasets, array_type=None, manifest_filepath=None):
"""Generates a SampleSheet instance for a given directory of processed data.
Arguments:
raw_datasets {list(RawDataset)} -- Collection of RawDataset instances that
require a manifest file for the related array_type.
Keyword Arguments:
array_type {ArrayType} -- The type of array to process. If not provided, it
will be inferred from the number of probes in the IDAT file. (default: {None})
manifest_filepath {path-like} -- Path to the manifest file. If not provided,
it will be inferred from the array_type and downloaded if necessary (default: {None})
Returns:
[Manifest] -- A Manifest instance.
"""
if array_type is None:
array_types = {dataset.array_type for dataset in raw_datasets}
if len(array_types) == 0:
raise ValueError('could not identify array type from IDATs')
elif len(array_types) != 1:
raise ValueError('IDATs with varying array types')
array_type = array_types.pop()
return Manifest(array_type, manifest_filepath)
def run_pipeline(data_dir, array_type=None, export=False, manifest_filepath=None,
sample_sheet_filepath=None, sample_name=None,
betas=False, m_value=False, make_sample_sheet=False, batch_size=None,
save_uncorrected=False, save_control=False, meta_data_frame=True,
bit='float32', poobah=False, export_poobah=False,
poobah_decimals=3, poobah_sig=0.05):
"""The main CLI processing pipeline. This does every processing step and returns a data set.
Arguments:
data_dir [required]
path where idat files can be found, and samplesheet csv.
array_type [default: autodetect]
27k, 450k, EPIC, EPIC+
If omitted, this will autodetect it.
export [default: False]
if True, exports a CSV of the processed data for each idat file in sample.
betas
if True, saves a pickle (beta_values.pkl) of beta values for all samples
m_value
if True, saves a pickle (m_values.pkl) of beta values for all samples
Note on meth/unmeth:
if either betas or m_value is True, this will also save two additional files:
'meth_values.pkl' and 'unmeth_values.pkl' with the same dataframe structure,
representing raw, uncorrected meth probe intensities for all samples. These are useful
in some methylcheck functions and load/produce results 100X faster than loading from
processed CSV output.
manifest_filepath [optional]
if you want to provide a custom manifest, provide the path. Otherwise, it will download
the appropriate one for you.
sample_sheet_filepath [optional]
            it will autodetect if omitted.
sample_name [optional, list]
if you don't want to process all samples, you can specify individual as a list.
if sample_names are specified, this will not also do batch sizes (large batches must process all samples)
make_sample_sheet [optional]
if True, generates a sample sheet from idat files called 'samplesheet.csv', so that processing will work.
From CLI pass in "--no_sample_sheet" to trigger sample sheet auto-generation.
batch_size [optional]
if set to any integer, samples will be processed and saved in batches no greater than
the specified batch size. This will yield multiple output files in the format of
"beta_values_1.pkl ... beta_values_N.pkl".
save_uncorrected [optional]
if True, adds two additional columns to the processed.csv per sample (meth and unmeth).
does not apply noob correction to these values.
save_control [optional]
if True, adds all Control and SnpI type probe values to a separate pickled dataframe,
with probes in rows and sample_name in the first column.
These non-CpG probe names are excluded from processed data and must be stored separately.
bit [optional]
Change the processed beta or m_value data_type from float64 to float16 or float32.
            This will make files smaller, often with no loss in precision.
            Note that float16 can overflow and produce "inf" instead of numbers; use float32 if that happens.
poobah [False]
If specified as True, the pipeline will run Sesame's p-value probe detection method (poobah)
on samples to remove probes that fail the signal/noise ratio on their fluorescence channels.
These will appear as NaNs in the resulting dataframes (beta_values.pkl or m_values.pkl).
All probes, regardless of p-value cutoff, will be retained in CSVs, but there will be a 'poobah_pval'
column in CSV files that methylcheck.load uses to exclude failed probes upon import at a later step.
poobah_sig [default: 0.05]
the p-value level of significance, above which, will exclude probes from output (typical range of 0.001 to 0.1)
poobah_decimals [default: 3]
The number of decimal places to round p-value column in the processed CSV output files.
Returns:
By default, if called as a function, a list of SampleDataContainer objects is returned.
betas
            if True, will return a single data frame of beta values instead of a list of SampleDataContainer objects.
Format is a "wide matrix": columns contain probes and rows contain samples.
m_value
if True, will return a single data frame of m_factor values instead of a list of SampleDataContainer objects.
Format is a "wide matrix": columns contain probes and rows contain samples.
if batch_size is set to more than ~600 samples, nothing is returned but all the files are saved. You can recreate/merge output files by loading the files using methylcheck.load().
Processing note:
The sample_sheet parser will ensure every sample has a unique name and assign one (e.g. Sample1) if missing, or append a number (e.g. _1) if not unique.
This may cause sample_sheets and processed data in dataframes to not match up. Will fix in future version.
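    Example (illustrative sketch, not from the original docs; the folder path is hypothetical):
        With betas=True this returns the consolidated beta-value dataframe and also
        writes beta_values.pkl (plus noob meth/unmeth pickles) into data_dir:
            betas_df = run_pipeline('/path/to/idat_folder', betas=True, export=True)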
"""
LOGGER.info('Running pipeline in: %s', data_dir)
if bit not in ('float64','float32','float16'):
raise ValueError("Input 'bit' must be one of ('float64','float32','float16') or ommitted.")
if sample_name:
LOGGER.info('Sample names: {0}'.format(sample_name))
if make_sample_sheet:
create_sample_sheet(data_dir)
sample_sheet = get_sample_sheet(data_dir, filepath=sample_sheet_filepath)
samples = sample_sheet.get_samples()
if sample_sheet.renamed_fields != {}:
show_fields = []
for k,v in sample_sheet.renamed_fields.items():
if v != k:
show_fields.append(f"{k} --> {v}\n")
else:
show_fields.append(f"{k}\n")
LOGGER.info(f"Found {len(show_fields)} additional fields in sample_sheet:\n{''.join(show_fields)}")
batches = []
batch = []
sample_id_counter = 1
if batch_size:
if type(batch_size) != int or batch_size < 1:
raise ValueError('batch_size must be an integer greater than 0')
for sample in samples:
if sample_name and sample.name not in sample_name:
continue
# batch uses Sample_Name, so ensure these exist
if sample.name in (None,''):
sample.name = f'Sample_{sample_id_counter}'
sample_id_counter += 1
# and are unique.
if Counter((s.name for s in samples)).get(sample.name) > 1:
sample.name = f'{sample.name}_{sample_id_counter}'
sample_id_counter += 1
if len(batch) < batch_size:
batch.append(sample.name)
else:
batches.append(batch)
batch = []
batch.append(sample.name)
batches.append(batch)
else:
for sample in samples:
if sample_name and sample.name not in sample_name:
continue
# batch uses Sample_Name, so ensure these exist
if sample.name in (None,''):
sample.name = f'Sample_{sample_id_counter}'
sample_id_counter += 1
# and are unique.
if Counter((s.name for s in samples)).get(sample.name) > 1:
sample.name = f'{sample.name}_{sample_id_counter}'
sample_id_counter += 1
batch.append(sample.name)
batches.append(batch)
temp_data_pickles = []
control_snps = {}
#data_containers = [] # returned when this runs in interpreter, and < 200 samples
# v1.3.0 memory fix: save each batch_data_containers object to disk as temp, then load and combine at end.
# 200 samples still uses 4.8GB of memory/disk space (float64)
for batch_num, batch in enumerate(batches, 1):
raw_datasets = get_raw_datasets(sample_sheet, sample_name=batch)
manifest = get_manifest(raw_datasets, array_type, manifest_filepath) # this allows each batch to be a different array type; but not implemented yet. common with older GEO sets.
batch_data_containers = []
export_paths = set() # inform CLI user where to look
for raw_dataset in tqdm(raw_datasets, total=len(raw_datasets), desc="Processing samples"):
data_container = SampleDataContainer(
raw_dataset=raw_dataset,
manifest=manifest,
retain_uncorrected_probe_intensities=save_uncorrected,
bit=bit,
pval=poobah,
poobah_decimals=poobah_decimals,
)
# data_frame['noob'] doesn't exist at this point.
data_container.process_all()
if export:
output_path = data_container.sample.get_export_filepath()
data_container.export(output_path)
export_paths.add(output_path)
if save_control: # Process and consolidate now. Keep in memory. These files are small.
sample_id = f"{data_container.sample.sentrix_id}_{data_container.sample.sentrix_position}"
control_df = one_sample_control_snp(data_container)
control_snps[sample_id] = control_df
# now I can drop all the unneeded stuff from each SampleDataContainer (400MB per sample becomes 92MB)
# these are stored in SampleDataContainer.__data_frame for processing.
del data_container.manifest
del data_container.raw_dataset
del data_container.methylated
del data_container.unmethylated
batch_data_containers.append(data_container)
LOGGER.info('[finished SampleDataContainer processing]')
if betas:
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='beta_value', bit=bit, poobah=poobah)
if not batch_size:
pkl_name = 'beta_values.pkl'
else:
pkl_name = f'beta_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
if m_value:
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='m_value', bit=bit, poobah=poobah)
if not batch_size:
pkl_name = 'm_values.pkl'
else:
pkl_name = f'm_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
if betas or m_value:
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='noob_meth', bit=bit)
if not batch_size:
pkl_name = 'noob_meth_values.pkl'
else:
pkl_name = f'noob_meth_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32') if df.isna().sum().sum() > 0 else df.astype('int16')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
# TWO PARTS
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='noob_unmeth', bit=bit)
if not batch_size:
pkl_name = 'noob_unmeth_values.pkl'
else:
pkl_name = f'noob_unmeth_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32') if df.isna().sum().sum() > 0 else df.astype('int16')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
if (betas or m_value) and save_uncorrected:
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='meth', bit=bit)
if not batch_size:
pkl_name = 'meth_values.pkl'
else:
pkl_name = f'meth_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32') if df.isna().sum().sum() > 0 else df.astype('int16')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
# TWO PARTS
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='unmeth', bit=bit)
if not batch_size:
pkl_name = 'unmeth_values.pkl'
else:
pkl_name = f'unmeth_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
df = df.astype('float32') if df.isna().sum().sum() > 0 else df.astype('int16')
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
if manifest.array_type == ArrayType.ILLUMINA_MOUSE:
# save mouse specific probes
if not batch_size:
mouse_probe_filename = f'mouse_probes.pkl'
else:
mouse_probe_filename = f'mouse_probes_{batch_num}.pkl'
consolidate_mouse_probes(batch_data_containers, Path(data_dir, mouse_probe_filename))
LOGGER.info(f"saved {mouse_probe_filename}")
if export:
export_path_parents = list(set([str(Path(e).parent) for e in export_paths]))
LOGGER.info(f"[!] Exported results (csv) to: {export_path_parents}")
if export_poobah:
            # this option saves a pickled dataframe of the p-values for all samples, with sample_ids as column headings and probe names as the index.
            # poobah is passed as False here so that the p-values themselves are not masked to NaN by the significance cutoff.
df = consolidate_values_for_sheet(batch_data_containers, postprocess_func_colname='poobah_pval', bit=bit, poobah=False, poobah_sig=poobah_sig)
if not batch_size:
pkl_name = 'poobah_values.pkl'
else:
pkl_name = f'poobah_values_{batch_num}.pkl'
if df.shape[1] > df.shape[0]:
df = df.transpose() # put probes as columns for faster loading.
pd.to_pickle(df, Path(data_dir,pkl_name))
LOGGER.info(f"saved {pkl_name}")
# v1.3.0 fixing mem probs: pickling each batch_data_containers object then reloading it later.
# consolidating data_containers this will break with really large sample sets, so skip here.
#if batch_size and batch_size >= 200:
# continue
#data_containers.extend(batch_data_containers)
pkl_name = f"_temp_data_{batch_num}.pkl"
with open(Path(data_dir,pkl_name), 'wb') as temp_data:
pickle.dump(batch_data_containers, temp_data)
temp_data_pickles.append(pkl_name)
del batch_data_containers
if meta_data_frame == True:
#sample_sheet.fields is a complete mapping of original and renamed_fields
cols = list(sample_sheet.fields.values()) + ['Sample_ID']
meta_frame = pd.DataFrame(columns=cols)
field_classattr_lookup = {
'Sentrix_ID': 'sentrix_id',
'Sentrix_Position': 'sentrix_position',
'Sample_Group': 'group',
'Sample_Name': 'name',
'Sample_Plate': 'plate',
'Pool_ID': 'pool',
'Sample_Well': 'well',
'GSM_ID': 'GSM_ID',
'Sample_Type': 'type',
'Sub_Type': 'sub_type',
'Control': 'is_control',
}
# row contains the renamed fields, and pulls in the original data from sample_sheet
for sample in samples:
row = {}
for field in sample_sheet.fields.keys():
if sample_sheet.fields[field] in field_classattr_lookup:
row[ sample_sheet.fields[field] ] = getattr(sample, field_classattr_lookup[sample_sheet.fields[field]] )
elif field in sample_sheet.renamed_fields:
row[ sample_sheet.fields[field] ] = getattr(sample, sample_sheet.renamed_fields[field])
else:
LOGGER.info(f"extra column: {field} ignored")
# row[ sample_sheet.fields[field] ] = getattr(sample, field)
# add the UID that matches m_value/beta value pickles
#... unless there's a GSM_ID too
# appears that methylprep m_value and beta files only include ID_Position as column names.
#if row.get('GSM_ID') != None:
# row['Sample_ID'] = f"{row['GSM_ID']}_{row['Sentrix_ID']}_{row['Sentrix_Position']}"
#else:
row['Sample_ID'] = f"{row['Sentrix_ID']}_{row['Sentrix_Position']}"
meta_frame = meta_frame.append(row, ignore_index=True)
meta_frame_filename = f'sample_sheet_meta_data.pkl'
meta_frame.to_pickle(Path(data_dir,meta_frame_filename))
LOGGER.info(f"Exported meta data to {meta_frame_filename}")
# FIXED in v1.3.0
# moved consolidate_control_snp() from this spot to earlier in pipeline, because it uses
# raw_dataset and this gets removed before pickling _temp files. Here I pickle.dump the SNPS.
if save_control:
control_filename = f'control_probes.pkl'
with open(Path(data_dir, control_filename), 'wb') as control_file:
pickle.dump(control_snps, control_file)
LOGGER.info(f"saved {control_filename}")
# batch processing done; consolidate and return data. This uses much more memory, but not called if in batch mode.
if batch_size and batch_size >= 50:
        print("Because the batch size was >=50 samples, files are saved but no data objects are returned.")
        # per-batch data containers were already released at the end of each batch iteration above
for temp_data in temp_data_pickles:
temp_file = Path(data_dir, temp_data)
temp_file.unlink(missing_ok=True) # delete it
return
# consolidate batches and delete parts, if possible
for file_type in ['beta_values', 'm_values', 'meth_values', 'unmeth_values',
'noob_meth_values', 'noob_unmeth_values', 'mouse_probes', 'poobah_values']:
test_parts = list([str(temp_file) for temp_file in Path(data_dir).rglob(f'{file_type}*.pkl')])
num_batches = len(test_parts)
# ensures that only the file_types that appear to be selected get merged.
#print(f"DEBUG num_batches {num_batches}, batch_size {batch_size}, file_type {file_type}")
if batch_size and num_batches >= 1: #--- if the batch size was larger than the number of total samples, this will still drop the _1
merge_batches(num_batches, data_dir, file_type)
# reload all the big stuff -- after everything important is done.
# attempts to consolidate all the batch_files below, if they'll fit in memory.
data_containers = []
for temp_data in temp_data_pickles:
temp_file = Path(data_dir, temp_data)
if temp_file.exists(): #possibly user deletes file while processing, since these are big
with open(temp_file,'rb') as _file:
batch_data_containers = pickle.load(_file)
data_containers.extend(batch_data_containers)
del batch_data_containers
temp_file.unlink() # delete it after loading.
if betas:
return consolidate_values_for_sheet(data_containers, postprocess_func_colname='beta_value')
elif m_value:
return consolidate_values_for_sheet(data_containers, postprocess_func_colname='m_value')
else:
return data_containers
class SampleDataContainer():
"""Wrapper that provides easy access to slices of data for a Sample,
its RawDataset, and the pre-configured MethylationDataset subsets of probes.
Arguments:
raw_dataset {RawDataset} -- A sample's RawDataset for a single well on the processed array.
manifest {Manifest} -- The Manifest for the correlated RawDataset's array type.
bit (default: float64) -- option to store data as float16 or float32 to save space.
pval (default: False) -- whether to apply p-value-detection algorithm to remove
        unreliable probes (based on signal/noise ratio of fluorescence)
uses the sesame method (pOOBah) based on out of band background levels
    Jan 2020: added .snp_(un)methylated property. used in postprocess.consolidate_control_snp()
Mar 2020: added p-value detection option
Mar 2020: added mouse probe post-processing separation
"""
__data_frame = None
def __init__(self, raw_dataset, manifest, retain_uncorrected_probe_intensities=False,
bit='float32', pval=False, poobah_decimals=3):
self.manifest = manifest
self.pval = pval
self.poobah_decimals = poobah_decimals
self.raw_dataset = raw_dataset
self.sample = raw_dataset.sample
self.retain_uncorrected_probe_intensities=retain_uncorrected_probe_intensities
self.methylated = MethylationDataset.methylated(raw_dataset, manifest)
self.unmethylated = MethylationDataset.unmethylated(raw_dataset, manifest)
self.snp_methylated = MethylationDataset.snp_methylated(raw_dataset, manifest)
self.snp_unmethylated = MethylationDataset.snp_unmethylated(raw_dataset, manifest)
        # mouse probes are processed within the normal meth/unmeth sets, then split at the end of the preprocessing step.
#self.mouse_methylated = MethylationDataset.mouse_methylated(raw_dataset, manifest)
#self.mouse_unmethylated = MethylationDataset.mouse_unmethylated(raw_dataset, manifest)
self.oob_controls = raw_dataset.get_oob_controls(manifest)
self.data_type = bit #(float64, float32, or float16)
        if self.data_type is None:
self.data_type = 'float32'
if self.data_type not in ('float64','float32','float16'):
raise ValueError(f"invalid data_type: {self.data_type} should be one of ('float64','float32','float16')")
@property
def fg_green(self):
return self.raw_dataset.get_fg_values(self.manifest, Channel.GREEN)
@property
def fg_red(self):
return self.raw_dataset.get_fg_values(self.manifest, Channel.RED)
@property
def ctrl_green(self):
return self.raw_dataset.get_fg_controls(self.manifest, Channel.GREEN)
@property
def ctrl_red(self):
return self.raw_dataset.get_fg_controls(self.manifest, Channel.RED)
@property
def oob_green(self):
return self.oob_controls[Channel.GREEN]
@property
def oob_red(self):
return self.oob_controls[Channel.RED]
def preprocess(self):
""" combines the methylated and unmethylated columns from the SampleDataContainer. """
        if self.__data_frame is None:
if self.retain_uncorrected_probe_intensities == True:
uncorrected_meth = self.methylated.data_frame.copy()['mean_value'].astype('float32')
uncorrected_unmeth = self.unmethylated.data_frame.copy()['mean_value'].astype('float32')
# could be int16, if missing values didn't happen (cuts file size in half)
if uncorrected_meth.isna().sum() == 0 and uncorrected_unmeth.isna().sum() == 0:
uncorrected_meth = uncorrected_meth.astype('int16')
uncorrected_unmeth = uncorrected_unmeth.astype('int16')
if self.pval == True:
pval_probes_df = _pval_sesame_preprocess(self)
# output: df with one column named 'poobah_pval'
preprocess_noob(self) # apply corrections: bg subtract, then noob (in preprocess.py)
methylated = self.methylated.data_frame[['noob']]
unmethylated = self.unmethylated.data_frame[['noob']]
self.__data_frame = methylated.join(
unmethylated,
lsuffix='_meth',
rsuffix='_unmeth',
)
if self.pval == True:
self.__data_frame = self.__data_frame.merge(pval_probes_df, how='inner', left_index=True, right_index=True)
if self.retain_uncorrected_probe_intensities == True:
self.__data_frame['meth'] = uncorrected_meth
self.__data_frame['unmeth'] = uncorrected_unmeth
# reduce to float32 during processing. final output may be 16,32,64 in _postprocess() + export()
self.__data_frame = self.__data_frame.astype('float32')
if self.poobah_decimals != 3 and 'poobah_pval' in self.__data_frame.columns:
other_columns = list(self.__data_frame.columns)
other_columns.remove('poobah_pval')
other_columns = {column:3 for column in other_columns}
self.__data_frame = self.__data_frame.round(other_columns)
self.__data_frame = self.__data_frame.round({'poobah_pval': self.poobah_decimals})
else:
self.__data_frame = self.__data_frame.round(3)
# here, separate the mouse from normal probes and store mouse separately.
# normal_probes_mask = (self.manifest.data_frame.index.str.startswith('cg', na=False)) | (self.manifest.data_frame.index.str.startswith('ch', na=False))
mouse_probes_mask = (self.manifest.data_frame.index.str.startswith('mu', na=False)) | (self.manifest.data_frame.index.str.startswith('rp', na=False))
mouse_probes = self.manifest.data_frame[mouse_probes_mask]
mouse_probe_count = mouse_probes.shape[0]
self.mouse_data_frame = self.__data_frame[self.__data_frame.index.isin(mouse_probes.index)]
if mouse_probe_count > 0:
LOGGER.debug(f"{mouse_probe_count} mouse probes ->> {self.mouse_data_frame.shape[0]} in idat")
# now remove these from normal list. confirmed they appear in the processed.csv if this line is not here.
self.__data_frame = self.__data_frame[~self.__data_frame.index.isin(mouse_probes.index)]
return self.__data_frame
def process_m_value(self, input_dataframe):
"""Calculate M value from methylation data"""
return self._postprocess(input_dataframe, calculate_m_value, 'm_value')
def process_beta_value(self, input_dataframe):
"""Calculate Beta value from methylation data"""
return self._postprocess(input_dataframe, calculate_beta_value, 'beta_value')
def process_copy_number(self, input_dataframe):
"""Calculate copy number value from methylation data"""
return self._postprocess(input_dataframe, calculate_copy_number, 'cm_value')
def process_all(self):
"""Runs all pre and post-processing calculations for the dataset."""
data_frame = self.preprocess()
# also creates a self.mouse_data_frame for mouse specific probes with 'noob_meth' and 'noob_unmeth' columns here.
data_frame = self.process_beta_value(data_frame)
data_frame = self.process_m_value(data_frame)
self.__data_frame = data_frame
if self.manifest.array_type == ArrayType.ILLUMINA_MOUSE:
self.mouse_data_frame = self.process_beta_value(self.mouse_data_frame)
self.mouse_data_frame = self.process_m_value(self.mouse_data_frame)
self.mouse_data_frame = self.process_copy_number(self.mouse_data_frame)
return data_frame
def export(self, output_path):
ensure_directory_exists(output_path)
# ensure smallest possible csv files
self.__data_frame = self.__data_frame.round({'noob_meth':0, 'noob_unmeth':0, 'm_value':3, 'beta_value':3,
'meth':0, 'unmeth':0, 'poobah_pval':self.poobah_decimals})
try:
self.__data_frame['noob_meth'] = self.__data_frame['noob_meth'].astype(int, copy=False)
self.__data_frame['noob_unmeth'] = self.__data_frame['noob_unmeth'].astype(int, copy=False)
except ValueError as e:
num_missing = self.__data_frame['noob_unmeth'].isna().sum() + self.__data_frame['noob_meth'].isna().sum()
LOGGER.warning(f'{output_path} contains {num_missing} missing/infinite NOOB meth/unmeth probe values')
# these are the raw, uncorrected values
if 'meth' in self.__data_frame.columns and 'unmeth' in self.__data_frame.columns:
try:
self.__data_frame['meth'] = self.__data_frame['meth'].astype('float16', copy=False)
self.__data_frame['unmeth'] = self.__data_frame['unmeth'].astype('float16', copy=False)
except ValueError as e:
num_missing = self.__data_frame['meth'].isna().sum() + self.__data_frame['unmeth'].isna().sum()
LOGGER.warning(f'{output_path} contains {num_missing} missing/infinite RAW meth/unmeth probe values')
self.__data_frame.to_csv(output_path)
def _postprocess(self, input_dataframe, postprocess_func, header):
input_dataframe[header] = postprocess_func(
input_dataframe['noob_meth'].values,
input_dataframe['noob_unmeth'].values,
)
if self.data_type != 'float64':
#np.seterr(over='raise', divide='raise')
try:
LOGGER.debug('Converting %s to %s: %s', header, self.data_type, self.raw_dataset.sample)
input_dataframe[header] = input_dataframe[header].astype(self.data_type)
except Exception as e:
LOGGER.warning(f'._postprocess: {e}')
LOGGER.info('%s failed for %s, using float64 instead: %s', self.data_type, header, self.raw_dataset.sample)
input_dataframe[header] = input_dataframe[header].astype('float64')
return input_dataframe
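# --- Usage sketch (an addition, not from the original source) ---
# A minimal, hedged example of driving the pipeline defined above. It assumes this
# is methylprep's pipeline module exposing the `run_pipeline` function documented
# earlier, that the package is importable, and that `data_dir` points at a folder
# of .idat files with a sample sheet; the path below is a placeholder.
if __name__ == '__main__':
    # betas=True returns a single wide DataFrame of beta values;
    # export=True also writes one processed CSV per sample.
    betas_df = run_pipeline(
        'data/GSE00000',  # hypothetical data directory
        betas=True,
        export=True,
        poobah=True,
        save_control=True,
    )
    print(betas_df.shape)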
|
the-stack_106_20594
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <[email protected]>
# License: http://pysnmp.sf.net/license.html
#
# Copyright (C) 2008 Truelite Srl <[email protected]>
# Author: Filippo Giunchedi <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the BSD 2-Clause License as shipped with pysnmp.
#
# Description: Transport dispatcher based on twisted.internet.reactor
#
import sys, time, traceback
from twisted.internet import reactor, task
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class TwistedDispatcher(AbstractTransportDispatcher):
"""TransportDispatcher based on twisted.internet.reactor"""
def __init__(self, *args, **kwargs):
AbstractTransportDispatcher.__init__(self)
self.__transportCount = 0
if 'timeout' in kwargs:
self.setTimerResolution(kwargs['timeout'])
self.loopingcall = task.LoopingCall(
lambda self=self: self.handleTimerTick(time.time())
)
def runDispatcher(self, timeout=0.0):
if not reactor.running:
try:
reactor.run()
except KeyboardInterrupt:
raise
except:
raise PySnmpError('reactor error: %s' % ';'.join(traceback.format_exception(*sys.exc_info())))
# jobstarted/jobfinished might be okay as-is
def registerTransport(self, tDomain, transport):
if not self.loopingcall.running and self.getTimerResolution() > 0:
self.loopingcall.start(self.getTimerResolution(), now=False)
AbstractTransportDispatcher.registerTransport(
self, tDomain, transport
)
self.__transportCount += 1
def unregisterTransport(self, tDomain):
t = AbstractTransportDispatcher.getTransport(self, tDomain)
if t is not None:
AbstractTransportDispatcher.unregisterTransport(self, tDomain)
self.__transportCount -= 1
# The last transport has been removed, stop the timeout
if self.__transportCount == 0 and self.loopingcall.running:
self.loopingcall.stop()
|
the-stack_106_20595
|
import argparse
import os
import platform
import sys
import socket
from typing import List, Optional, Union, Callable
import requests
from pygments import __version__ as pygments_version
from requests import __version__ as requests_version
from . import __version__ as httpie_version
from .cli.constants import OUT_REQ_BODY
from .cli.nested_json import HTTPieSyntaxError
from .client import collect_messages
from .context import Environment
from .downloads import Downloader
from .models import (
RequestsMessageKind,
OutputOptions,
)
from .output.writer import write_message, write_stream, MESSAGE_SEPARATOR_BYTES
from .plugins.registry import plugin_manager
from .status import ExitStatus, http_status_to_exit_status
from .utils import unwrap_context
# noinspection PyDefaultArgument
def raw_main(
parser: argparse.ArgumentParser,
main_program: Callable[[argparse.Namespace, Environment], ExitStatus],
args: List[Union[str, bytes]] = sys.argv,
env: Environment = Environment()
) -> ExitStatus:
program_name, *args = args
env.program_name = os.path.basename(program_name)
args = decode_raw_args(args, env.stdin_encoding)
plugin_manager.load_installed_plugins(env.config.plugins_dir)
if env.config.default_options:
args = env.config.default_options + args
include_debug_info = '--debug' in args
include_traceback = include_debug_info or '--traceback' in args
def handle_generic_error(e, annotation=None):
msg = str(e)
if hasattr(e, 'request'):
request = e.request
if hasattr(request, 'url'):
msg = (
f'{msg} while doing a {request.method}'
f' request to URL: {request.url}'
)
if annotation:
msg += annotation
env.log_error(f'{type(e).__name__}: {msg}')
if include_traceback:
raise
if include_debug_info:
print_debug_info(env)
if args == ['--debug']:
return ExitStatus.SUCCESS
exit_status = ExitStatus.SUCCESS
try:
parsed_args = parser.parse_args(
args=args,
env=env,
)
except HTTPieSyntaxError as exc:
env.stderr.write(str(exc) + "\n")
if include_traceback:
raise
exit_status = ExitStatus.ERROR
except KeyboardInterrupt:
env.stderr.write('\n')
if include_traceback:
raise
exit_status = ExitStatus.ERROR_CTRL_C
except SystemExit as e:
if e.code != ExitStatus.SUCCESS:
env.stderr.write('\n')
if include_traceback:
raise
exit_status = ExitStatus.ERROR
else:
try:
exit_status = main_program(
args=parsed_args,
env=env,
)
except KeyboardInterrupt:
env.stderr.write('\n')
if include_traceback:
raise
exit_status = ExitStatus.ERROR_CTRL_C
except SystemExit as e:
if e.code != ExitStatus.SUCCESS:
env.stderr.write('\n')
if include_traceback:
raise
exit_status = ExitStatus.ERROR
except requests.Timeout:
exit_status = ExitStatus.ERROR_TIMEOUT
env.log_error(f'Request timed out ({parsed_args.timeout}s).')
except requests.TooManyRedirects:
exit_status = ExitStatus.ERROR_TOO_MANY_REDIRECTS
env.log_error(
f'Too many redirects'
f' (--max-redirects={parsed_args.max_redirects}).'
)
except requests.exceptions.ConnectionError as exc:
annotation = None
original_exc = unwrap_context(exc)
if isinstance(original_exc, socket.gaierror):
if original_exc.errno == socket.EAI_AGAIN:
annotation = '\nCouldn’t connect to a DNS server. Please check your connection and try again.'
elif original_exc.errno == socket.EAI_NONAME:
annotation = '\nCouldn’t resolve the given hostname. Please check the URL and try again.'
propagated_exc = original_exc
else:
propagated_exc = exc
handle_generic_error(propagated_exc, annotation=annotation)
exit_status = ExitStatus.ERROR
except Exception as e:
# TODO: Further distinction between expected and unexpected errors.
handle_generic_error(e)
exit_status = ExitStatus.ERROR
return exit_status
def main(
args: List[Union[str, bytes]] = sys.argv,
env: Environment = Environment()
) -> ExitStatus:
"""
The main function.
Pre-process args, handle some special types of invocations,
and run the main program with error handling.
Return exit status code.
"""
from .cli.definition import parser
return raw_main(
parser=parser,
main_program=program,
args=args,
env=env
)
def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
"""
The main program without error handling.
"""
# TODO: Refactor and drastically simplify, especially so that the separator logic is elsewhere.
exit_status = ExitStatus.SUCCESS
downloader = None
initial_request: Optional[requests.PreparedRequest] = None
final_response: Optional[requests.Response] = None
def separate():
getattr(env.stdout, 'buffer', env.stdout).write(MESSAGE_SEPARATOR_BYTES)
def request_body_read_callback(chunk: bytes):
should_pipe_to_stdout = bool(
# Request body output desired
OUT_REQ_BODY in args.output_options
# & not `.read()` already pre-request (e.g., for compression)
and initial_request
# & non-EOF chunk
and chunk
)
if should_pipe_to_stdout:
msg = requests.PreparedRequest()
msg.is_body_upload_chunk = True
msg.body = chunk
msg.headers = initial_request.headers
msg_output_options = OutputOptions.from_message(msg, body=True, headers=False)
write_message(requests_message=msg, env=env, args=args, output_options=msg_output_options)
try:
if args.download:
args.follow = True # --download implies --follow.
downloader = Downloader(output_file=args.output_file, progress_file=env.stderr, resume=args.download_resume)
downloader.pre_request(args.headers)
messages = collect_messages(env, args=args,
request_body_read_callback=request_body_read_callback)
force_separator = False
prev_with_body = False
# Process messages as they’re generated
for message in messages:
output_options = OutputOptions.from_message(message, args.output_options)
do_write_body = output_options.body
if prev_with_body and output_options.any() and (force_separator or not env.stdout_isatty):
# Separate after a previous message with body, if needed. See test_tokens.py.
separate()
force_separator = False
if output_options.kind is RequestsMessageKind.REQUEST:
if not initial_request:
initial_request = message
if output_options.body:
is_streamed_upload = not isinstance(message.body, (str, bytes))
do_write_body = not is_streamed_upload
force_separator = is_streamed_upload and env.stdout_isatty
else:
final_response = message
if args.check_status or downloader:
exit_status = http_status_to_exit_status(http_status=message.status_code, follow=args.follow)
if exit_status != ExitStatus.SUCCESS and (not env.stdout_isatty or args.quiet == 1):
env.log_error(f'HTTP {message.raw.status} {message.raw.reason}', level='warning')
write_message(requests_message=message, env=env, args=args, output_options=output_options._replace(
body=do_write_body
))
prev_with_body = output_options.body
# Cleanup
if force_separator:
separate()
if downloader and exit_status == ExitStatus.SUCCESS:
# Last response body download.
download_stream, download_to = downloader.start(
initial_url=initial_request.url,
final_response=final_response,
)
write_stream(stream=download_stream, outfile=download_to, flush=False)
downloader.finish()
if downloader.interrupted:
exit_status = ExitStatus.ERROR
env.log_error(
f'Incomplete download: size={downloader.status.total_size};'
f' downloaded={downloader.status.downloaded}'
)
return exit_status
finally:
if downloader and not downloader.finished:
downloader.failed()
if args.output_file and args.output_file_specified:
args.output_file.close()
def print_debug_info(env: Environment):
env.stderr.writelines([
f'HTTPie {httpie_version}\n',
f'Requests {requests_version}\n',
f'Pygments {pygments_version}\n',
f'Python {sys.version}\n{sys.executable}\n',
f'{platform.system()} {platform.release()}',
])
env.stderr.write('\n\n')
env.stderr.write(repr(env))
env.stderr.write('\n\n')
env.stderr.write(repr(plugin_manager))
env.stderr.write('\n')
def decode_raw_args(
args: List[Union[str, bytes]],
stdin_encoding: str
) -> List[str]:
"""
Convert all bytes args to str
by decoding them using stdin encoding.
"""
return [
arg.decode(stdin_encoding)
if type(arg) is bytes else arg
for arg in args
]
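# --- Usage sketch (an addition, not part of HTTPie's source) ---
# Illustrates how this module is normally driven: a console entry point passes
# sys.argv to `main()`, which parses the CLI, runs `program()` under the error
# handling in `raw_main()`, and returns an ExitStatus used as the process exit code.
if __name__ == '__main__':  # pragma: no cover
    sys.exit(main(args=sys.argv))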
|
the-stack_106_20597
|
"""
NVLAMB optimizer.
"""
import collections
import math
import torch
from tensorboardX import SummaryWriter
from torch.optim import Optimizer
# not finished yet
def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int):
"""Log a histogram of trust ratio scalars in across layers."""
results = collections.defaultdict(list)
for group in optimizer.param_groups:
for p in group['params']:
state = optimizer.state[p]
for i in ('weight_norm', 'adam_norm', 'trust_ratio'):
if i in state:
results[i].append(state[i])
for k, v in results.items():
event_writer.add_histogram(f'lamb/{k}', torch.tensor(v), token_count)
class NVLAMB(Optimizer):
"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super(NVLAMB, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast.
step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
adam_step.add_(group['weight_decay'], p.data)
adam_norm = adam_step.pow(2).sum().sqrt()
if weight_norm == 0 or adam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / adam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(-step_size * trust_ratio, adam_step)
return loss
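# --- Usage sketch (an addition, not from the original source) ---
# A small, hedged example of fitting a toy linear regression with the NVLAMB
# optimizer defined above; the model, data and hyperparameters are illustrative only.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = torch.nn.Linear(10, 1)
    optimizer = NVLAMB(model.parameters(), lr=1e-2, weight_decay=0.01)
    inputs = torch.randn(64, 10)
    targets = torch.randn(64, 1)
    for step in range(5):
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        loss.backward()
        optimizer.step()
        print(f'step {step}: loss={loss.item():.4f}')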
|
the-stack_106_20598
|
from PyQt5.QtWidgets import QFileDialog
from ...i18n import _
from ...plugin import run_hook
from .util import ButtonsTextEdit, MessageBoxMixin, ColorScheme
class ShowQRTextEdit(ButtonsTextEdit):
def __init__(self, text=None):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(1)
icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.addButton(icon, self.qr_show, _("Show as QR code"))
run_hook('show_text_edit', self)
def qr_show(self):
from .qrcodewidget import QRDialog
try:
s = str(self.toPlainText())
except:
s = self.toPlainText()
QRDialog(s).exec_()
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Show as QR code"), self.qr_show)
m.exec_(e.globalPos())
class ScanQRTextEdit(ButtonsTextEdit, MessageBoxMixin):
def __init__(self, text="", allow_multi=False):
ButtonsTextEdit.__init__(self, text)
self.allow_multi = allow_multi
self.setReadOnly(0)
self.addButton("file.png", self.file_input, _("Read file"))
icon = "camera_white.png" if ColorScheme.dark_scheme else "camera_dark.png"
self.addButton(icon, self.qr_input, _("Read QR code"))
run_hook('scan_text_edit', self)
def file_input(self):
fileName, __ = QFileDialog.getOpenFileName(self, 'select file')
if not fileName:
return
try:
with open(fileName, "r") as f:
data = f.read()
except BaseException as e:
self.show_error(_('Error opening file') + ':\n' + str(e))
else:
self.setText(data)
def qr_input(self):
from ... import qrscanner, get_config
try:
data = qrscanner.scan_barcode(get_config().get_video_device())
except BaseException as e:
self.show_error(str(e))
data = ''
if not data:
data = ''
if self.allow_multi:
new_text = self.text() + data + '\n'
else:
new_text = data
self.setText(new_text)
return data
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Read QR code"), self.qr_input)
m.exec_(e.globalPos())
|
the-stack_106_20599
|
# Author: Hansheng Zhao <[email protected]> (https://www.zhs.me)
# import required setup libraries
from setuptools import setup, find_packages
from codecs import open
from os import path
# import library for metadata
from decibel import __author__, __license__, __version__
# project absolute directory
DIRECTORY = path.abspath(path.dirname(__file__))
# project readme file content
with open(
path.join(DIRECTORY, 'README.rst'), encoding = 'UTF8'
) as file_descriptor:
PROJECT_README = file_descriptor.read()
# project required dependencies
with open(
path.join(DIRECTORY, 'requirements.txt'), encoding = 'UTF8'
) as file_descriptor:
REQUIREMENTS = tuple(line for line in file_descriptor if line)
# project setup parameters
setup(
name = 'Decibel',
version = __version__,
description = 'Python database management thin wrapper.',
long_description = PROJECT_README,
url = 'https://www.github.com/copyrighthero/Decibel',
download_url = 'https://www.github.com/copyrighthero/Decibel',
author = __author__,
author_email = '[email protected]',
license = __license__,
classifiers = (
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Communications',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development',
'Topic :: System',
'Topic :: Utilities'
),
keywords = 'Database Management Thin-wrapper Library',
py_modules = ("decibel", ),
packages = find_packages(exclude = ()),
install_requires = REQUIREMENTS,
package_data = {},
data_files = (),
entry_points = {},
project_urls = {
'Source': 'https://www.github.com/copyrighthero/Decibel'
}
)
|
the-stack_106_20601
|
import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="{{ cookiecutter.package_name }}",
version="{{ cookiecutter.package_version }}",
url="{{ cookiecutter.package_url }}",
license='MIT',
author="{{ cookiecutter.author_name }}",
author_email="{{ cookiecutter.author_email }}",
description="{{ cookiecutter.package_description }}",
long_description=read("README.rst"),
packages=find_packages(exclude=('tests',)),
install_requires=[],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
|
the-stack_106_20605
|
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError('type {} cannot be converted to tensor.'.format(
type(data)))
@PIPELINES.register_module
class ToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + '(keys={})'.format(self.keys)
@PIPELINES.register_module
class ImageToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
results[key] = to_tensor(results[key].transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + '(keys={})'.format(self.keys)
@PIPELINES.register_module
class Transpose(object):
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + '(keys={}, order={})'.format(
self.keys, self.order)
@PIPELINES.register_module
class Collect(object):
"""Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img_group", "gt_labels".
The "img_meta" item is always populated. The contents of the "img_meta"
dictionary depends on "meta_keys". By default this includes:
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the
bottom/right, if the batch tensor is larger than this shape.
- "scale_factor": a float indicating the preprocessing scale
- "flip": a boolean indicating if image flip transform was used
- "filename": path to the image file
- "ori_shape": original shape of the image as a tuple (h, w, c)
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- div_255 - bool indicating if pixel value div 255
- to_rgb - bool indicating if bgr was converted to rgb
"""
def __init__(self,
keys,
meta_keys=('label', 'ori_shape', 'img_shape',
'modality', 'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
data = {}
if len(self.meta_keys) != 0:
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
# data['img_meta'] = img_meta
data['img_meta'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + '(keys={}, meta_keys={})'.format(
self.keys, self.meta_keys)
@PIPELINES.register_module
class FormatShape(object):
"""Format final imgs shape to the given input_format
Required keys are "img_group", "num_clips" and "clip_len",
added or modified keys are "img_group" and "input_shape".
Attributes:
input_format (str): define the final imgs format.
"""
def __init__(self, input_format='NCHW'):
assert input_format in ['NCHW', 'NCTHW']
# final input_format is BNCHW OR BNCTHW
self.input_format = input_format
def __call__(self, results):
img_group = results['img_group']
# transpose
if results['modality'] == 'Flow':
assert len(img_group[0].shape) == 2
img_group = [np.stack((flow_x, flow_y), axis=2)
for flow_x, flow_y in zip(
img_group[0::2], img_group[1::2])]
img_group = [img.transpose(2, 0, 1) for img in img_group]
# Stack into numpy.array
img_group = np.stack(img_group, axis=0)
# [M x C x H x W]
# M = 1 * N_oversample * N_clips * L
num_clips = results['num_clips']
clip_len = results['clip_len']
if self.input_format == 'NCTHW':
if clip_len == 1 and num_clips > 1:
# uniform sampling, num_clips mean clip_len
img_group = img_group.reshape(
(-1, num_clips) + img_group.shape[1:])
# N_over x N_clips x C x H x W
img_group = np.transpose(img_group, (0, 2, 1, 3, 4))
# N_over x C x N_clips x H x W
else:
img_group = img_group.reshape(
(-1, num_clips, clip_len) + img_group.shape[1:])
# N_over x N_clips x L x C x H x W
img_group = np.transpose(img_group, (0, 1, 3, 2, 4, 5))
# N_over x N_clips x C x L x H x W
img_group = img_group.reshape((-1, ) + img_group.shape[2:])
# M' x C x L x H x W
# M' = N_over x N_clips
results['img_group'] = img_group
results['input_shape'] = img_group.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
        repr_str += '(input_format={})'.format(self.input_format)
        return repr_str
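# --- Usage sketch (an addition, not from the original source) ---
# A hedged illustration of chaining the formatting transforms above at the end of a
# data pipeline: FormatShape stacks/reshapes the frames, ToTensor converts them, and
# Collect packages the final sample. The fake `results` dict holds only the keys these
# three transforms need; run it with the package importable (the relative imports at
# the top prevent executing this file directly).
if __name__ == '__main__':
    fake_results = {
        'img_group': [np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
                      for _ in range(8)],
        'modality': 'RGB',
        'num_clips': 1,
        'clip_len': 8,
        'label': 1,
        'ori_shape': (224, 224, 3),
        'img_shape': (224, 224, 3),
        'img_norm_cfg': dict(mean=[0, 0, 0], std=[1, 1, 1]),
    }
    for transform in (FormatShape(input_format='NCTHW'),
                      ToTensor(keys=['img_group', 'label']),
                      Collect(keys=['img_group', 'label'])):
        fake_results = transform(fake_results)
    print(fake_results['img_group'].shape)  # expected: torch.Size([1, 3, 8, 224, 224])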
|
the-stack_106_20606
|
"""
Given two arrays, write a function to compute their intersection.
Example 1:
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2,2]
Example 2:
Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
Output: [4,9]
Note:
Each element in the result should appear as many times as it shows in both arrays.
The result can be in any order.
Follow up:
What if the given array is already sorted? How would you optimize your algorithm?
What if nums1's size is small compared to nums2's size? Which algorithm is better?
What if elements of nums2 are stored on disk, and the memory is limited such that you cannot load all elements into the memory at once?
Solution:
Hashtable
"""
# Hashtable
# Time: O(m+n), where m, n are the numbers of number in nums1 and nums2
# Space: O(m+n)
from typing import List


class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
res = []
d2 = dict()
for num in nums2:
d2[num] = d2.get(num, 0) + 1
for num in nums1:
if num in d2 and d2[num] > 0:
res.append(num)
d2[num] -= 1
return res
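# Follow-up sketch (an addition): if both arrays are already sorted, a two-pointer
# walk computes the intersection in O(m + n) time with O(1) extra space besides the
# output, which also suits the case where nums2 is streamed from disk in sorted chunks.
class SolutionSorted:
    def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
        i, j, res = 0, 0, []
        while i < len(nums1) and j < len(nums2):
            if nums1[i] < nums2[j]:
                i += 1
            elif nums1[i] > nums2[j]:
                j += 1
            else:
                res.append(nums1[i])
                i += 1
                j += 1
        return res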
|
the-stack_106_20607
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import find_packages
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
if sys.argv[-1] == "test":
try:
__import__("py")
except ImportError:
print("py.test required.")
sys.exit(1)
errors = os.system("py.test")
sys.exit(bool(errors))
# yapf: disable
install = [
'recipe>=0.7.2',
'six',
'sqlalchemy',
'redis',
'dogpile.cache',
'structlog',
]
# yapf: enable
setup(
name="recipe_caching",
version="0.6.0",
description="caching for recipe library",
long_description=(open("README.rst").read()),
author="Chris Gemignani",
author_email="[email protected]",
url="https://github.com/juiceinc/recipe_caching",
packages=find_packages(),
include_package_data=True,
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
tests_require=["pytest", "pytest-cov"],
install_requires=install,
entry_points={
"recipe.oven.drivers": [
"caching = recipe_caching.oven.drivers.caching_oven:CachingOven",
],
"recipe.hooks.modify_query": [
"caching = recipe_caching.hooks.modify_query:CachingQueryHook",
],
},
)
|
the-stack_106_20610
|
r1 = float(input('First segment: '))
r2 = float(input('Second segment: '))
r3 = float(input('Third segment: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('The segments CAN FORM a triangle: ', end='')
    if r1 == r2 == r3:
        print('EQUILATERAL.')
    elif r1 != r2 != r3 != r1:
        print('SCALENE.')
    else:
        print('ISOSCELES.')
else:
    print('The segments CANNOT FORM a triangle.')
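# Refactor sketch (an addition, not part of the original exercise): the same
# classification as a reusable function, which is easier to test than the
# interactive prompts above.
def classify_triangle(a, b, c):
    """Return 'equilateral', 'scalene' or 'isosceles' for valid side lengths, else None."""
    if not (a < b + c and b < a + c and c < a + b):
        return None
    if a == b == c:
        return 'equilateral'
    if a != b and b != c and a != c:
        return 'scalene'
    return 'isosceles'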
|
the-stack_106_20612
|
import asyncio
import time
import trio
import joulehunter
def do_nothing():
pass
def busy_wait(duration):
end_time = time.time() + duration
while time.time() < end_time:
do_nothing()
async def say(what, when, profile=False):
if profile:
p = joulehunter.Profiler()
p.start()
busy_wait(0.1)
sleep_start = time.time()
await trio.sleep(when)
print(f"slept for {time.time() - sleep_start:.3f} seconds")
busy_wait(0.1)
print(what)
if profile:
p.stop()
p.print(show_all=True)
async def task():
async with trio.open_nursery() as nursery:
nursery.start_soon(say, "first hello", 2, True)
nursery.start_soon(say, "second hello", 1, True)
nursery.start_soon(say, "third hello", 3, True)
trio.run(task)
|
the-stack_106_20613
|
import astropy.units as u
import numpy as np
import pytest
from astropy import time
from astropy.constants import c
from astropy.coordinates import (SkyCoord, EarthLocation, ICRS, GCRS, Galactic,
CartesianDifferential, SpectralCoord,
get_body_barycentric_posvel,
FK5, CartesianRepresentation)
from ..spectra.spectral_axis import SpectralAxis
from ..spectra.spectrum1d import Spectrum1D
from astropy.tests.helper import assert_quantity_allclose
def get_greenwich_earthlocation():
"""
A helper function to get an EarthLocation for greenwich (without trying to
do a download)
"""
site_registry = EarthLocation._get_site_registry(force_builtin=True)
return site_registry.get('greenwich')
def test_create_spectral_axis():
site = get_greenwich_earthlocation()
obstime = time.Time('2018-12-13 9:00')
observer_gcrs = site.get_gcrs(obstime)
wavelengths = np.linspace(500, 2500, 1001) * u.AA
spectral_axis = SpectralAxis(wavelengths, observer=observer_gcrs)
assert isinstance(spectral_axis, u.Quantity)
assert len(spectral_axis) == 1001
assert spectral_axis.bin_edges[0] == 499*u.AA
def test_create_with_bin_edges():
wavelengths = np.linspace(500, 2500, 1001) * u.AA
spectral_axis = SpectralAxis(wavelengths, bin_specification="edges")
assert np.all(spectral_axis.bin_edges == wavelengths)
assert spectral_axis[0] == 501*u.AA
# Test irregular bin edges
wavelengths = np.array([500, 510, 550, 560, 590])*u.AA
spectral_axis = SpectralAxis(wavelengths, bin_specification="edges")
assert np.all(spectral_axis.bin_edges == wavelengths)
assert np.all(spectral_axis == [505., 530., 555., 575.]*u.AA)
# GENERAL TESTS
# We first run through a series of cases to test different ways of initializing
# the observer and target for SpectralAxis, including for example frames,
# SkyCoords, and making sure that SpectralAxis is not sensitive to the actual
# frame or representation class.
# Local Standard of Rest
LSRD = Galactic(u=0 * u.km, v=0 * u.km, w=0 * u.km,
U=9 * u.km / u.s, V=12 * u.km / u.s, W=7 * u.km / u.s,
representation_type='cartesian', differential_type='cartesian')
LSRD_EQUIV = [
LSRD,
SkyCoord(LSRD), # as a SkyCoord
LSRD.transform_to(ICRS), # different frame
LSRD.transform_to(ICRS).transform_to(Galactic) # different representation
]
@pytest.fixture(params=[None] + LSRD_EQUIV)
def observer(request):
return request.param
# Target located in direction of motion of LSRD with no velocities
LSRD_DIR_STATIONARY = Galactic(u=9 * u.km, v=12 * u.km, w=7 * u.km,
representation_type='cartesian')
LSRD_DIR_STATIONARY_EQUIV = [
LSRD_DIR_STATIONARY,
SkyCoord(LSRD_DIR_STATIONARY), # as a SkyCoord
LSRD_DIR_STATIONARY.transform_to(FK5()), # different frame
LSRD_DIR_STATIONARY.transform_to(ICRS()).transform_to(Galactic()) # different representation
]
@pytest.fixture(params=[None] + LSRD_DIR_STATIONARY_EQUIV)
def target(request):
return request.param
def test_create_from_spectral_coord(observer, target):
"""
Checks that parameters are correctly copied from the SpectralCoord object
to the SpectralAxis object
"""
spec_coord = SpectralCoord([100, 200, 300] * u.nm, observer=observer,
target=target, doppler_convention = 'optical',
doppler_rest = 6000*u.AA)
spec_axis = SpectralAxis(spec_coord)
assert spec_coord.observer == spec_axis.observer
assert spec_coord.target == spec_axis.target
assert spec_coord.radial_velocity == spec_axis.radial_velocity
assert spec_coord.doppler_convention == spec_axis.doppler_convention
assert spec_coord.doppler_rest == spec_axis.doppler_rest
def test_create_from_spectral_axis(observer, target):
"""
Checks that parameters are correctly copied to the new SpectralAxis object
"""
spec_axis1 = SpectralAxis([100, 200, 300] * u.nm, observer=observer,
target=target, doppler_convention = 'optical',
doppler_rest = 6000*u.AA)
spec_axis2 = SpectralAxis(spec_axis1)
assert spec_axis1.observer == spec_axis2.observer
assert spec_axis1.target == spec_axis2.target
assert spec_axis1.radial_velocity == spec_axis2.radial_velocity
assert spec_axis1.doppler_convention == spec_axis2.doppler_convention
assert spec_axis1.doppler_rest == spec_axis2.doppler_rest
def test_change_radial_velocity():
wave = np.linspace(100, 200, 100) * u.AA
flux = np.ones(100) * u.one
spec = Spectrum1D(spectral_axis=wave, flux=flux,
radial_velocity=0 * u.km / u.s)
assert spec.radial_velocity == 0 * u.km/u.s
spec.radial_velocity = 1 * u.km / u.s
assert spec.radial_velocity == 1 * u.km/u.s
spec = Spectrum1D(spectral_axis=wave, flux=flux,
radial_velocity=10 * u.km / u.s)
assert spec.radial_velocity == 10 * u.km / u.s
spec.radial_velocity = 5 * u.km / u.s
assert spec.radial_velocity == 5 * u.km / u.s
def test_change_redshift():
wave = np.linspace(100, 200, 100) * u.AA
flux = np.ones(100) * u.one
spec = Spectrum1D(spectral_axis=wave, flux=flux, redshift=0)
assert spec.redshift.unit.physical_type == 'dimensionless'
assert_quantity_allclose(spec.redshift, u.Quantity(0))
assert type(spec.spectral_axis) == SpectralAxis
spec.redshift = 0.1
assert spec.redshift.unit.physical_type == 'dimensionless'
assert_quantity_allclose(spec.redshift, u.Quantity(0.1))
assert type(spec.spectral_axis) == SpectralAxis
spec = Spectrum1D(spectral_axis=wave, flux=flux, redshift=0.2)
assert spec.redshift.unit.physical_type == 'dimensionless'
assert_quantity_allclose(spec.redshift, u.Quantity(0.2))
assert type(spec.spectral_axis) == SpectralAxis
spec.redshift = 0.4
assert spec.redshift.unit.physical_type == 'dimensionless'
assert_quantity_allclose(spec.redshift, u.Quantity(0.4))
assert type(spec.spectral_axis) == SpectralAxis
|
the-stack_106_20615
|
"""empty message
Revision ID: 37dc079188db
Revises: None
Create Date: 2015-01-19 20:32:38.647835
"""
# revision identifiers, used by Alembic.
revision = '37dc079188db'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('confirmed', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_users_email', 'users', ['email'], unique=True)
op.create_index('ix_users_username', 'users', ['username'], unique=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_users_username', 'users')
op.drop_index('ix_users_email', 'users')
op.drop_table('users')
op.drop_table('roles')
### end Alembic commands ###
|
the-stack_106_20616
|
import os
import subprocess
def cmd(cmd, listed=False):
output = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
correct_output = output.communicate()[0].decode().replace('\'', '').splitlines()
if listed:
return correct_output
else:
return correct_output[0]
ids = cmd(["docker", "ps", "-q"], True)
images = cmd(["docker", "ps", "--format", "'{{.Image}}'"], True)
names = cmd(["docker", "ps", "--format", "'{{.Names}}'"], True)
counter = 0
print("\n")
for id in ids:
source = cmd(['docker', 'inspect', "--format='{{ (index .Mounts 0).Source }}'", id])
destination = cmd(['docker', 'inspect', "--format='{{ (index .Mounts 0).Destination }}'", id])
if source and destination:
print("{} ({}) from {} :".format(names[counter], id, images[counter]))
print("\t-Source: {}".format(source))
print("\t-Destination: {}".format(destination))
counter += 1
print("\n")
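# Extension sketch (an addition, not part of the original script): the loop above
# reports only the first mount (index 0) per container. Asking `docker inspect` for
# the whole Mounts array as JSON covers containers with zero or many mounts; note
# that cmd() strips single quotes, which is harmless for JSON output.
import json
for container_id in ids:
    mounts_json = cmd(['docker', 'inspect', '--format={{json .Mounts}}', container_id])
    for mount in json.loads(mounts_json):
        print('{}: {} -> {}'.format(container_id, mount.get('Source'), mount.get('Destination')))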
|
the-stack_106_20619
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
"""
This module provides "nested Polygon" detection for multiple paths.
Terminology
-----------
exterior
creates a filled area, has counter-clockwise (ccw) winding in matplotlib
exterior := Path
hole
creates an unfilled area, has clockwise winding (cw) in matplotlib,
hole := Polygon
polygon
list of nested paths:
polygon without a hole: [path]
polygon with 1 hole: [path, [path]]
polygon with 2 separated holes: [path, [path], [path]]
polygon with 2 nested holes: [path, [path, [path]]]
polygon := [exterior, hole*]
The result is a list of polygons:
1 polygon returns: [[ext-path]]
2 separated polygons returns: [[ext-path], [ext-path, [hole-path]]]
A hole is just another polygon, for a correct visualisation in
matplotlib the winding of the nested paths have to follow the alternating
order ccw-cw-ccw-cw... :
[Exterior-ccw,
[Hole-Exterior-cw,
[Sub-Hole-ccw],
[Sub-Hole-ccw],
],
[Hole-Exterior-cw],
[Hole-Exterior-cw],
]
The implementation has to do some expensive tests, like check if a path is
inside of another path or if paths do overlap. A goal is to reduce this costs
by using proxy objects:
Bounding Box Proxy
------------------
Use the bounding box: this is very fast but not accurate. It handles most
real-world scenarios, on the assumption that most HATCHES are created from
non-overlapping boundary paths.
Overlap detection and resolving is not possible.
Bounding Box Construction:
- Fast: use bounding box from control vertices
- Accurate: use bounding box from flattened curve
Inside Check:
- Fast: center point of the bounding box
- Slow: use all corner points of the bounding box
Convex Hull Proxy
-----------------
Use the convex hull of the path, this is more accurate but also
much slower. Overlap detection and resolving is not possible.
Convex Hull construction:
- Fast: use convex hull from control vertices
- Accurate: use convex hull from flattened curve
Inside Check:
- Fast: center point of convex hull
- Slow: use all points of the convex hull
Flattened Curve
---------------
Use the flattened curve vertices, this is the most accurate solution and also
the slowest. Overlap detection and resolving is possible: exterior is the
union of two overlapping paths, hole is the intersection of this two paths,
the hole vertices have to be subtracted from the exterior vertices.
Sort by Area
------------
It is not possible for a path to contain another path with a larger area.
"""
from typing import TypeVar, Tuple, Optional, List, Iterable
from collections import namedtuple
from .path import Path
from ezdxf.math import BoundingBox2d
Exterior = Path
Polygon = TypeVar('Polygon')
Hole = Polygon
Polygon = Tuple[Exterior, Optional[List[Hole]]]
BoxStruct = namedtuple('BoxStruct', 'bbox, path')
def fast_bbox_detection(paths: Iterable[Path]) -> List[Polygon]:
""" Create a nested polygon structure from iterable `paths`, using 2D
bounding boxes as fast detection objects.
"""
# Implements fast bounding box construction and fast inside check.
def area(item: BoxStruct) -> float:
width, height = item.bbox.size
return width * height
def separate(exterior: BoundingBox2d, candidates: List[BoxStruct]
) -> Tuple[List[BoxStruct], List[BoxStruct]]:
holes = []
outside = []
for candidate in candidates:
# Fast inside check:
(holes if exterior.inside(candidate.bbox.center)
else outside).append(candidate)
return holes, outside
def polygon_structure(outside: List[BoxStruct]) -> List[List]:
polygons = []
while outside:
exterior = outside.pop() # path with largest area
            # Get the holes inside of the exterior and the remaining paths
            # outside of the exterior:
holes, outside = separate(exterior.bbox, outside)
if holes:
# build nested hole structure:
# the largest hole could contain the smaller holes,
# and so on ...
holes = polygon_structure(holes)
polygons.append([exterior, *holes])
return polygons
def as_nested_paths(polygons) -> List:
return [
polygon.path if isinstance(polygon, BoxStruct)
else as_nested_paths(polygon)
for polygon in polygons
]
boxed_paths = [
# Fast bounding box construction:
BoxStruct(BoundingBox2d(path.control_vertices()), path)
for path in paths if len(path)
]
boxed_paths.sort(key=area)
return as_nested_paths(polygon_structure(boxed_paths))
def winding_deconstruction(polygons: List[Polygon]
) -> Tuple[List[Path], List[Path]]:
""" Flatten the nested polygon structure in a tuple of two lists,
the first list contains the paths which should be counter-clockwise oriented
and the second list contains the paths which should be clockwise oriented.
The paths are not converted to this orientation.
"""
def deconstruct(polygons_, level):
for polygon in polygons_:
if isinstance(polygon, Path):
# level 0 is the list of polygons
# level 1 = ccw, 2 = cw, 3 = ccw, 4 = cw, ...
(ccw_paths if (level % 2) else cw_paths).append(polygon)
else:
deconstruct(polygon, level + 1)
cw_paths = []
ccw_paths = []
deconstruct(polygons, 0)
return ccw_paths, cw_paths
def flatten_polygons(polygons: Polygon) -> Iterable[Path]:
""" Yield a flat representation of the given nested polygons. """
for polygon in polygons:
if isinstance(polygon, Path):
yield polygon
else:
yield from flatten_polygons(polygon)
def group_paths(paths: Iterable[Path]) -> List[List[Path]]:
""" Group separated paths and their inner holes as flat lists. """
polygons = fast_bbox_detection(paths)
return [list(flatten_polygons(polygon)) for polygon in polygons]
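# --- Usage sketch (added for illustration, not part of the original module) ---
# A minimal example of combining the functions above. It assumes that Path
# (imported at the top of this module) accepts a start point and provides
# line_to(); the helper name and the geometry below are hypothetical.
def _example_nested_detection():
    def rect(x0, y0, x1, y1):
        # Closed rectangular boundary path built from four line segments.
        p = Path((x0, y0))
        p.line_to((x1, y0))
        p.line_to((x1, y1))
        p.line_to((x0, y1))
        p.line_to((x0, y0))
        return p

    outer = rect(0, 0, 10, 10)  # exterior
    inner = rect(2, 2, 8, 8)    # hole fully inside the exterior
    polygons = fast_bbox_detection([outer, inner])
    # Expected nesting: [[outer, [inner]]] -> one polygon with one hole.
    ccw, cw = winding_deconstruction(polygons)
    # ccw holds the exterior, cw holds the hole; orientation is not changed.
    groups = group_paths([outer, inner])
    # groups == [[outer, inner]]: one separated polygon with its hole, flattened.
    return polygons, ccw, cw, groups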
|
the-stack_106_20620
|
# -*- coding: utf-8 -*-
# This module is the public interface of the yolo package.
# Code outside the package can access the yolo detector through this module.
import os
import numpy as np
from yolo.backend.decoder import YoloDecoder
from yolo.backend.loss import YoloLoss
from yolo.backend.network import create_yolo_network
from yolo.backend.batch_gen import create_batch_generator
from yolo.backend.utils.fit import train
from yolo.backend.utils.annotation import get_train_annotations, get_unique_labels
from yolo.backend.utils.box import to_minmax
def get_object_labels(ann_directory):
files = os.listdir(ann_directory)
files = [os.path.join(ann_directory, fname) for fname in files]
return get_unique_labels(files)
def create_yolo(architecture,
labels,
input_size = (224, 224),
anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
coord_scale=1.0,
class_scale=1.0,
object_scale=5.0,
no_object_scale=1.0,
weights = None,
strip_size = 32):
n_classes = len(labels)
n_boxes = int(len(anchors)/2)
yolo_network = create_yolo_network(architecture, input_size, n_classes, n_boxes, weights, strip_size=strip_size)
yolo_loss = YoloLoss(yolo_network.get_grid_size(),
n_classes,
anchors,
coord_scale,
class_scale,
object_scale,
no_object_scale)
yolo_decoder = YoloDecoder(anchors)
yolo = YOLO(yolo_network, yolo_loss, yolo_decoder, labels, input_size)
return yolo
class YOLO(object):
def __init__(self,
yolo_network,
yolo_loss,
yolo_decoder,
labels,
input_size = (224, 224)):
"""
        # Args
            yolo_network : network wrapper created by create_yolo_network()
            yolo_loss : YoloLoss instance
            yolo_decoder : YoloDecoder instance
            labels : list of class names
            input_size : tuple, size of the network input (e.g. (224, 224))
"""
self._yolo_network = yolo_network
self._yolo_loss = yolo_loss
self._yolo_decoder = yolo_decoder
self._labels = labels
        # Used only when generating batches.
self._input_size = input_size
def load_weights(self, weight_path, by_name=False):
if os.path.exists(weight_path):
print("Loading pre-trained weights in", weight_path)
self._yolo_network.load_weights(weight_path, by_name=by_name)
else:
print("Fail to load pre-trained weights. Make sure weight file path.")
def predict(self, image, threshold=0.3):
"""
# Args
image : 3d-array (BGR ordered)
# Returns
boxes : array, shape of (N, 4)
probs : array, shape of (N, nb_classes)
"""
def _to_original_scale(boxes):
height, width = image.shape[:2]
minmax_boxes = to_minmax(boxes)
minmax_boxes[:,0] *= width
minmax_boxes[:,2] *= width
minmax_boxes[:,1] *= height
minmax_boxes[:,3] *= height
            return minmax_boxes.astype(int)  # np.int alias was removed in newer NumPy
netout = self._yolo_network.forward(image)
boxes, probs = self._yolo_decoder.run(netout, threshold)
if len(boxes) > 0:
boxes = _to_original_scale(boxes)
return boxes, probs
else:
return [], []
def train(self,
img_folder,
ann_folder,
img_in_mem, # datasets in mem, format: list
ann_in_mem, # datasets's annotation in mem, format: list
nb_epoch,
save_best_weights_path,
save_final_weights_path,
batch_size=8,
jitter=True,
learning_rate=1e-4,
train_times=1,
valid_times=1,
valid_img_folder="",
valid_ann_folder="",
valid_img_in_mem = None,
valid_ann_in_mem = None,
first_trainable_layer = None,
is_only_detect = False,
progress_callbacks = []):
# 1. get annotations
train_annotations, valid_annotations = get_train_annotations(self._labels,
img_folder,
ann_folder,
valid_img_folder,
valid_ann_folder,
img_in_mem,
ann_in_mem,
valid_img_in_mem,
valid_ann_in_mem,
is_only_detect)
        # 2. get batch generators
train_batch_generator = self._get_batch_generator(train_annotations, batch_size, train_times, jitter=jitter)
valid_batch_generator = self._get_batch_generator(valid_annotations, batch_size, valid_times, jitter=False)
        # 3. get the keras model instance & loss function used for training
model = self._yolo_network.get_model(first_trainable_layer)
loss = self._get_loss_func(batch_size)
        # 4. Run the training loop
history = train(model,
loss,
train_batch_generator,
valid_batch_generator,
learning_rate = learning_rate,
nb_epoch = nb_epoch,
save_best_weights_path = save_best_weights_path,
save_final_weights_path = save_final_weights_path,
progress_callbacks = progress_callbacks)
return history
def _get_loss_func(self, batch_size):
return self._yolo_loss.custom_loss(batch_size)
def _get_batch_generator(self, annotations, batch_size, repeat_times=1, jitter=True):
"""
# Args
annotations : Annotations instance
batch_size : int
jitter : bool
# Returns
batch_generator : BatchGenerator instance
"""
batch_generator = create_batch_generator(annotations,
self._input_size,
self._yolo_network.get_grid_size(),
batch_size,
self._yolo_loss.anchors,
repeat_times,
jitter=jitter,
norm=self._yolo_network.get_normalize_func())
return batch_generator
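# --- Usage sketch (illustrative addition, not part of the original module) ---
# Shows how the factory and wrapper above are meant to be wired together. The
# architecture name "MobileNet", the file/folder names and the use of OpenCV
# for image loading are assumptions; only get_object_labels(), create_yolo(),
# load_weights() and predict() come from the code above.
def _example_inference(ann_dir="annotations/",
                       weight_path="weights.h5",
                       image_path="sample.jpg"):
    import cv2  # assumption: OpenCV provides the BGR image predict() expects

    labels = get_object_labels(ann_dir)
    yolo = create_yolo("MobileNet", labels, input_size=(224, 224))
    yolo.load_weights(weight_path)
    image = cv2.imread(image_path)
    boxes, probs = yolo.predict(image, threshold=0.3)
    # boxes: (N, 4) min/max pixel coordinates, probs: (N, n_classes)
    return boxes, probs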
|
the-stack_106_20621
|
from interfaces.interface import Resource
from enum import Enum
class Code(object):
enum = {"RELIANCE": "RI",
"CIPLA": "C",
"BIOCON": "BL03",
"AXISBANK": "UTI10",
"HDFCBANK": "HDF01",
"BAJFINANCE": "BAF",
"HEROMOTOCO": "HHM",
"DIVISLAB": "DL03",
"KOTAKBANK": "KMF",
"HINDALCO": "HI",
"M&M": "MM",
"ITC": "ITC"
}
@staticmethod
def stock_code_scrap(code):
return Code.enum[code]
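# --- Usage sketch (illustrative addition) ---
# The table above maps an exchange ticker to a site-specific scrap code; a
# KeyError is raised for unknown tickers. Minimal smoke test:
if __name__ == "__main__":
    assert Code.stock_code_scrap("RELIANCE") == "RI"
    assert Code.stock_code_scrap("ITC") == "ITC"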
|
the-stack_106_20622
|
# Natural Language Toolkit: evaluation of dependency parser
#
# Author: Long Duong <[email protected]>
#
# Copyright (C) 2001-2018 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import division
import unicodedata
class DependencyEvaluator(object):
"""
Class for measuring labelled and unlabelled attachment score for
dependency parsing. Note that the evaluation ignores punctuation.
>>> from nltk.parse import DependencyGraph, DependencyEvaluator
>>> gold_sent = DependencyGraph(\"""
... Pierre NNP 2 NMOD
... Vinken NNP 8 SUB
... , , 2 P
... 61 CD 5 NMOD
... years NNS 6 AMOD
... old JJ 2 NMOD
... , , 2 P
... will MD 0 ROOT
... join VB 8 VC
... the DT 11 NMOD
... board NN 9 OBJ
... as IN 9 VMOD
... a DT 15 NMOD
... nonexecutive JJ 15 NMOD
... director NN 12 PMOD
... Nov. NNP 9 VMOD
... 29 CD 16 NMOD
... . . 9 VMOD
... \""")
>>> parsed_sent = DependencyGraph(\"""
... Pierre NNP 8 NMOD
... Vinken NNP 1 SUB
... , , 3 P
... 61 CD 6 NMOD
... years NNS 6 AMOD
... old JJ 2 NMOD
... , , 3 AMOD
... will MD 0 ROOT
... join VB 8 VC
... the DT 11 AMOD
... board NN 9 OBJECT
... as IN 9 NMOD
... a DT 15 NMOD
... nonexecutive JJ 15 NMOD
... director NN 12 PMOD
... Nov. NNP 9 VMOD
... 29 CD 16 NMOD
... . . 9 VMOD
... \""")
>>> de = DependencyEvaluator([parsed_sent],[gold_sent])
>>> las, uas = de.eval()
>>> las
0.8...
>>> abs(uas - 0.6) < 0.00001
True
"""
def __init__(self, parsed_sents, gold_sents):
"""
:param parsed_sents: the list of parsed_sents as the output of parser
:type parsed_sents: list(DependencyGraph)
"""
self._parsed_sents = parsed_sents
self._gold_sents = gold_sents
def _remove_punct(self, inStr):
"""
        Function to remove punctuation from a Unicode string.
        :param inStr: the input string
        :return: Unicode string after removing all punctuation
"""
punc_cat = set(["Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"])
return "".join(x for x in inStr if unicodedata.category(x) not in punc_cat)
def eval(self):
"""
Return the Labeled Attachment Score (LAS) and Unlabeled Attachment Score (UAS)
:return : tuple(float,float)
"""
if (len(self._parsed_sents) != len(self._gold_sents)):
raise ValueError(" Number of parsed sentence is different with number of gold sentence.")
corr = 0
corrL = 0
total = 0
for i in range(len(self._parsed_sents)):
parsed_sent_nodes = self._parsed_sents[i].nodes
gold_sent_nodes = self._gold_sents[i].nodes
if (len(parsed_sent_nodes) != len(gold_sent_nodes)):
raise ValueError("Sentences must have equal length.")
for parsed_node_address, parsed_node in parsed_sent_nodes.items():
gold_node = gold_sent_nodes[parsed_node_address]
if parsed_node["word"] is None:
continue
if parsed_node["word"] != gold_node["word"]:
raise ValueError("Sentence sequence is not matched.")
# Ignore if word is punctuation by default
# if (parsed_sent[j]["word"] in string.punctuation):
if self._remove_punct(parsed_node["word"]) == "":
continue
total += 1
if parsed_node["head"] == gold_node["head"]:
corr += 1
if parsed_node["rel"] == gold_node["rel"]:
corrL += 1
return corr / total, corrL / total
|
the-stack_106_20625
|
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several older modules and functions:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize will be supplied as the corresponding argument to the io.open()
function when creating the stdin/stdout/stderr pipe file objects:
0 means unbuffered (read & write are one system call and can return short),
1 means line buffered, any other positive value means use a buffer of
approximately that size. A negative bufsize, the default, means the system
default of io.DEFAULT_BUFFER_SIZE will be used.
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is false, the file objects stdin, stdout and stderr
are opened as binary files, and no line ending conversion is done.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Also, the newlines attribute
of the file objects stdout, stdin and stderr are not updated by the
communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). Universal newlines mode is used,
    meaning that the result will be decoded to a string.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
There is an additional optional argument, "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument.
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Exceptions defined within this module inherit from SubprocessError.
check_call() and check_output() will raise CalledProcessError if the
called process returns a non-zero return code. TimeoutExpired will
be raised if a timeout was specified and expired.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
    If the stderr argument is PIPE, this attribute is a file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import sys
mswindows = (sys.platform == "win32")
import io
import os
import time
import signal
import builtins
import warnings
import errno
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
# Exception classes used by this module.
class SubprocessError(Exception): pass
class CalledProcessError(SubprocessError):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
class TimeoutExpired(SubprocessError):
"""This exception is raised when the timeout expires while waiting for a
child process.
"""
def __init__(self, cmd, timeout, output=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
def __str__(self):
return ("Command '%s' timed out after %s seconds" %
(self.cmd, self.timeout))
if mswindows:
import threading
import msvcrt
import _winapi
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
else:
import _posixsubprocess
import select
import selectors
try:
import threading
except ImportError:
import dummy_threading as threading
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
# poll/select have the advantage of not requiring any extra file
# descriptor, contrarily to epoll/kqueue (also, they require a single
# syscall).
if hasattr(selectors, 'PollSelector'):
_PopenSelector = selectors.PollSelector
else:
_PopenSelector = selectors.SelectSelector
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "CalledProcessError", "DEVNULL"]
if mswindows:
from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
class Handle(int):
closed = False
def Close(self, CloseHandle=_winapi.CloseHandle):
if not self.closed:
self.closed = True
CloseHandle(self)
def Detach(self):
if not self.closed:
self.closed = True
return int(self)
raise ValueError("already closed")
def __repr__(self):
return "Handle(%d)" % int(self)
__del__ = Close
__str__ = __repr__
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# This list holds Popen instances for which the underlying process had not
# exited at the time its __del__ method got called: those processes are wait()ed
# for synchronously from _cleanup() when a new Popen object is created, to avoid
# zombie processes.
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxsize)
if res is not None:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
DEVNULL = -3
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except InterruptedError:
continue
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
flag_opt_map = {
'debug': 'd',
# 'inspect': 'i',
# 'interactive': 'i',
'optimize': 'O',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'quiet': 'q',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag)
if v > 0:
args.append('-' + opt * v)
for opt in sys.warnoptions:
args.append('-W' + opt)
return args
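# Illustrative note (added): running the interpreter as `python -O -B -W error`
# sets sys.flags.optimize == 1 and sys.flags.dont_write_bytecode == 1, so the
# function above would return ['-O', '-B', '-Werror'] (option order may vary
# with the dict iteration order on older Python versions).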
def call(*popenargs, timeout=None, **kwargs):
"""Run command with arguments. Wait for command to complete or
timeout, then return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
with Popen(*popenargs, **kwargs) as p:
try:
return p.wait(timeout=timeout)
except:
p.kill()
p.wait()
raise
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the call function. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, timeout=None, **kwargs):
r"""Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
b'ls: non_existent_file: No such file or directory\n'
There is an additional optional argument, "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it too will be used internally. Example:
>>> check_output(["sed", "-e", "s/foo/bar/"],
... input=b"when in the course of fooman events\n")
b'when in the course of barman events\n'
If universal_newlines=True is passed, the return value will be a
string rather than bytes.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'input' in kwargs:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
inputdata = kwargs['input']
del kwargs['input']
kwargs['stdin'] = PIPE
else:
inputdata = None
with Popen(*popenargs, stdout=PIPE, **kwargs) as process:
try:
output, unused_err = process.communicate(inputdata, timeout=timeout)
except TimeoutExpired:
process.kill()
output, unused_err = process.communicate()
raise TimeoutExpired(process.args, timeout, output=output)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, process.args, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
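# Illustrative examples (added) of the quoting rules implemented above; the
# right-hand side is the raw command-line text produced:
#   list2cmdline(['a b', 'c'])  ->  "a b" c    (whitespace forces quoting)
#   list2cmdline(['a"b'])       ->  a\"b       (quote escaped by a backslash)
#   list2cmdline(['a\\', 'b'])  ->  a\ b       (lone backslash kept literal)
#   list2cmdline([''])          ->  ""         (empty argument must be quoted)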
# Various tools for executing commands and looking at their output and status.
#
def getstatusoutput(cmd):
""" Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). Universal newlines mode is used,
    meaning that the result will be decoded to a string.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> import subprocess
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
"""
try:
data = check_output(cmd, shell=True, universal_newlines=True, stderr=STDOUT)
status = 0
except CalledProcessError as ex:
data = ex.output
status = ex.returncode
if data[-1:] == '\n':
data = data[:-1]
return status, data
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> import subprocess
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
"""
return getstatusoutput(cmd)[1]
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
_child_created = False # Set here since __del__ checks it
def __init__(self, args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
# Held while anything is calling waitpid before returncode has been
# updated to prevent clobbering returncode if wait() or poll() are
# called from multiple threads at once. After acquiring the lock,
# code must re-check self.returncode to see if another thread just
# finished a waitpid() call.
self._waitpid_lock = threading.Lock()
self._input = None
self._communication_started = False
if bufsize is None:
bufsize = -1 # Restore default
if not isinstance(bufsize, int):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.args = args
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
if mswindows:
if p2cwrite != -1:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread != -1:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread != -1:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
if universal_newlines:
self.stdin = io.TextIOWrapper(self.stdin, write_through=True,
line_buffering=(bufsize == 1))
if c2pread != -1:
self.stdout = io.open(c2pread, 'rb', bufsize)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if errread != -1:
self.stderr = io.open(errread, 'rb', bufsize)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
self._closed_child_pipe_fds = False
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# Cleanup if the child failed starting.
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
try:
f.close()
except OSError:
pass # Ignore EBADF or other errors.
if not self._closed_child_pipe_fds:
to_close = []
if stdin == PIPE:
to_close.append(p2cread)
if stdout == PIPE:
to_close.append(c2pwrite)
if stderr == PIPE:
to_close.append(errwrite)
if hasattr(self, '_devnull'):
to_close.append(self._devnull)
for fd in to_close:
try:
os.close(fd)
except OSError:
pass
raise
def _translate_newlines(self, data, encoding):
data = data.decode(encoding)
return data.replace("\r\n", "\n").replace("\r", "\n")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
try: # Flushing a BufferedWriter may raise an error
if self.stdin:
self.stdin.close()
finally:
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __del__(self, _maxsize=sys.maxsize):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _get_devnull(self):
if not hasattr(self, '_devnull'):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be
bytes to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
if self._communication_started and input:
raise ValueError("Cannot send input after starting communication")
# Optimization: If we are not worried about timeouts, we haven't
# started communicating, and we have one or zero pipes, using select()
# or threads is unnecessary.
if (timeout is None and not self._communication_started and
[self.stdin, self.stdout, self.stderr].count(None) >= 2):
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except OSError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
else:
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
try:
stdout, stderr = self._communicate(input, endtime, timeout)
finally:
self._communication_started = True
sts = self.wait(timeout=self._remaining_time(endtime))
return (stdout, stderr)
def poll(self):
return self._internal_poll()
def _remaining_time(self, endtime):
"""Convenience for _communicate when computing timeouts."""
if endtime is None:
return None
else:
return endtime - _time()
def _check_timeout(self, endtime, orig_timeout):
"""Convenience for checking if a timeout has expired."""
if endtime is None:
return
if _time() > endtime:
raise TimeoutExpired(self.args, orig_timeout)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _winapi.CreatePipe(None, 0)
p2cread = Handle(p2cread)
_winapi.CloseHandle(_)
elif stdin == PIPE:
p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
elif stdin == DEVNULL:
p2cread = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _winapi.CreatePipe(None, 0)
c2pwrite = Handle(c2pwrite)
_winapi.CloseHandle(_)
elif stdout == PIPE:
c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
elif stdout == DEVNULL:
c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _winapi.CreatePipe(None, 0)
errwrite = Handle(errwrite)
_winapi.CloseHandle(_)
elif stderr == PIPE:
errread, errwrite = _winapi.CreatePipe(None, 0)
errread, errwrite = Handle(errread), Handle(errwrite)
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
h = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle,
_winapi.GetCurrentProcess(), 0, 1,
_winapi.DUPLICATE_SAME_ACCESS)
return Handle(h)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals, unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if -1 not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _winapi.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
# Start the process
try:
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread != -1:
p2cread.Close()
if c2pwrite != -1:
c2pwrite.Close()
if errwrite != -1:
errwrite.Close()
if hasattr(self, '_devnull'):
os.close(self._devnull)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = Handle(hp)
self.pid = pid
_winapi.CloseHandle(ht)
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_winapi.WaitForSingleObject,
_WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
_GetExitCodeProcess=_winapi.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if endtime is not None:
timeout = self._remaining_time(endtime)
if timeout is None:
timeout_millis = _winapi.INFINITE
else:
timeout_millis = int(timeout * 1000)
if self.returncode is None:
result = _winapi.WaitForSingleObject(self._handle,
timeout_millis)
if result == _winapi.WAIT_TIMEOUT:
raise TimeoutExpired(self.args, timeout)
self.returncode = _winapi.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input, endtime, orig_timeout):
# Start reader threads feeding into a list hanging off of this
# object, unless they've already been started.
if self.stdout and not hasattr(self, "_stdout_buff"):
self._stdout_buff = []
self.stdout_thread = \
threading.Thread(target=self._readerthread,
args=(self.stdout, self._stdout_buff))
self.stdout_thread.daemon = True
self.stdout_thread.start()
if self.stderr and not hasattr(self, "_stderr_buff"):
self._stderr_buff = []
self.stderr_thread = \
threading.Thread(target=self._readerthread,
args=(self.stderr, self._stderr_buff))
self.stderr_thread.daemon = True
self.stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except OSError as e:
if e.errno == errno.EPIPE:
# communicate() should ignore pipe full error
pass
elif (e.errno == errno.EINVAL
and self.poll() is not None):
# Issue #19612: stdin.write() fails with EINVAL
# if the process already exited before the write
pass
else:
raise
self.stdin.close()
# Wait for the reader threads, or time out. If we time out, the
# threads remain reading and the fds left open in case the user
# calls communicate again.
if self.stdout is not None:
self.stdout_thread.join(self._remaining_time(endtime))
if self.stdout_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
if self.stderr is not None:
self.stderr_thread.join(self._remaining_time(endtime))
if self.stderr_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
# Collect the output from and close both pipes, now that we know
# both have been read successfully.
stdout = None
stderr = None
if self.stdout:
stdout = self._stdout_buff
self.stdout.close()
if self.stderr:
stderr = self._stderr_buff
self.stderr.close()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process."""
# Don't signal a process that we know has already died.
if self.returncode is not None:
return
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process."""
# Don't terminate a process that we know has already died.
if self.returncode is not None:
return
try:
_winapi.TerminateProcess(self._handle, 1)
except PermissionError:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
rc = _winapi.GetExitCodeProcess(self._handle)
if rc == _winapi.STILL_ACTIVE:
raise
self.returncode = rc
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _close_fds(self, fds_to_keep):
start_fd = 3
for fd in sorted(fds_to_keep):
if fd >= start_fd:
os.closerange(start_fd, fd)
start_fd = fd + 1
if start_fd <= MAXFD:
os.closerange(start_fd, MAXFD)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, (str, bytes)):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
orig_executable = executable
# For transferring possible exec failure from child to parent.
# Data format: "exception name:hex errno:description"
# Pickle is not used; it is complex and involves memory allocation.
errpipe_read, errpipe_write = os.pipe()
# errpipe_write must not be in the standard io 0, 1, or 2 fd range.
low_fds_to_close = []
while errpipe_write < 3:
low_fds_to_close.append(errpipe_write)
errpipe_write = os.dup(errpipe_write)
for low_fd in low_fds_to_close:
os.close(low_fd)
try:
try:
# We must avoid complex work that could involve
# malloc or free in the child process to avoid
# potential deadlocks, thus we do all this here.
# and pass it to fork_exec()
if env is not None:
env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
for k, v in env.items()]
else:
env_list = None # Use execv instead of execve.
executable = os.fsencode(executable)
if os.path.dirname(executable):
executable_list = (executable,)
else:
# This matches the behavior of os._execvpe().
executable_list = tuple(
os.path.join(os.fsencode(dir), executable)
for dir in os.get_exec_path(env))
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self.pid = _posixsubprocess.fork_exec(
args, executable_list,
close_fds, sorted(fds_to_keep), cwd, env_list,
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
restore_signals, start_new_session, preexec_fn)
self._child_created = True
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
# self._devnull is not always defined.
devnull_fd = getattr(self, '_devnull', None)
if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
os.close(p2cread)
if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
os.close(c2pwrite)
if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
os.close(errwrite)
if devnull_fd is not None:
os.close(devnull_fd)
# Prevent a double close of these fds from __init__ on error.
self._closed_child_pipe_fds = True
# Wait for exec to fail or succeed; possibly raising an
# exception (limited in size)
errpipe_data = bytearray()
while True:
part = _eintr_retry_call(os.read, errpipe_read, 50000)
errpipe_data += part
if not part or len(errpipe_data) > 50000:
break
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if errpipe_data:
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
try:
exception_name, hex_errno, err_msg = (
errpipe_data.split(b':', 2))
except ValueError:
exception_name = b'SubprocessError'
hex_errno = b'0'
err_msg = (b'Bad exception data from child: ' +
repr(errpipe_data))
child_exception_type = getattr(
builtins, exception_name.decode('ascii'),
SubprocessError)
err_msg = err_msg.decode(errors="surrogatepass")
if issubclass(child_exception_type, OSError) and hex_errno:
errno_num = int(hex_errno, 16)
child_exec_never_called = (err_msg == "noexec")
if child_exec_never_called:
err_msg = ""
if errno_num != 0:
err_msg = os.strerror(errno_num)
if errno_num == errno.ENOENT:
if child_exec_never_called:
# The error must be from chdir(cwd).
err_msg += ': ' + repr(cwd)
else:
err_msg += ': ' + repr(orig_executable)
raise child_exception_type(errno_num, err_msg)
raise child_exception_type(err_msg)
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
"""All callers to this function MUST hold self._waitpid_lock."""
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise SubprocessError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
if not self._waitpid_lock.acquire(False):
# Something else is busy calling waitpid. Don't allow two
# at once. We know nothing yet.
return None
try:
if self.returncode is not None:
return self.returncode # Another thread waited.
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except OSError as e:
if _deadstate is not None:
self.returncode = _deadstate
elif e.errno == _ECHILD:
# This happens if SIGCLD is set to be ignored or
# waiting for child processes has otherwise been
# disabled for our process. This child is dead, we
# can't get the status.
# http://bugs.python.org/issue15756
self.returncode = 0
finally:
self._waitpid_lock.release()
return self.returncode
def _try_wait(self, wait_flags):
"""All callers to this function MUST hold self._waitpid_lock."""
try:
(pid, sts) = _eintr_retry_call(os.waitpid, self.pid, wait_flags)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
pid = self.pid
sts = 0
return (pid, sts)
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is not None:
return self.returncode
# endtime is preferred to timeout. timeout is only used for
# printing.
if endtime is not None or timeout is not None:
if endtime is None:
endtime = _time() + timeout
elif timeout is None:
timeout = self._remaining_time(endtime)
if endtime is not None:
# Enter a busy loop if we have a timeout. This busy loop was
# cribbed from Lib/threading.py in Thread.wait() at r71065.
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
if self._waitpid_lock.acquire(False):
try:
if self.returncode is not None:
break # Another thread waited.
(pid, sts) = self._try_wait(os.WNOHANG)
assert pid == self.pid or pid == 0
if pid == self.pid:
self._handle_exitstatus(sts)
break
finally:
self._waitpid_lock.release()
remaining = self._remaining_time(endtime)
if remaining <= 0:
raise TimeoutExpired(self.args, timeout)
delay = min(delay * 2, remaining, .05)
time.sleep(delay)
else:
while self.returncode is None:
with self._waitpid_lock:
if self.returncode is not None:
break # Another thread waited.
(pid, sts) = self._try_wait(0)
# Check the pid and loop as waitpid has been known to
# return 0 even without WNOHANG in odd situations.
# http://bugs.python.org/issue14396.
if pid == self.pid:
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input, endtime, orig_timeout):
if self.stdin and not self._communication_started:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
stdout = None
stderr = None
# Only create this mapping if we haven't already.
if not self._communication_started:
self._fileobj2output = {}
if self.stdout:
self._fileobj2output[self.stdout] = []
if self.stderr:
self._fileobj2output[self.stderr] = []
if self.stdout:
stdout = self._fileobj2output[self.stdout]
if self.stderr:
stderr = self._fileobj2output[self.stderr]
self._save_input(input)
if self._input:
input_view = memoryview(self._input)
with _PopenSelector() as selector:
if self.stdin and input:
selector.register(self.stdin, selectors.EVENT_WRITE)
if self.stdout:
selector.register(self.stdout, selectors.EVENT_READ)
if self.stderr:
selector.register(self.stderr, selectors.EVENT_READ)
while selector.get_map():
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
ready = selector.select(timeout)
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the file
# objects; they are no longer using C stdio!
for key, events in ready:
if key.fileobj is self.stdin:
chunk = input_view[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
self._input_offset += os.write(key.fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
selector.unregister(key.fileobj)
key.fileobj.close()
else:
raise
else:
if self._input_offset >= len(self._input):
selector.unregister(key.fileobj)
key.fileobj.close()
elif key.fileobj in (self.stdout, self.stderr):
data = os.read(key.fd, 32768)
if not data:
selector.unregister(key.fileobj)
key.fileobj.close()
self._fileobj2output[key.fileobj].append(data)
self.wait(timeout=self._remaining_time(endtime))
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
# Translate newlines, if requested.
# This also turns bytes into strings.
if self.universal_newlines:
if stdout is not None:
stdout = self._translate_newlines(stdout,
self.stdout.encoding)
if stderr is not None:
stderr = self._translate_newlines(stderr,
self.stderr.encoding)
return (stdout, stderr)
def _save_input(self, input):
# This method is called from the _communicate_with_*() methods
# so that if we time out while communicating, we can continue
# sending input if we retry.
if self.stdin and self._input is None:
self._input_offset = 0
self._input = input
if self.universal_newlines and input is not None:
self._input = self._input.encode(self.stdin.encoding)
def send_signal(self, sig):
"""Send a signal to the process."""
# Skip signalling a process that we know has already died.
if self.returncode is None:
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
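# --- Illustrative sketch (added for explanation; not part of the original module) ---
# wait() above polls the child with an exponentially growing delay, capped at 50 ms
# and at the time remaining before the deadline. The helper below reproduces just
# that waiting pattern for an arbitrary predicate; it relies on the ``time`` module
# already imported by this file and is never called by the surrounding code.
def _poll_with_backoff(predicate, timeout):
    """Return True once predicate() is truthy, False if `timeout` seconds elapse."""
    deadline = time.monotonic() + timeout
    delay = 0.0005  # 500 us -> initial delay of 1 ms, doubling each iteration
    while not predicate():
        remaining = deadline - time.monotonic()
        if remaining <= 0:
            return False
        delay = min(delay * 2, remaining, .05)
        time.sleep(delay)
    return True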
|
the-stack_106_20626
|
import errno
import unittest
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import ResourceDenied
from test.test_urllib2 import sanepathname2url
import os
import socket
import urllib.error
import urllib.request
import sys
support.requires("network")
def _retry_thrice(func, exc, *args, **kwargs):
for i in range(3):
try:
return func(*args, **kwargs)
except exc as e:
last_exc = e
continue
raise last_exc
def _wrap_with_retry_thrice(func, exc):
def wrapped(*args, **kwargs):
return _retry_thrice(func, exc, *args, **kwargs)
return wrapped
# Connecting to remote hosts is flaky. Make it more robust by retrying
# the connection several times.
_urlopen_with_retry = _wrap_with_retry_thrice(urllib.request.urlopen,
urllib.error.URLError)
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.items():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(OSError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
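# --- Illustrative sketch (not part of the original test module) ---------------
# TransientResource converts a matching exception raised inside its ``with`` block
# into ResourceDenied, so flaky network failures skip a test instead of failing it.
# The OSError below is raised manually purely for demonstration; this helper is
# never called by the tests.
def _demo_transient_resource():
    guard = TransientResource(OSError, errno=errno.ETIMEDOUT)
    try:
        with guard:
            raise OSError(errno.ETIMEDOUT, "simulated timeout")
    except support.ResourceDenied:
        return True
    return False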
class AuthTests(unittest.TestCase):
"""Tests urllib2 authentication features."""
## Disabled at the moment since there is no page under python.org which
## could be used to HTTP authentication.
#
# def test_basic_auth(self):
# import http.client
#
# test_url = "http://www.python.org/test/test_urllib2/basic_auth"
# test_hostport = "www.python.org"
# test_realm = 'Test Realm'
# test_user = 'test.test_urllib2net'
# test_password = 'blah'
#
# # failure
# try:
# _urlopen_with_retry(test_url)
# except urllib2.HTTPError, exc:
# self.assertEqual(exc.code, 401)
# else:
# self.fail("urlopen() should have failed with 401")
#
# # success
# auth_handler = urllib2.HTTPBasicAuthHandler()
# auth_handler.add_password(test_realm, test_hostport,
# test_user, test_password)
# opener = urllib2.build_opener(auth_handler)
# f = opener.open('http://localhost/')
# response = _urlopen_with_retry("http://www.python.org/")
#
# # The 'userinfo' URL component is deprecated by RFC 3986 for security
# # reasons, let's not implement it! (it's already implemented for proxy
# # specification strings (that is, URLs or authorities specifying a
# # proxy), so we must keep that)
# self.assertRaises(http.client.InvalidURL,
# urllib2.urlopen, "http://evil:thing@example.com")
class CloseSocketTest(unittest.TestCase):
def test_close(self):
# clear _opener global variable
self.addCleanup(urllib.request.urlcleanup)
# calling .close() on urllib2's response objects should close the
# underlying socket
url = support.TEST_HTTP_URL
with socket_helper.transient_internet(url):
response = _urlopen_with_retry(url)
sock = response.fp
self.assertFalse(sock.closed)
response.close()
self.assertTrue(sock.closed)
class OtherNetworkTests(unittest.TestCase):
def setUp(self):
if 0: # for debugging
import logging
logger = logging.getLogger("test_urllib2net")
logger.addHandler(logging.StreamHandler())
# XXX The rest of these tests aren't very good -- they don't check much.
# They do sometimes catch some major disasters, though.
def test_ftp(self):
urls = [
'ftp://www.pythontest.net/README',
('ftp://www.pythontest.net/non-existent-file',
None, urllib.error.URLError),
]
self._test_urls(urls, self._extra_handlers())
def test_file(self):
TESTFN = os_helper.TESTFN
f = open(TESTFN, 'w')
try:
f.write('hi there\n')
f.close()
urls = [
'file:' + sanepathname2url(os.path.abspath(TESTFN)),
('file:///nonsensename/etc/passwd', None,
urllib.error.URLError),
]
self._test_urls(urls, self._extra_handlers(), retry=True)
finally:
os.remove(TESTFN)
self.assertRaises(ValueError, urllib.request.urlopen,'./relative_path/to/file')
# XXX Following test depends on machine configurations that are internal
# to CNRI. Need to set up a public server with the right authentication
# configuration for test purposes.
## def test_cnri(self):
## if socket.gethostname() == 'bitdiddle':
## localhost = 'bitdiddle.cnri.reston.va.us'
## elif socket.gethostname() == 'bitdiddle.concentric.net':
## localhost = 'localhost'
## else:
## localhost = None
## if localhost is not None:
## urls = [
## 'file://%s/etc/passwd' % localhost,
## 'http://%s/simple/' % localhost,
## 'http://%s/digest/' % localhost,
## 'http://%s/not/found.h' % localhost,
## ]
## bauth = HTTPBasicAuthHandler()
## bauth.add_password('basic_test_realm', localhost, 'jhylton',
## 'password')
## dauth = HTTPDigestAuthHandler()
## dauth.add_password('digest_test_realm', localhost, 'jhylton',
## 'password')
## self._test_urls(urls, self._extra_handlers()+[bauth, dauth])
def test_urlwithfrag(self):
urlwith_frag = "http://www.pythontest.net/index.html#frag"
with socket_helper.transient_internet(urlwith_frag):
req = urllib.request.Request(urlwith_frag)
res = urllib.request.urlopen(req)
self.assertEqual(res.geturl(),
"http://www.pythontest.net/index.html#frag")
def test_redirect_url_withfrag(self):
redirect_url_with_frag = "http://www.pythontest.net/redir/with_frag/"
with socket_helper.transient_internet(redirect_url_with_frag):
req = urllib.request.Request(redirect_url_with_frag)
res = urllib.request.urlopen(req)
self.assertEqual(res.geturl(),
"http://www.pythontest.net/elsewhere/#frag")
def test_custom_headers(self):
url = support.TEST_HTTP_URL
with socket_helper.transient_internet(url):
opener = urllib.request.build_opener()
request = urllib.request.Request(url)
self.assertFalse(request.header_items())
opener.open(request)
self.assertTrue(request.header_items())
self.assertTrue(request.has_header('User-agent'))
request.add_header('User-Agent','Test-Agent')
opener.open(request)
self.assertEqual(request.get_header('User-agent'),'Test-Agent')
@unittest.skip('XXX: http://www.imdb.com is gone')
def test_sites_no_connection_close(self):
# Some sites do not send Connection: close header.
# Verify that those work properly. (#issue12576)
URL = 'http://www.imdb.com' # mangles Connection:close
with socket_helper.transient_internet(URL):
try:
with urllib.request.urlopen(URL) as res:
pass
except ValueError:
self.fail("urlopen failed for site not sending \
Connection:close")
else:
self.assertTrue(res)
req = urllib.request.urlopen(URL)
res = req.read()
self.assertTrue(res)
def _test_urls(self, urls, handlers, retry=True):
import time
import logging
debug = logging.getLogger("test_urllib2").debug
urlopen = urllib.request.build_opener(*handlers).open
if retry:
urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError)
for url in urls:
with self.subTest(url=url):
if isinstance(url, tuple):
url, req, expected_err = url
else:
req = expected_err = None
with socket_helper.transient_internet(url):
try:
f = urlopen(url, req, support.INTERNET_TIMEOUT)
# urllib.error.URLError is a subclass of OSError
except OSError as err:
if expected_err:
msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" %
(expected_err, url, req, type(err), err))
self.assertIsInstance(err, expected_err, msg)
else:
raise
else:
try:
with time_out, \
socket_peer_reset, \
ioerror_peer_reset:
buf = f.read()
debug("read %d bytes" % len(buf))
except TimeoutError:
print("<timeout: %s>" % url, file=sys.stderr)
f.close()
time.sleep(0.1)
def _extra_handlers(self):
handlers = []
cfh = urllib.request.CacheFTPHandler()
self.addCleanup(cfh.clear_cache)
cfh.setTimeout(1)
handlers.append(cfh)
return handlers
class TimeoutTest(unittest.TestCase):
def setUp(self):
# clear _opener global variable
self.addCleanup(urllib.request.urlcleanup)
def test_http_basic(self):
self.assertIsNone(socket.getdefaulttimeout())
url = support.TEST_HTTP_URL
with socket_helper.transient_internet(url, timeout=None):
u = _urlopen_with_retry(url)
self.addCleanup(u.close)
self.assertIsNone(u.fp.raw._sock.gettimeout())
def test_http_default_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
url = support.TEST_HTTP_URL
with socket_helper.transient_internet(url):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(url)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(u.fp.raw._sock.gettimeout(), 60)
def test_http_no_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
url = support.TEST_HTTP_URL
with socket_helper.transient_internet(url):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(url, timeout=None)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(u.fp.raw._sock.gettimeout())
def test_http_timeout(self):
url = support.TEST_HTTP_URL
with socket_helper.transient_internet(url):
u = _urlopen_with_retry(url, timeout=120)
self.addCleanup(u.close)
self.assertEqual(u.fp.raw._sock.gettimeout(), 120)
FTP_HOST = 'ftp://www.pythontest.net/'
def test_ftp_basic(self):
self.assertIsNone(socket.getdefaulttimeout())
with socket_helper.transient_internet(self.FTP_HOST, timeout=None):
u = _urlopen_with_retry(self.FTP_HOST)
self.addCleanup(u.close)
self.assertIsNone(u.fp.fp.raw._sock.gettimeout())
def test_ftp_default_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
with socket_helper.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(self.FTP_HOST)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
def test_ftp_no_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
with socket_helper.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(self.FTP_HOST, timeout=None)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(u.fp.fp.raw._sock.gettimeout())
def test_ftp_timeout(self):
with socket_helper.transient_internet(self.FTP_HOST):
u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
self.addCleanup(u.close)
self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_20627
|
class Solution(object):
def preorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
if not root:
return []
values = []
self.visit(root, values)
return values
def visit(self, root, values):
values.append(root.val)
for child in root.children:
self.visit(child, values)
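# --- Illustrative usage sketch ------------------------------------------------
# Solution.preorder expects nodes exposing ``val`` and ``children``; the minimal
# Node class below is only for this demonstration and is not part of the original
# LeetCode scaffolding.
class Node:
    def __init__(self, val, children=None):
        self.val = val
        self.children = children or []


if __name__ == "__main__":
    #        1
    #      / | \
    #     3  2  4
    #    / \
    #   5   6
    root = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
    assert Solution().preorder(root) == [1, 3, 5, 6, 2, 4]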
|
the-stack_106_20629
|
""""
Google cloud storage
"""
from io import BytesIO
from google.cloud import storage
from google.cloud.exceptions import NotFound
from sqlalchemy_media.exceptions import GCPError
from sqlalchemy_media.optionals import ensure_gcs
from .base import Store
from ..typing_ import FileLike
class GoogleCloudStorge(Store):
"""
Google Cloud Storage implementation of the Store base class.
"""
def __init__(self, bucket: str, service_account_json: str, acl: str = 'private'):
"""
Initialize GoogleCloudStorge
:param bucket: bucket name
:param service_account_json: service account json file(credential)
:param acl: public or private
"""
self.bucket = bucket
self._storage_client = storage.Client.from_service_account_json(service_account_json)
self.acl = acl
def _get_or_create_bucket(self):
"""
Get bucket if exist else create a bucket
:return bucket object
"""
ensure_gcs()
try:
return self._storage_client.get_bucket(self.bucket)
except NotFound:
return self._storage_client.create_bucket(self.bucket)
def _upload_file(self, file_name: str, data: str):
"""
Create a blob and upload file,
add acl if needed
"""
ensure_gcs()
try:
bucket = self._get_or_create_bucket()
blob = bucket.blob(file_name)
blob.upload_from_string(data)
if self.acl == 'public':
blob.make_public()
except Exception as e:
raise GCPError(e)
def locate(self, attachment) -> str:
"""
Get Download link of a file by its name
"""
ensure_gcs()
try:
bucket = self._get_or_create_bucket()
return bucket.blob(attachment).public_url
except Exception as e:
raise GCPError(e)
def open(self, filename: str, mode: str = 'rb') -> FileLike:
"""
Download a file as byte
"""
ensure_gcs()
try:
bucket = self._get_or_create_bucket()
blob = bucket.blob(filename)
file_bytes = blob.download_as_bytes()
return BytesIO(file_bytes)
except Exception as e:
raise GCPError(e)
def delete(self, filename: str) -> None:
"""
Delete a file by its name
"""
ensure_gcs()
try:
bucket = self._get_or_create_bucket()
blob = bucket.blob(filename)
blob.delete()
except Exception as e:
raise GCPError(e)
def put(self, filename: str, stream: FileLike) -> int:
"""
Put files into Google cloud storage
"""
ensure_gcs()
data = stream.read()
self._upload_file(filename, data)
return len(data)
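# --- Illustrative usage sketch ------------------------------------------------
# The bucket name and service-account JSON path below are placeholders; running
# this for real requires valid GCP credentials and network access.
if __name__ == "__main__":
    store = GoogleCloudStorge(
        bucket="my-example-bucket",                    # hypothetical bucket
        service_account_json="service_account.json",   # hypothetical credential file
        acl="private",
    )
    size = store.put("hello.txt", BytesIO(b"hello world"))
    print(size, store.locate("hello.txt"))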
|
the-stack_106_20631
|
# -*- coding: utf-8 -*-
"""
@File: rnns.py
@Copyright: 2019 Michael Zhu
@License:the Apache License, Version 2.0
@Author:Michael Zhu
@version:
@Date:
@Desc:
"""
import torch
import torch.nn as nn
class RnnEncoder(nn.Module):
"""
A ``RnnEncoder`` is an RNN layer. As a
:class:`Seq2SeqEncoder`, the input to this module is of shape ``(batch_size, num_tokens,
input_dim)``, and the output is of shape ``(batch_size, num_tokens, output_dim)``.
Parameters
----------
input_dim : ``int``
input dimension
output_dim: ``int``
output dimension; must be even if ``bidirectional`` is True (the hidden size is ``output_dim // 2``)
rnn_name : ``str``, default=``"lstm"``
name of the rnn network, either "lstm" or "gru"
bidirectional: ``bool``, default=``True``
whether the rnn is bidirectional
dropout: ``float``, default=``None``
dropout rate
normalizer: ``str``, default = ``None``
name of the normalization we use
affine_for_normalizer: bool = False
whether affine is used in the normalization
"""
def __init__(self,
input_dim: int,
output_dim: int,
rnn_name: str = "lstm",
bidirectional: bool = True, ) -> None:
super(RnnEncoder, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.rnn_name = rnn_name
self.bidirectional = bidirectional
if bidirectional:
assert output_dim % 2 == 0
hidden_size = output_dim // 2
else:
hidden_size = output_dim
if rnn_name == "lstm":
self._rnn = torch.nn.LSTM(
input_dim,
hidden_size,
num_layers=1,
batch_first=True,
bidirectional=bidirectional,
bias=False
)
else:
self._rnn = torch.nn.GRU(
input_dim,
hidden_size,
num_layers=1,
batch_first=True,
bidirectional=bidirectional,
bias=False
)
def forward(self, input_tensors, mask=None): # pylint: disable=arguments-differ
# if mask is not None:
# input_tensors = input_tensors * mask.unsqueeze(-1).float()
encoded_output, _ = self._rnn(input_tensors)
return encoded_output
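# --- Illustrative usage sketch ------------------------------------------------
# Quick shape check: with bidirectional=True the output dimension must be even,
# and the encoder maps (batch, num_tokens, input_dim) -> (batch, num_tokens, output_dim).
if __name__ == "__main__":
    encoder = RnnEncoder(input_dim=16, output_dim=32, rnn_name="lstm", bidirectional=True)
    x = torch.randn(4, 10, 16)   # (batch, num_tokens, input_dim)
    y = encoder(x)
    print(y.shape)               # torch.Size([4, 10, 32])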
|
the-stack_106_20632
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_crossing02.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [43812352, 43814272]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_x_axis({'crossing': 3})
chart.set_y_axis({'crossing': 8})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
the-stack_106_20634
|
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def pytest_addoption(parser):
parser.addoption('--language', action='store', default='en',
help="Choose language for page")
@pytest.fixture(scope="function")
def browser(request):
print("\nstart browser for test..")
options = Options()
language = request.config.getoption("language")
options.add_experimental_option('prefs',
{'intl.accept_languages': language})
browser = webdriver.Chrome(options=options)
yield browser
print("\nquit browser..")
browser.quit()
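# --- Illustrative usage sketch ------------------------------------------------
# Shown here only for illustration; in practice a test like this would live in its
# own module (e.g. a hypothetical test_language.py) next to this conftest.py. It
# requests the ``browser`` fixture and picks up the option via ``pytest --language=es``.
def test_example_opens_python_org(browser):
    browser.get("https://www.python.org/")
    assert "Python" in browser.title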
|
the-stack_106_20637
|
# -*- coding: utf-8 -*-
import os
import time
import unittest
from configparser import ConfigParser
from GenomeReport.GenomeReportImpl import GenomeReport
from GenomeReport.GenomeReportServer import MethodContext
from GenomeReport.authclient import KBaseAuth as _KBaseAuth
from installed_clients.WorkspaceClient import Workspace
class GenomeReportTest(unittest.TestCase):
"""
Testing functions defined in GenomeReport
"""
@classmethod
def setUpClass(cls):
token = os.environ.get('KB_AUTH_TOKEN', None)
config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('GenomeReport'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'GenomeReport',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = Workspace(cls.wsURL)
cls.serviceImpl = GenomeReport(cls.cfg)
cls.scratch = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
suffix = int(time.time() * 1000)
cls.wsName = "test_GenomeReport_" + str(suffix)
ret = cls.wsClient.create_workspace({'workspace': cls.wsName}) # noqa
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
# NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa
def test_create_genome_report(self):
# Prepare test objects in workspace if needed using
# self.getWsClient().save_objects({'workspace': self.getWsName(),
# 'objects': []})
#
# Run your method by
# ret = self.getImpl().your_method(self.getContext(), parameters...)
#
# Check returned data with
# self.assertEqual(ret[...], ...) or other unittest methods
#
# Note: The object_ref used here has to be a ref to an object in CI
ret = self.serviceImpl.create_genome_report(self.ctx, {'output_workspace': self.wsName,
'object_ref': '15792/185774/1',
'annotated_by': 'Prokka'})
|
the-stack_106_20638
|
import os
import json
import time
import datetime
import os.path
from pathlib import Path
from loguru import logger
from pycoingecko import CoinGeckoAPI
# NOTE: this is a WIP
def _get_coins_list(use_cache=True, filename="_coingecko/coins_list.json"):
cg = CoinGeckoAPI()
if use_cache and os.path.isfile(filename):
logger.info(f"Loading from cache: {filename}")
with open(filename) as f:
return json.load(f)
crawl_date = str(datetime.datetime.now())
coins = cg.get_coins_list()
for coin in coins:
coin["_crawl_datetime"] = crawl_date
folder = os.path.dirname(filename)
Path(folder).mkdir(parents=True, exist_ok=True)
coin_json = json.dumps(coins, indent=4)
with open(filename, "w") as f:
f.write(coin_json)
return coins
def _save_coin_by_id(coin_id, filename=None, skip_if_exists=True, throttle=3):
cg = CoinGeckoAPI()
if filename is None:
filename = f"_coingecko/id_{coin_id}.json"
if skip_if_exists and os.path.isfile(filename):
logger.info(f"Skipping crawl of {coin_id}...")
return
logger.info(f"Crawling {coin_id}...")
if throttle is not None:
time.sleep(throttle)
coin = cg.get_coin_by_id(coin_id)
coin["_crawl_datetime"] = str(datetime.datetime.now())
coin_json = json.dumps(coin, indent=4)
with open(filename, "w") as f:
f.write(coin_json)
def crawl_coins_by_cg_ids(coin_ids):
for idx, coin_id in enumerate(coin_ids):
_save_coin_by_id(coin_id)
def get_marketcap_by_cg_id(cg_id, currency="usd"):
cg_api_filepath = "_coingecko"
# TODO: handle crawl from api if file is too old?
filename = f"{cg_api_filepath}/id_{cg_id}.json"
if not os.path.isfile(filename):
_save_coin_by_id(cg_id, filename)
with open(filename) as f:
data = json.load(f)
crawl_datetime = data["_crawl_datetime"]
try:
market_cap_rank = data["market_cap_rank"]
except KeyError as ex:
logger.error(f"get_marketcap_by_cg_id 'market_cap_rank' KeyError for file {filename}: {ex}")
return -1, -1, crawl_datetime
try:
market_cap = data["market_data"]["market_cap"][currency]
except KeyError as ex:
logger.error(f"get_marketcap_by_cg_id 'market_cap' KeyError for file {filename} and currency {currency}: {ex}")
return -1, market_cap_rank, crawl_datetime
return market_cap, market_cap_rank, crawl_datetime
def get_coins_by_symbol(symbol, currency="usd"):
if not isinstance(symbol, str):
return None
coins = []
for coin in _get_coins_list():
if coin["symbol"].lower() == symbol.lower():
coin_id = coin["id"]
market_cap, market_cap_rank, crawl_datetime = get_marketcap_by_cg_id(coin_id, currency)
if market_cap > 0:
coin["market_cap"] = market_cap
coin["market_cap_rank"] = market_cap_rank
coin["market_cap_datetime"] = crawl_datetime
coins.append(coin)
if len(coins) == 0:
logger.error(f"get_coins_by_symbol couldn't get coin by symbol '{symbol}'")
return None
best_coin = coins[0]
for coin in coins:
if coin["market_cap_rank"] < best_coin["market_cap_rank"]:
best_coin = coin
return best_coin
def get_coins_by_symbols(symbols, currency="usd"):
results = []
for symbol in symbols:
best_coin = get_coins_by_symbol(symbol, currency)
results.append(best_coin)
return results
# Methods testing coingeko api:
def get_global(use_cache=True, filename="_coingecko/global.json"):
# https://api.coingecko.com/api/v3/global
cg = CoinGeckoAPI()
if use_cache and os.path.isfile(filename):
logger.info(f"Loading from cache: {filename}")
with open(filename) as f:
return json.load(f)
crawl_date = str(datetime.datetime.now())
response = cg.get_global()
response["_crawl_datetime"] = crawl_date
response_json = json.dumps(response, indent=4)
with open(filename, "w") as f:
f.write(response_json)
return response
def get_prices(ids, use_cache=False, filename="_coingecko/_prices.json"):
# https://api.coingecko.com/api/v3/simple/price?ids=ethereum%2Cbitcoin&vs_currencies=usd&include_market_cap=true&include_24hr_vol=true&include_24hr_change=true&include_last_updated_at=true
cg = CoinGeckoAPI()
if use_cache and os.path.isfile(filename):
logger.info(f"Loading from cache: {filename}")
with open(filename) as f:
return json.load(f)
crawl_date = str(datetime.datetime.now())
prices = cg.get_price(ids,
vs_currencies='usd', include_market_cap='true', include_24hr_vol='true',
include_24hr_change='true', include_last_updated_at='true')
if len(prices) != len(ids):
logger.warning(f"get_prices only matched {len(prices)} of {len(ids)} requested ids")
else:
logger.info(f"get_prices matched all {len(prices)} of {len(ids)} requested ids")
for coin in prices:
prices[coin]["_crawl_datetime"] = crawl_date
price_json = json.dumps(prices, indent=4)
with open(filename, "w") as f:
f.write(price_json)
return prices
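# --- Illustrative usage sketch ------------------------------------------------
# Requires network access to the CoinGecko API and writes cache files under
# ``_coingecko/``; the symbol below is just an example.
if __name__ == "__main__":
    best = get_coins_by_symbol("btc")
    if best is not None:
        print(best["id"], best.get("market_cap_rank"), best.get("market_cap"))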
|
the-stack_106_20641
|
"""
Given a binary tree and a sum, find all root-to-leaf paths where each path's sum equals the given sum.
For example:
Given the below binary tree and sum = 22,
      5
     / \
    4   8
   /   / \
  11  13  4
 /  \    / \
7    2  5   1
return
[
[5,4,11,2],
[5,8,4,5]
]
"""
__author__ = 'Danyang'
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def pathSum(self, root, sum):
"""
:param root: TreeNode
:param sum: integer
:return: a list of lists of integers
"""
result = []
self.accumulatePathSum(root, sum, [], result)
return result
def accumulatePathSum(self, root, sum, cur_path, result):
"""
DFS
Similar to previous path sum
"""
# trivial
if not root:
return
sum = sum - root.val
cur_path.append(root.val)
# terminal condition
if sum == 0 and root.left is None and root.right is None:
result.append(list(cur_path)) # new copy
return
# dfs with pre-checking
if root.left: self.accumulatePathSum(root.left, sum, list(cur_path), result) # new copy
if root.right: self.accumulatePathSum(root.right, sum, list(cur_path), result) # new copy
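# --- Illustrative usage sketch ------------------------------------------------
# Builds the tree from the docstring above and checks that both root-to-leaf
# paths summing to 22 are returned.
if __name__ == "__main__":
    root = TreeNode(5)
    root.left, root.right = TreeNode(4), TreeNode(8)
    root.left.left = TreeNode(11)
    root.left.left.left, root.left.left.right = TreeNode(7), TreeNode(2)
    root.right.left, root.right.right = TreeNode(13), TreeNode(4)
    root.right.right.left, root.right.right.right = TreeNode(5), TreeNode(1)
    assert Solution().pathSum(root, 22) == [[5, 4, 11, 2], [5, 8, 4, 5]]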
|
the-stack_106_20642
|
import sys
sys.path.insert(0, '../')
from maskrcnn_benchmark.config_aurora import cfg
from predictor import AuroraDemo
import cv2
import matplotlib.pyplot as plt
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from PIL import Image
import numpy as np
import math
import os
save_results_folder = './results/'
if not os.path.exists(save_results_folder):
os.mkdir(save_results_folder)
config_file = "../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2_aurora_width.yaml"
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
aurora_demo = AuroraDemo(
cfg,
min_image_size=800,
confidence_threshold=0.5,
)
imgFolder = '/home/ljm/NiuChuang/AuroraObjectData/labeled2003_38044/'
imgNames = '/home/ljm/NiuChuang/AuroraObjectData/Alllabel2003_38044_arc.txt'
img_type = '.bmp'
f = open(imgNames, 'r')
lines = f.readlines()
num_img = len(lines)
mean_sum = 0
fig_id = 1
names = []
zenith_angles = []
arc_widths = []
show_prediction = False
show_details = False
one_fig = False
mode = 'line'
img_height = 440
img_width = 440
for i in range(0, num_img):
print(i)
name = lines[i][0:-1]
img_path = imgFolder + name + img_type
image_ori = cv2.imread(img_path)
angle = aurora_demo.compute_angle(image_ori, thresh_bdry_number=150)
if angle is None:
continue
image_ori = Image.fromarray(image_ori)
image = image_ori.rotate(angle)
image = np.asarray(image)
angle1 = aurora_demo.compute_angle(image, thresh_bdry_number=150)
if angle1 is None:
continue
angle = angle + angle1
image_r = image_ori.rotate(angle)
image = np.asarray(image_r)
if mode == 'line':
if one_fig:
plt.figure(fig_id)
fig_id += 1
zenith_angles_i, arc_widths_i = aurora_demo.compute_arc_zangle_width_intensity_line(image, show_details=show_details, one_fig=one_fig)
if mode == 'seg':
zenith_angles_i, arc_widths_i = aurora_demo.compute_arc_zangle_width(image, show_details=False)
zenith_angles += zenith_angles_i
arc_widths += arc_widths_i
names.append(name)
if show_prediction:
prediction = aurora_demo.run_on_opencv_image(image, angle=-angle)
plt.figure(fig_id)
fig_id += 1
plt.imshow(image_ori)
plt.axis('off')
line_v = np.array([[0, 219.5], [439, 219.5]])
sin_a = math.sin(-angle*math.pi/180.)
cos_a = math.cos(-angle*math.pi/180.)
mtx_r = np.array([[cos_a, sin_a], [-sin_a, cos_a]])
center_r = np.array([219.5, 219.5])
line_r = np.dot(line_v-center_r, mtx_r) + center_r
plt.plot(line_r[:, 1], line_r[:, 0], color='red')
plt.scatter([219.5], [219.5], s=40, c='r')
plt.figure(fig_id)
fig_id += 1
plt.imshow(prediction)
plt.axis('off')
plt.show()
save_zangle_width_file = '/home/ljm/NiuChuang/AuroraObjectData/zangle_width/agw_tr1058_te38044_arc_cnd2_' + mode + '.txt'
f = open(save_zangle_width_file, 'w')
for a in range(len(zenith_angles)):
f.write(str(zenith_angles[a]) + ' ' + str(arc_widths[a]) + '\n')
plt.figure(fig_id)
fig_id += 1
plt.scatter(zenith_angles, arc_widths, s=2)
plt.show()
|
the-stack_106_20644
|
import sys, yaml
with open("config/config.yaml", 'r') as stream:
data = yaml.safe_load(stream)
species = data["species"]
version = data["genome"]
ucsc2ensembl={}
for line in open(f"../resources/ChromosomeMappings/{version}_UCSC2ensembl.txt"):
linesplit=line.strip().split("\t")
if len(linesplit) <= 1: continue
ucsc2ensembl[linesplit[0]] = linesplit[1]
with open(f"../resources/ensembl/{species}.ensembl.vcf","w") as ensembl:
chrs={}
max_chr=0
for line in sys.stdin:
# header
if line.startswith("#"):
sys.stdout.write(line)
continue
# change chr from UCSC to Ensembl
splitline = line.split("\t")
if len(splitline) > 1 and splitline[0] in ucsc2ensembl:
splitline[0] = ucsc2ensembl[splitline[0]]
if splitline[0].isdigit() and int(splitline[0]) > max_chr:
max_chr = int(splitline[0])
lineline="\t".join(splitline)
if not splitline[0] in chrs:
chrs[splitline[0]] = [lineline]
else:
chrs[splitline[0]].append(lineline)
# order and output
ordered = []
chrn = [str(x) for x in range(1, max_chr + 1)]
chrn.extend(["X","Y","MT"])
chrn.extend([ccc for ccc in chrs.keys() if ccc.startswith("GL")])
chrn.extend([ccc for ccc in chrs.keys() if ccc.startswith("KI")])
otherchr = [ccc for ccc in chrs.keys() if ccc not in chrn]
chrn.extend(otherchr)
for chr in chrn:
if chr in chrs:
for line in chrs[chr]:
sys.stdout.write(line)
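# --- Illustrative sketch (not used by the script above) ------------------------
# The output loop above emits contigs in the order 1..N, X, Y, MT, GL*, KI*, then
# any remaining names; this tiny helper reproduces that ordering for a toy input.
def _demo_contig_order(keys, max_chr):
    order = [str(x) for x in range(1, max_chr + 1)] + ["X", "Y", "MT"]
    order += [k for k in keys if k.startswith("GL")]
    order += [k for k in keys if k.startswith("KI")]
    order += [k for k in keys if k not in order]
    return [k for k in order if k in keys]

# _demo_contig_order({"GL000195.1", "2", "X", "1"}, 2) -> ['1', '2', 'X', 'GL000195.1']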
|
the-stack_106_20645
|
import flask
import sqlite3
from flask import g
from flask import request, jsonify
app = flask.Flask(__name__)
app.config["DEBUG"] = True
DATABASE = './database.db'
TABLE_NAME = "books"
COLUMN_ID_NAME = "id"
COLUMN_TITLE_NAME = "title"
COLUMN_AUTHOR_NAME = "author"
COLUMN_CATEGORY_NAME = "category"
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def create_db_tables_if_not_exists():
cur = get_db().cursor()
cur.execute(
"create table if not exists " + TABLE_NAME + " ("
+ COLUMN_ID_NAME + " INTEGER " + " PRIMARY KEY AUTOINCREMENT, "
+ COLUMN_TITLE_NAME + " TEXT " + " NOT NULL, "
+ COLUMN_AUTHOR_NAME + " TEXT " + " NOT NULL, "
+ COLUMN_CATEGORY_NAME + " TEXT " + " NOT NULL "
+ ") "
)
@app.route('/', methods=['GET'])
def home():
return "<h1>This is a home test</h1>"
@app.route('/books', methods=['GET'])
def list_books():
create_db_tables_if_not_exists()
cur = get_db().cursor()
all_books = cur.execute("select * from " + TABLE_NAME).fetchall()
print(all_books)
if len(all_books) > 0:
return jsonify(all_books)
else:
return "<h1>No books found</h1>"
@app.route('/books', methods=['POST'])
def create_book():
print(request.form)
create_db_tables_if_not_exists()
cur = get_db().cursor()
query_state = "insert into " + TABLE_NAME + "(" + COLUMN_TITLE_NAME + "," + COLUMN_AUTHOR_NAME
query_state += "," + COLUMN_CATEGORY_NAME + ")"
query_state += " values(" + "\'" + request.form["t"]
query_state += "\'" + "," + "\'" + request.form["a"] + "\'"
query_state += "," + "\'" + request.form["c"] + "\'" + ");"
print("Stat: ", query_state)
cur.execute(query_state)
get_db().commit()
book = cur.execute(
"SELECT * from " + TABLE_NAME + " where " + COLUMN_ID_NAME + " =last_insert_rowid()"
).fetchone()
return jsonify(book)
@app.route('/books/<int:id>', methods=['GET'])
def show_book(id):
print(id)
cur = get_db().cursor()
book = cur.execute(
"SELECT * from " + TABLE_NAME + " where " + COLUMN_ID_NAME + "=" + str(id)
).fetchone()
return jsonify(book)
@app.route('/books/<int:id>', methods=['DELETE'])
def delete_book(id):
print(id)
cur = get_db().cursor()
cur.execute(
"delete from " + TABLE_NAME + " where " + COLUMN_ID_NAME + "=" + str(id)
)
get_db().commit()
return "Affected rows: " + str(cur.rowcount)
@app.route('/books/<int:id>', methods=['PUT', 'PATCH'])
def update_book(id):
print("REQUEST: ", id)
cur = get_db().cursor()
data = (
request.form["t"],
request.form["a"],
request.form["c"],
str(id),
)
sql_stat = "UPDATE " + TABLE_NAME
sql_stat += " SET "
sql_stat += "\'" + COLUMN_TITLE_NAME + "\'" + " = ?, "
sql_stat += "\'" + COLUMN_AUTHOR_NAME + "\'" + " = ?, "
sql_stat += "\'" + COLUMN_CATEGORY_NAME + "\'" + " = ? "
sql_stat += " WHERE " + COLUMN_ID_NAME + " = ?"
sql_stat += ";"
print("SQL: ", sql_stat)
print("data: ", data)
cur.execute(sql_stat, data)
get_db().commit()
return "Affected rows: " + str(cur.rowcount)
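# --- Illustrative client sketch (assumes the server is running) -----------------
# Exercises the endpoints above from a separate process; it presumes Flask's
# default address http://127.0.0.1:5000 and the third-party ``requests`` package.
# The function is defined but never called here.
def _demo_client():
    import requests
    created = requests.post("http://127.0.0.1:5000/books",
                            data={"t": "Dune", "a": "Frank Herbert", "c": "sci-fi"}).json()
    print(requests.get("http://127.0.0.1:5000/books").json())
    requests.delete("http://127.0.0.1:5000/books/" + str(created[0]))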
app.run()
|
the-stack_106_20648
|
import os
import re
from typing import Optional, Tuple
# Github has two URLs, one that is https and one that is ssh
GITHUB_HTTP_URL = r"^https://(www\.)?github.com/(.+)/(.+).git$"
GITHUB_SSH_URL = r"^[email protected]:(.+)/(.+).git$"
# We don't support git < 2.7, because we can't get repo info without
# talking to the remote server, which results in the user being prompted
# for credentials.
MIN_GIT_VERSION = (2, 7, 0)
class GitRepo:
def __init__(self, path):
# If we have a valid repo, git_version will be a tuple of 3+ ints:
# (major, minor, patch, possible_additional_patch_number)
self.git_version = None # type: Optional[Tuple[int, ...]]
try:
import git # type: ignore[import]
self.repo = git.Repo(path, search_parent_directories=True)
self.git_version = self.repo.git.version_info
if self.git_version >= MIN_GIT_VERSION:
git_root = self.repo.git.rev_parse("--show-toplevel")
self.module = os.path.relpath(path, git_root)
except:
# The git repo must be invalid for the following reasons:
# * git binary or GitPython not installed
# * No .git folder
# * Corrupted .git folder
# * Path is invalid
self.repo = None
def is_valid(self) -> bool:
"""True if there's a git repo here, and git.version >= MIN_GIT_VERSION."""
return (
self.repo is not None
and self.git_version is not None
and self.git_version >= MIN_GIT_VERSION
)
@property
def tracking_branch(self):
if not self.is_valid():
return None
if self.is_head_detached:
return None
return self.repo.active_branch.tracking_branch()
@property
def untracked_files(self):
return self.repo.untracked_files
@property
def is_head_detached(self):
return self.repo.head.is_detached
@property
def uncommitted_files(self):
if not self.is_valid():
return None
return [item.a_path for item in self.repo.index.diff(None)]
@property
def ahead_commits(self):
if not self.is_valid():
return None
try:
remote, branch_name = self.get_tracking_branch_remote()
remote_branch = "/".join([remote.name, branch_name])
return list(self.repo.iter_commits(f"{remote_branch}..{branch_name}"))
except:
return list()
def get_tracking_branch_remote(self):
if not self.is_valid():
return None
tracking_branch = self.tracking_branch
if tracking_branch is None:
return None
remote_name, *branch = tracking_branch.name.split("/")
branch_name = "/".join(branch)
return self.repo.remote(remote_name), branch_name
def is_github_repo(self):
if not self.is_valid():
return False
remote_info = self.get_tracking_branch_remote()
if remote_info is None:
return False
remote, _branch = remote_info
for url in remote.urls:
if (
re.match(GITHUB_HTTP_URL, url) is not None
or re.match(GITHUB_SSH_URL, url) is not None
):
return True
return False
def get_repo_info(self):
if not self.is_valid():
return None
remote_info = self.get_tracking_branch_remote()
if remote_info is None:
return None
remote, branch = remote_info
repo = None
for url in remote.urls:
https_matches = re.match(GITHUB_HTTP_URL, url)
ssh_matches = re.match(GITHUB_SSH_URL, url)
if https_matches is not None:
repo = f"{https_matches.group(2)}/{https_matches.group(3)}"
break
if ssh_matches is not None:
repo = f"{ssh_matches.group(1)}/{ssh_matches.group(2)}"
break
if repo is None:
return None
return repo, branch, self.module
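# --- Illustrative usage sketch ------------------------------------------------
# Requires GitPython and git >= 2.7; "." is just an example path and may or may
# not sit inside a GitHub-backed repository.
if __name__ == "__main__":
    repo = GitRepo(".")
    if repo.is_valid():
        print("github repo:", repo.is_github_repo())
        print("repo info:", repo.get_repo_info())
    else:
        print("no usable git repository found at '.'")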
|
the-stack_106_20649
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import http.client as http
from oslo_serialization import jsonutils
import webob
from glance.api import authorization
from glance.common import auth
from glance.common import exception
from glance.common import timeutils
import glance.domain
from glance.tests.unit import utils as unittest_utils
from glance.tests import utils
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc'
class FakeResponse(object):
"""
Simple class that masks the inconsistency between
webob.Response.status_int and httplib.Response.status
"""
def __init__(self, resp):
self.resp = resp
def __getitem__(self, key):
return self.resp.headers.get(key)
@property
def status(self):
return self.resp.status_int
class V2Token(object):
def __init__(self):
self.tok = self.base_token
def add_service_no_type(self):
catalog = self.tok['access']['serviceCatalog']
service_type = {"name": "glance_no_type"}
catalog.append(service_type)
service = catalog[-1]
service['endpoints'] = [self.base_endpoint]
def add_service(self, s_type, region_list=None):
if region_list is None:
region_list = []
catalog = self.tok['access']['serviceCatalog']
service_type = {"type": s_type, "name": "glance"}
catalog.append(service_type)
service = catalog[-1]
endpoint_list = []
if not region_list:
endpoint_list.append(self.base_endpoint)
else:
for region in region_list:
endpoint = self.base_endpoint
endpoint['region'] = region
endpoint_list.append(endpoint)
service['endpoints'] = endpoint_list
@property
def token(self):
return self.tok
@property
def base_endpoint(self):
return {
"adminURL": "http://localhost:9292",
"internalURL": "http://localhost:9292",
"publicURL": "http://localhost:9292"
}
@property
def base_token(self):
return {
"access": {
"token": {
"expires": "2010-11-23T16:40:53.321584",
"id": "5c7f8799-2e54-43e4-851b-31f81871b6c",
"tenant": {"id": "1", "name": "tenant-ok"}
},
"serviceCatalog": [
],
"user": {
"id": "2",
"roles": [{
"tenantId": "1",
"id": "1",
"name": "Admin"
}],
"name": "joeadmin"
}
}
}
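# --- Illustrative sketch (not exercised by the tests below) ---------------------
# V2Token fabricates a Keystone v2 service catalog; add_service() appends an
# "image" entry whose endpoints the auth plugin under test can then discover.
def _demo_v2_token():
    tok = V2Token()
    tok.add_service('image', ['RegionOne', 'RegionTwo'])
    catalog = tok.token['access']['serviceCatalog']
    assert catalog[-1]['type'] == 'image'
    assert catalog[-1]['endpoints'][0]['publicURL'] == 'http://localhost:9292'
    return catalog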
class TestKeystoneAuthPlugin(utils.BaseTestCase):
"""Test that the Keystone auth plugin works properly"""
def setUp(self):
super(TestKeystoneAuthPlugin, self).setUp()
def test_get_plugin_from_strategy_keystone(self):
strategy = auth.get_plugin_from_strategy('keystone')
self.assertIsInstance(strategy, auth.KeystoneStrategy)
self.assertTrue(strategy.configure_via_auth)
def test_get_plugin_from_strategy_keystone_configure_via_auth_false(self):
strategy = auth.get_plugin_from_strategy('keystone',
configure_via_auth=False)
self.assertIsInstance(strategy, auth.KeystoneStrategy)
self.assertFalse(strategy.configure_via_auth)
def test_required_creds(self):
"""
Test that plugin created without required
credential pieces raises an exception
"""
bad_creds = [
{}, # missing everything
{
'username': 'user1',
'strategy': 'keystone',
'password': 'pass'
}, # missing auth_url
{
'password': 'pass',
'strategy': 'keystone',
'auth_url': 'http://localhost/v1'
}, # missing username
{
'username': 'user1',
'strategy': 'keystone',
'auth_url': 'http://localhost/v1'
}, # missing password
{
'username': 'user1',
'password': 'pass',
'auth_url': 'http://localhost/v1'
}, # missing strategy
{
'username': 'user1',
'password': 'pass',
'strategy': 'keystone',
'auth_url': 'http://localhost/v2.0/'
}, # v2.0: missing tenant
{
'username': None,
'password': 'pass',
'auth_url': 'http://localhost/v2.0/'
}, # None parameter
{
'username': 'user1',
'password': 'pass',
'auth_url': 'http://localhost/v2.0/',
'tenant': None
} # None tenant
]
for creds in bad_creds:
try:
plugin = auth.KeystoneStrategy(creds)
plugin.authenticate()
self.fail("Failed to raise correct exception when supplying "
"bad credentials: %r" % creds)
except exception.MissingCredentialError:
continue # Expected
def test_invalid_auth_url_v1(self):
"""
Test that a 400 during authenticate raises exception.AuthBadRequest
"""
def fake_do_request(*args, **kwargs):
resp = webob.Response()
resp.status = http.BAD_REQUEST
return FakeResponse(resp), ""
self.mock_object(auth.KeystoneStrategy, '_do_request', fake_do_request)
bad_creds = {
'username': 'user1',
'auth_url': 'http://localhost/badauthurl/',
'password': 'pass',
'strategy': 'keystone',
'region': 'RegionOne'
}
plugin = auth.KeystoneStrategy(bad_creds)
self.assertRaises(exception.AuthBadRequest, plugin.authenticate)
def test_invalid_auth_url_v2(self):
"""
Test that a 400 during authenticate raises exception.AuthBadRequest
"""
def fake_do_request(*args, **kwargs):
resp = webob.Response()
resp.status = http.BAD_REQUEST
return FakeResponse(resp), ""
self.mock_object(auth.KeystoneStrategy, '_do_request', fake_do_request)
bad_creds = {
'username': 'user1',
'auth_url': 'http://localhost/badauthurl/v2.0/',
'password': 'pass',
'tenant': 'tenant1',
'strategy': 'keystone',
'region': 'RegionOne'
}
plugin = auth.KeystoneStrategy(bad_creds)
self.assertRaises(exception.AuthBadRequest, plugin.authenticate)
def test_v1_auth(self):
"""Test v1 auth code paths"""
def fake_do_request(cls, url, method, headers=None, body=None):
if url.find("2.0") != -1:
self.fail("Invalid v1.0 token path (%s)" % url)
headers = headers or {}
resp = webob.Response()
if (headers.get('X-Auth-User') != 'user1' or
headers.get('X-Auth-Key') != 'pass'):
resp.status = http.UNAUTHORIZED
else:
resp.status = http.OK
resp.headers.update({"x-image-management-url": "example.com"})
return FakeResponse(resp), ""
self.mock_object(auth.KeystoneStrategy, '_do_request', fake_do_request)
unauthorized_creds = [
{
'username': 'wronguser',
'auth_url': 'http://localhost/badauthurl/',
'strategy': 'keystone',
'region': 'RegionOne',
'password': 'pass'
}, # wrong username
{
'username': 'user1',
'auth_url': 'http://localhost/badauthurl/',
'strategy': 'keystone',
'region': 'RegionOne',
'password': 'badpass'
}, # bad password...
]
for creds in unauthorized_creds:
try:
plugin = auth.KeystoneStrategy(creds)
plugin.authenticate()
self.fail("Failed to raise NotAuthenticated when supplying "
"bad credentials: %r" % creds)
except exception.NotAuthenticated:
continue # Expected
no_strategy_creds = {
'username': 'user1',
'auth_url': 'http://localhost/redirect/',
'password': 'pass',
'region': 'RegionOne'
}
try:
plugin = auth.KeystoneStrategy(no_strategy_creds)
plugin.authenticate()
self.fail("Failed to raise MissingCredentialError when "
"supplying no strategy: %r" % no_strategy_creds)
except exception.MissingCredentialError:
pass # Expected
good_creds = [
{
'username': 'user1',
'auth_url': 'http://localhost/redirect/',
'password': 'pass',
'strategy': 'keystone',
'region': 'RegionOne'
}
]
for creds in good_creds:
plugin = auth.KeystoneStrategy(creds)
self.assertIsNone(plugin.authenticate())
self.assertEqual("example.com", plugin.management_url)
# Assert it does not update management_url via auth response
for creds in good_creds:
plugin = auth.KeystoneStrategy(creds, configure_via_auth=False)
self.assertIsNone(plugin.authenticate())
self.assertIsNone(plugin.management_url)
def test_v2_auth(self):
"""Test v2 auth code paths"""
mock_token = None
def fake_do_request(cls, url, method, headers=None, body=None):
if (not url.rstrip('/').endswith('v2.0/tokens') or
url.count("2.0") != 1):
self.fail("Invalid v2.0 token path (%s)" % url)
creds = jsonutils.loads(body)['auth']
username = creds['passwordCredentials']['username']
password = creds['passwordCredentials']['password']
tenant = creds['tenantName']
resp = webob.Response()
if (username != 'user1' or password != 'pass' or
tenant != 'tenant-ok'):
resp.status = http.UNAUTHORIZED
else:
resp.status = http.OK
body = mock_token.token
return FakeResponse(resp), jsonutils.dumps(body)
mock_token = V2Token()
mock_token.add_service('image', ['RegionOne'])
self.mock_object(auth.KeystoneStrategy, '_do_request', fake_do_request)
unauthorized_creds = [
{
'username': 'wronguser',
'auth_url': 'http://localhost/v2.0',
'password': 'pass',
'tenant': 'tenant-ok',
'strategy': 'keystone',
'region': 'RegionOne'
}, # wrong username
{
'username': 'user1',
'auth_url': 'http://localhost/v2.0',
'password': 'badpass',
'tenant': 'tenant-ok',
'strategy': 'keystone',
'region': 'RegionOne'
}, # bad password...
{
'username': 'user1',
'auth_url': 'http://localhost/v2.0',
'password': 'pass',
'tenant': 'carterhayes',
'strategy': 'keystone',
'region': 'RegionOne'
}, # bad tenant...
]
for creds in unauthorized_creds:
try:
plugin = auth.KeystoneStrategy(creds)
plugin.authenticate()
self.fail("Failed to raise NotAuthenticated when supplying "
"bad credentials: %r" % creds)
except exception.NotAuthenticated:
continue # Expected
no_region_creds = {
'username': 'user1',
'tenant': 'tenant-ok',
'auth_url': 'http://localhost/redirect/v2.0/',
'password': 'pass',
'strategy': 'keystone'
}
plugin = auth.KeystoneStrategy(no_region_creds)
self.assertIsNone(plugin.authenticate())
self.assertEqual('http://localhost:9292', plugin.management_url)
# Add another image service, with a different region
mock_token.add_service('image', ['RegionTwo'])
try:
plugin = auth.KeystoneStrategy(no_region_creds)
plugin.authenticate()
self.fail("Failed to raise RegionAmbiguity when no region present "
"and multiple regions exist: %r" % no_region_creds)
except exception.RegionAmbiguity:
pass # Expected
wrong_region_creds = {
'username': 'user1',
'tenant': 'tenant-ok',
'auth_url': 'http://localhost/redirect/v2.0/',
'password': 'pass',
'strategy': 'keystone',
'region': 'NonExistentRegion'
}
try:
plugin = auth.KeystoneStrategy(wrong_region_creds)
plugin.authenticate()
self.fail("Failed to raise NoServiceEndpoint when supplying "
"wrong region: %r" % wrong_region_creds)
except exception.NoServiceEndpoint:
pass # Expected
no_strategy_creds = {
'username': 'user1',
'tenant': 'tenant-ok',
'auth_url': 'http://localhost/redirect/v2.0/',
'password': 'pass',
'region': 'RegionOne'
}
try:
plugin = auth.KeystoneStrategy(no_strategy_creds)
plugin.authenticate()
self.fail("Failed to raise MissingCredentialError when "
"supplying no strategy: %r" % no_strategy_creds)
except exception.MissingCredentialError:
pass # Expected
bad_strategy_creds = {
'username': 'user1',
'tenant': 'tenant-ok',
'auth_url': 'http://localhost/redirect/v2.0/',
'password': 'pass',
'region': 'RegionOne',
'strategy': 'keypebble'
}
try:
plugin = auth.KeystoneStrategy(bad_strategy_creds)
plugin.authenticate()
self.fail("Failed to raise BadAuthStrategy when supplying "
"bad auth strategy: %r" % bad_strategy_creds)
except exception.BadAuthStrategy:
pass # Expected
mock_token = V2Token()
mock_token.add_service('image', ['RegionOne', 'RegionTwo'])
good_creds = [
{
'username': 'user1',
'auth_url': 'http://localhost/v2.0/',
'password': 'pass',
'tenant': 'tenant-ok',
'strategy': 'keystone',
'region': 'RegionOne'
}, # auth_url with trailing '/'
{
'username': 'user1',
'auth_url': 'http://localhost/v2.0',
'password': 'pass',
'tenant': 'tenant-ok',
'strategy': 'keystone',
'region': 'RegionOne'
}, # auth_url without trailing '/'
{
'username': 'user1',
'auth_url': 'http://localhost/v2.0',
'password': 'pass',
'tenant': 'tenant-ok',
'strategy': 'keystone',
'region': 'RegionTwo'
} # Second region
]
for creds in good_creds:
plugin = auth.KeystoneStrategy(creds)
self.assertIsNone(plugin.authenticate())
self.assertEqual('http://localhost:9292', plugin.management_url)
ambiguous_region_creds = {
'username': 'user1',
'auth_url': 'http://localhost/v2.0/',
'password': 'pass',
'tenant': 'tenant-ok',
'strategy': 'keystone',
'region': 'RegionOne'
}
mock_token = V2Token()
# Add two identical services
mock_token.add_service('image', ['RegionOne'])
mock_token.add_service('image', ['RegionOne'])
try:
plugin = auth.KeystoneStrategy(ambiguous_region_creds)
plugin.authenticate()
self.fail("Failed to raise RegionAmbiguity when "
"non-unique regions exist: %r" % ambiguous_region_creds)
except exception.RegionAmbiguity:
pass
mock_token = V2Token()
mock_token.add_service('bad-image', ['RegionOne'])
good_creds = {
'username': 'user1',
'auth_url': 'http://localhost/v2.0/',
'password': 'pass',
'tenant': 'tenant-ok',
'strategy': 'keystone',
'region': 'RegionOne'
}
try:
plugin = auth.KeystoneStrategy(good_creds)
plugin.authenticate()
self.fail("Failed to raise NoServiceEndpoint when bad service "
"type encountered")
except exception.NoServiceEndpoint:
pass
mock_token = V2Token()
mock_token.add_service_no_type()
try:
plugin = auth.KeystoneStrategy(good_creds)
plugin.authenticate()
self.fail("Failed to raise NoServiceEndpoint when bad service "
"type encountered")
except exception.NoServiceEndpoint:
pass
try:
plugin = auth.KeystoneStrategy(good_creds,
configure_via_auth=False)
plugin.authenticate()
except exception.NoServiceEndpoint:
self.fail("NoServiceEndpoint was raised when authenticate "
"should not check for endpoint.")
class TestEndpoints(utils.BaseTestCase):
def setUp(self):
super(TestEndpoints, self).setUp()
self.service_catalog = [
{
'endpoint_links': [],
'endpoints': [
{
'adminURL': 'http://localhost:8080/',
'region': 'RegionOne',
'internalURL': 'http://internalURL/',
'publicURL': 'http://publicURL/',
},
],
'type': 'object-store',
'name': 'Object Storage Service',
}
]
def test_get_endpoint_with_custom_server_type(self):
endpoint = auth.get_endpoint(self.service_catalog,
service_type='object-store')
self.assertEqual('http://publicURL/', endpoint)
def test_get_endpoint_with_custom_endpoint_type(self):
endpoint = auth.get_endpoint(self.service_catalog,
service_type='object-store',
endpoint_type='internalURL')
self.assertEqual('http://internalURL/', endpoint)
def test_get_endpoint_raises_with_invalid_service_type(self):
self.assertRaises(exception.NoServiceEndpoint,
auth.get_endpoint,
self.service_catalog,
service_type='foo')
def test_get_endpoint_raises_with_invalid_endpoint_type(self):
self.assertRaises(exception.NoServiceEndpoint,
auth.get_endpoint,
self.service_catalog,
service_type='object-store',
endpoint_type='foo')
def test_get_endpoint_raises_with_invalid_endpoint_region(self):
self.assertRaises(exception.NoServiceEndpoint,
auth.get_endpoint,
self.service_catalog,
service_type='object-store',
endpoint_region='foo',
endpoint_type='internalURL')
class TestImageMutability(utils.BaseTestCase):
def setUp(self):
super(TestImageMutability, self).setUp()
self.image_factory = glance.domain.ImageFactory()
def _is_mutable(self, tenant, owner, is_admin=False):
context = glance.context.RequestContext(tenant=tenant,
is_admin=is_admin)
image = self.image_factory.new_image(owner=owner)
return authorization.is_image_mutable(context, image)
def test_admin_everything_mutable(self):
self.assertTrue(self._is_mutable(None, None, is_admin=True))
self.assertTrue(self._is_mutable(None, TENANT1, is_admin=True))
self.assertTrue(self._is_mutable(TENANT1, None, is_admin=True))
self.assertTrue(self._is_mutable(TENANT1, TENANT1, is_admin=True))
self.assertTrue(self._is_mutable(TENANT1, TENANT2, is_admin=True))
def test_no_tenant_nothing_mutable(self):
self.assertFalse(self._is_mutable(None, None))
self.assertFalse(self._is_mutable(None, TENANT1))
def test_regular_user(self):
self.assertFalse(self._is_mutable(TENANT1, None))
self.assertFalse(self._is_mutable(TENANT1, TENANT2))
self.assertTrue(self._is_mutable(TENANT1, TENANT1))
class TestImmutableImage(utils.BaseTestCase):
def setUp(self):
super(TestImmutableImage, self).setUp()
image_factory = glance.domain.ImageFactory()
self.context = glance.context.RequestContext(tenant=TENANT1)
image = image_factory.new_image(
image_id=UUID1,
name='Marvin',
owner=TENANT1,
disk_format='raw',
container_format='bare',
extra_properties={'foo': 'bar'},
tags=['ping', 'pong'],
)
self.image = authorization.ImmutableImageProxy(image, self.context)
def _test_change(self, attr, value):
self.assertRaises(exception.Forbidden,
setattr, self.image, attr, value)
self.assertRaises(exception.Forbidden,
delattr, self.image, attr)
def test_change_id(self):
self._test_change('image_id', UUID2)
def test_change_name(self):
self._test_change('name', 'Freddie')
def test_change_owner(self):
self._test_change('owner', TENANT2)
def test_change_min_disk(self):
self._test_change('min_disk', 100)
def test_change_min_ram(self):
self._test_change('min_ram', 1024)
def test_change_disk_format(self):
self._test_change('disk_format', 'vhd')
def test_change_container_format(self):
self._test_change('container_format', 'ova')
def test_change_visibility(self):
self._test_change('visibility', 'public')
def test_change_status(self):
self._test_change('status', 'active')
def test_change_created_at(self):
self._test_change('created_at', timeutils.utcnow())
def test_change_updated_at(self):
self._test_change('updated_at', timeutils.utcnow())
def test_change_locations(self):
self._test_change('locations', ['http://a/b/c'])
self.assertRaises(exception.Forbidden,
self.image.locations.append, 'http://a/b/c')
self.assertRaises(exception.Forbidden,
self.image.locations.extend, ['http://a/b/c'])
self.assertRaises(exception.Forbidden,
self.image.locations.insert, 'foo')
self.assertRaises(exception.Forbidden,
self.image.locations.pop)
self.assertRaises(exception.Forbidden,
self.image.locations.remove, 'foo')
self.assertRaises(exception.Forbidden,
self.image.locations.reverse)
self.assertRaises(exception.Forbidden,
self.image.locations.sort)
self.assertRaises(exception.Forbidden,
self.image.locations.__delitem__, 0)
self.assertRaises(exception.Forbidden,
self.image.locations.__delslice__, 0, 2)
self.assertRaises(exception.Forbidden,
self.image.locations.__setitem__, 0, 'foo')
self.assertRaises(exception.Forbidden,
self.image.locations.__setslice__,
0, 2, ['foo', 'bar'])
self.assertRaises(exception.Forbidden,
self.image.locations.__iadd__, 'foo')
self.assertRaises(exception.Forbidden,
self.image.locations.__imul__, 2)
def test_change_size(self):
self._test_change('size', 32)
def test_change_tags(self):
self.assertRaises(exception.Forbidden,
delattr, self.image, 'tags')
self.assertRaises(exception.Forbidden,
setattr, self.image, 'tags', ['king', 'kong'])
self.assertRaises(exception.Forbidden, self.image.tags.pop)
self.assertRaises(exception.Forbidden, self.image.tags.clear)
self.assertRaises(exception.Forbidden, self.image.tags.add, 'king')
self.assertRaises(exception.Forbidden, self.image.tags.remove, 'ping')
self.assertRaises(exception.Forbidden,
self.image.tags.update, set(['king', 'kong']))
self.assertRaises(exception.Forbidden,
self.image.tags.intersection_update, set([]))
self.assertRaises(exception.Forbidden,
self.image.tags.difference_update, set([]))
self.assertRaises(exception.Forbidden,
self.image.tags.symmetric_difference_update,
set([]))
def test_change_properties(self):
self.assertRaises(exception.Forbidden,
delattr, self.image, 'extra_properties')
self.assertRaises(exception.Forbidden,
setattr, self.image, 'extra_properties', {})
self.assertRaises(exception.Forbidden,
self.image.extra_properties.__delitem__, 'foo')
self.assertRaises(exception.Forbidden,
self.image.extra_properties.__setitem__, 'foo', 'b')
self.assertRaises(exception.Forbidden,
self.image.extra_properties.__setitem__, 'z', 'j')
self.assertRaises(exception.Forbidden,
self.image.extra_properties.pop)
self.assertRaises(exception.Forbidden,
self.image.extra_properties.popitem)
self.assertRaises(exception.Forbidden,
self.image.extra_properties.setdefault, 'p', 'j')
self.assertRaises(exception.Forbidden,
self.image.extra_properties.update, {})
def test_delete(self):
self.assertRaises(exception.Forbidden, self.image.delete)
def test_set_data(self):
self.assertRaises(exception.Forbidden,
self.image.set_data, 'blah', 4)
def test_deactivate_image(self):
self.assertRaises(exception.Forbidden, self.image.deactivate)
def test_reactivate_image(self):
self.assertRaises(exception.Forbidden, self.image.reactivate)
def test_get_data(self):
class FakeImage(object):
def get_data(self):
return 'tiddlywinks'
image = glance.api.authorization.ImmutableImageProxy(
FakeImage(), self.context)
self.assertEqual('tiddlywinks', image.get_data())
class TestImageFactoryProxy(utils.BaseTestCase):
def setUp(self):
super(TestImageFactoryProxy, self).setUp()
factory = glance.domain.ImageFactory()
self.context = glance.context.RequestContext(tenant=TENANT1)
self.image_factory = authorization.ImageFactoryProxy(factory,
self.context)
def test_default_owner_is_set(self):
image = self.image_factory.new_image()
self.assertEqual(TENANT1, image.owner)
def test_wrong_owner_cannot_be_set(self):
self.assertRaises(exception.Forbidden,
self.image_factory.new_image, owner=TENANT2)
def test_cannot_set_owner_to_none(self):
self.assertRaises(exception.Forbidden,
self.image_factory.new_image, owner=None)
def test_admin_can_set_any_owner(self):
self.context.is_admin = True
image = self.image_factory.new_image(owner=TENANT2)
self.assertEqual(TENANT2, image.owner)
def test_admin_can_set_owner_to_none(self):
self.context.is_admin = True
image = self.image_factory.new_image(owner=None)
self.assertIsNone(image.owner)
def test_admin_still_gets_default_tenant(self):
self.context.is_admin = True
image = self.image_factory.new_image()
self.assertEqual(TENANT1, image.owner)
class TestImageRepoProxy(utils.BaseTestCase):
class ImageRepoStub(object):
def __init__(self, fixtures):
self.fixtures = fixtures
def get(self, image_id):
for f in self.fixtures:
if f.image_id == image_id:
return f
else:
raise ValueError(image_id)
def list(self, *args, **kwargs):
return self.fixtures
def setUp(self):
super(TestImageRepoProxy, self).setUp()
image_factory = glance.domain.ImageFactory()
self.fixtures = [
image_factory.new_image(owner=TENANT1),
image_factory.new_image(owner=TENANT2, visibility='public'),
image_factory.new_image(owner=TENANT2),
]
self.context = glance.context.RequestContext(tenant=TENANT1)
image_repo = self.ImageRepoStub(self.fixtures)
self.image_repo = authorization.ImageRepoProxy(image_repo,
self.context)
def test_get_mutable_image(self):
image = self.image_repo.get(self.fixtures[0].image_id)
self.assertEqual(image.image_id, self.fixtures[0].image_id)
def test_get_immutable_image(self):
image = self.image_repo.get(self.fixtures[1].image_id)
self.assertRaises(exception.Forbidden,
setattr, image, 'name', 'Vince')
def test_list(self):
images = self.image_repo.list()
self.assertEqual(images[0].image_id, self.fixtures[0].image_id)
self.assertRaises(exception.Forbidden,
setattr, images[1], 'name', 'Wally')
self.assertRaises(exception.Forbidden,
setattr, images[2], 'name', 'Calvin')
class TestImmutableTask(utils.BaseTestCase):
def setUp(self):
super(TestImmutableTask, self).setUp()
task_factory = glance.domain.TaskFactory()
self.context = glance.context.RequestContext(tenant=TENANT2)
task_type = 'import'
image_id = 'fake_image_id'
user_id = 'fake_user'
request_id = 'fake_request_id'
owner = TENANT2
task = task_factory.new_task(task_type, owner, image_id,
user_id, request_id)
self.task = authorization.ImmutableTaskProxy(task)
def _test_change(self, attr, value):
self.assertRaises(
exception.Forbidden,
setattr,
self.task,
attr,
value
)
self.assertRaises(
exception.Forbidden,
delattr,
self.task,
attr
)
def test_change_id(self):
self._test_change('task_id', UUID2)
def test_change_type(self):
self._test_change('type', 'fake')
def test_change_status(self):
self._test_change('status', 'success')
def test_change_owner(self):
self._test_change('owner', 'fake')
def test_change_expires_at(self):
self._test_change('expires_at', 'fake')
def test_change_created_at(self):
self._test_change('created_at', 'fake')
def test_change_updated_at(self):
self._test_change('updated_at', 'fake')
def test_begin_processing(self):
self.assertRaises(
exception.Forbidden,
self.task.begin_processing
)
def test_succeed(self):
self.assertRaises(
exception.Forbidden,
self.task.succeed,
'result'
)
def test_fail(self):
self.assertRaises(
exception.Forbidden,
self.task.fail,
'message'
)
class TestImmutableTaskStub(utils.BaseTestCase):
def setUp(self):
super(TestImmutableTaskStub, self).setUp()
task_factory = glance.domain.TaskFactory()
self.context = glance.context.RequestContext(tenant=TENANT2)
task_type = 'import'
image_id = 'fake_image_id'
user_id = 'fake_user'
request_id = 'fake_request_id'
owner = TENANT2
task = task_factory.new_task(task_type, owner, image_id,
user_id, request_id)
self.task = authorization.ImmutableTaskStubProxy(task)
def _test_change(self, attr, value):
self.assertRaises(
exception.Forbidden,
setattr,
self.task,
attr,
value
)
self.assertRaises(
exception.Forbidden,
delattr,
self.task,
attr
)
def test_change_id(self):
self._test_change('task_id', UUID2)
def test_change_type(self):
self._test_change('type', 'fake')
def test_change_status(self):
self._test_change('status', 'success')
def test_change_owner(self):
self._test_change('owner', 'fake')
def test_change_expires_at(self):
self._test_change('expires_at', 'fake')
def test_change_created_at(self):
self._test_change('created_at', 'fake')
def test_change_updated_at(self):
self._test_change('updated_at', 'fake')
class TestTaskFactoryProxy(utils.BaseTestCase):
def setUp(self):
super(TestTaskFactoryProxy, self).setUp()
factory = glance.domain.TaskFactory()
self.context = glance.context.RequestContext(tenant=TENANT1)
self.context_owner_is_none = glance.context.RequestContext()
self.task_factory = authorization.TaskFactoryProxy(
factory,
self.context
)
self.task_type = 'import'
self.task_input = '{"loc": "fake"}'
self.owner = 'foo'
self.image_id = 'fake_image_id'
self.user_id = 'fake_user'
self.request_id = 'fake_request_id'
self.request1 = unittest_utils.get_fake_request(tenant=TENANT1)
self.request2 = unittest_utils.get_fake_request(tenant=TENANT2)
def test_task_create_default_owner(self):
owner = self.request1.context.owner
task = self.task_factory.new_task(task_type=self.task_type,
owner=owner, image_id=self.image_id,
user_id=self.user_id,
request_id=self.request_id)
self.assertEqual(TENANT1, task.owner)
def test_task_create_wrong_owner(self):
self.assertRaises(exception.Forbidden,
self.task_factory.new_task,
task_type=self.task_type,
task_input=self.task_input,
owner=self.owner, image_id=self.image_id,
user_id=self.user_id,
request_id=self.request_id)
def test_task_create_owner_as_None(self):
self.assertRaises(exception.Forbidden,
self.task_factory.new_task,
task_type=self.task_type,
task_input=self.task_input,
owner=None, image_id=self.image_id,
user_id=self.user_id,
request_id=self.request_id)
def test_task_create_admin_context_owner_as_None(self):
self.context.is_admin = True
self.assertRaises(exception.Forbidden,
self.task_factory.new_task,
task_type=self.task_type,
task_input=self.task_input,
owner=None, image_id=self.image_id,
user_id=self.user_id,
request_id=self.request_id)
class TestTaskRepoProxy(utils.BaseTestCase):
class TaskRepoStub(object):
def __init__(self, fixtures):
self.fixtures = fixtures
def get(self, task_id):
for f in self.fixtures:
if f.task_id == task_id:
return f
else:
raise ValueError(task_id)
class TaskStubRepoStub(object):
def __init__(self, fixtures):
self.fixtures = fixtures
def list(self, *args, **kwargs):
return self.fixtures
def setUp(self):
super(TestTaskRepoProxy, self).setUp()
task_factory = glance.domain.TaskFactory()
task_type = 'import'
image_id = 'fake_image_id'
user_id = 'fake_user'
request_id = 'fake_request_id'
owner = None
self.fixtures = [
task_factory.new_task(task_type, owner, image_id,
user_id, request_id),
task_factory.new_task(task_type, owner, image_id,
user_id, request_id),
task_factory.new_task(task_type, owner, image_id,
user_id, request_id),
]
self.context = glance.context.RequestContext(tenant=TENANT1)
task_repo = self.TaskRepoStub(self.fixtures)
task_stub_repo = self.TaskStubRepoStub(self.fixtures)
self.task_repo = authorization.TaskRepoProxy(
task_repo,
self.context
)
self.task_stub_repo = authorization.TaskStubRepoProxy(
task_stub_repo,
self.context
)
def test_get_mutable_task(self):
task = self.task_repo.get(self.fixtures[0].task_id)
self.assertEqual(task.task_id, self.fixtures[0].task_id)
def test_get_immutable_task(self):
task_id = self.fixtures[1].task_id
task = self.task_repo.get(task_id)
self.assertRaises(exception.Forbidden,
setattr, task, 'input', 'foo')
def test_list(self):
tasks = self.task_stub_repo.list()
self.assertEqual(tasks[0].task_id, self.fixtures[0].task_id)
self.assertRaises(exception.Forbidden,
setattr,
tasks[1],
'owner',
'foo')
self.assertRaises(exception.Forbidden,
setattr,
tasks[2],
'owner',
'foo')
|
the-stack_106_20650
|
import setuptools
from setuptools_behave import behave_test
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="colorgrade",
version="0.0.1",
author="Alexey Kuznetsov",
author_email="[email protected]",
description="Conditional formatting for terminal",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mrsmith/colorgrade",
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
    entry_points={
        "console_scripts": [
            "colorgrade=colorgrade:main",
            "colorgrade_test=colorgrade:test_color_scaler",
        ],
    },
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
install_requires=["colorful>=0.5.4", "colour>=0.1.5",],
tests_require=["behave>=1.2.6", "flake8>=3.7.8", "pyhamcrest>=2.0.2", "pytest>=5.2.0",],
setup_requires=["pytest-runner",],
cmdclass={"behave_test": behave_test,},
)
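# A hedged note on the entry_points above: after an install such as
# "pip install .", setuptools generates the two console commands listed in
# console_scripts, each wired to the named callable:
#
#   colorgrade        # runs colorgrade.main()
#   colorgrade_test   # runs colorgrade.test_color_scaler()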
|
the-stack_106_20651
|
#------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Name: AddFields.py
# Description: Creates custom fields in mosaic datasets.
# Version: 20201230
# Requirements: ArcGIS 10.1 SP1
# Author: Esri Imagery Workflows team
#------------------------------------------------------------------------------
#!/usr/bin/env python
import arcpy
import os
import sys
from xml.dom import minidom
import Base
class AddFields(Base.Base):
def __init__(self, base):
self.fieldNameList = []
self.fieldTypeList = []
self.fieldLengthList = []
self.setLog(base.m_log)
self.m_base = base
def CreateFields(self):
self.log("Adding custom fields:", self.const_general_text)
self.log("Using mosaic dataset:" + self.m_base.m_mdName, self.const_general_text)
try:
mdPath = os.path.join(self.m_base.m_geoPath, self.m_base.m_mdName)
if not arcpy.Exists(mdPath):
self.log("Mosaic dataset is not found.", self.const_warning_text)
return False
self.log("\tCreating fields:", self.const_general_text)
for j in range(len(self.fieldNameList)):
self.log("\t\t" + self.fieldNameList[j], self.const_general_text)
fieldExist = arcpy.ListFields(mdPath, self.fieldNameList[j])
if len(fieldExist) == 0:
arcpy.AddField_management(mdPath, self.fieldNameList[j], self.fieldTypeList[j], "", "", self.fieldLengthList[j])
except:
self.log("Error: " + arcpy.GetMessages(), self.const_critical_text)
return False
return True
def init(self, config):
Nodelist = self.m_base.m_doc.getElementsByTagName("MosaicDataset")
if (Nodelist.length == 0):
self.log("\nError: MosaicDataset node not found! Invalid schema.", self.const_critical_text)
return False
try:
for node in Nodelist[0].childNodes:
node = node.nextSibling
if (node is not None and node.nodeType == minidom.Node.ELEMENT_NODE):
if (node.nodeName == 'Name'):
try:
if (self.m_base.m_mdName == ''):
self.m_base.m_mdName = node.firstChild.nodeValue
break
except:
Error = True
except:
self.log("\nError: reading MosaicDataset nodes.", self.const_critical_text)
return False
Nodelist = self.m_base.m_doc.getElementsByTagName("Fields")
if (Nodelist.length == 0):
self.log("Error: Fields node not found! Invalid schema.", self.const_critical_text)
return False
try:
for node in Nodelist[0].childNodes:
if (node.nodeType == minidom.Node.ELEMENT_NODE):
for n in node.childNodes:
if(n.nodeType == minidom.Node.ELEMENT_NODE):
nodeName = n.nodeName.upper()
if (nodeName == 'NAME'):
self.fieldNameList.append(n.firstChild.nodeValue)
elif(nodeName == 'TYPE'):
self.fieldTypeList.append(n.firstChild.nodeValue)
elif(nodeName == 'LENGTH'):
try:
self.fieldLengthList.append(n.firstChild.nodeValue)
except:
self.fieldLengthList.append('')
except:
self.log("\nError: Reading fields information!", self.const_critical_text)
return False
fields_len = len(self.fieldNameList)
if (len(self.fieldTypeList) != fields_len or len(self.fieldLengthList) != fields_len):
self.log("\nError: Number of Field(Name, Type, Len) do not match!", self.const_critical_text)
return False
return True
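# For orientation, a minimal sketch of the configuration shape that init()
# walks: the MosaicDataset/Name node and the Fields children with
# Name/Type/Length come from the parsing code above, while the <Field>
# wrapper tag and the concrete values are illustrative assumptions only.
#
# <MosaicDataset>
#   <Name>SampleMD</Name>
#   <Fields>
#     <Field>
#       <Name>ProjectID</Name>
#       <Type>TEXT</Type>
#       <Length>50</Length>
#     </Field>
#   </Fields>
# </MosaicDataset>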
|
the-stack_106_20654
|
import asyncio
from unittest import mock
from lib.jsonrpc import RPCError
from server.env import Env
from server.controller import Controller
loop = asyncio.get_event_loop()
def set_env():
env = mock.create_autospec(Env)
env.coin = mock.Mock()
env.loop_policy = None
env.max_sessions = 0
env.max_subs = 0
env.max_send = 0
env.bandwidth_limit = 0
env.identities = ''
env.tor_proxy_host = env.tor_proxy_port = None
env.peer_discovery = env.PD_SELF = False
env.daemon_url = 'http://localhost:9891/'
return env
async def coro(res):
return res
def raise_exception(exc, msg):
raise exc(msg)
def ensure_text_exception(test, exception):
res = err = None
try:
res = loop.run_until_complete(test)
except Exception as e:
err = e
assert isinstance(err, exception), (res, err)
def test_transaction_get():
async def test_verbose_ignore_by_backend():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
sut.daemon_request.return_value = coro('11'*32)
res = await sut.transaction_get('ff'*32, True)
assert res == '11'*32
async def test_verbose_ok():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
response = {
"hex": "00"*32,
"blockhash": "ff"*32
}
sut.daemon_request.return_value = coro(response)
res = await sut.transaction_get('ff'*32, True)
assert res == response
response = {
"hex": "00"*32,
"blockhash": None
}
sut.daemon_request.return_value = coro(response)
res = await sut.transaction_get('ff'*32, True)
assert res == response
async def test_no_verbose():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
response = 'cafebabe'*64
sut.daemon_request.return_value = coro(response)
res = await sut.transaction_get('ff'*32)
assert res == response
async def test_verbose_failure():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
sut.daemon_request.return_value = coro(raise_exception(RPCError, 'some unhandled error'))
await sut.transaction_get('ff' * 32, True)
async def test_wrong_txhash():
env = set_env()
sut = Controller(env)
sut.daemon_request = mock.Mock()
await sut.transaction_get('cafe')
sut.daemon_request.assert_not_called()
loop.run_until_complete(asyncio.gather(
*[
test_verbose_ignore_by_backend(),
test_verbose_ok(),
test_no_verbose()
]
))
for error_test in [test_verbose_failure, test_wrong_txhash]:
ensure_text_exception(error_test(), RPCError)
|
the-stack_106_20655
|
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from .base import validatorBase
from .validators import *
#
# category element.
#
class category(validatorBase):
def getExpectedAttrNames(self):
return [(None,u'term'),(None,u'scheme'),(None,u'label')]
def prevalidate(self):
self.children.append(True) # force warnings about "mixed" content
self.validate_required_attribute((None,'term'), nonblank)
self.validate_optional_attribute((None,'scheme'), rfc3987_full)
self.validate_optional_attribute((None,'label'), nonhtml)
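# For orientation, a hedged sample of the Atom <category> element the checks
# above accept (the attribute values are invented): term is required and must
# be non-blank, scheme must be a full IRI, and label must contain no HTML.
#
#   <category term="python" scheme="http://example.com/tags" label="Python"/>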
|
the-stack_106_20657
|
import os
import torch
import numpy as np
from model import CNN, init_weights
from my_dataset import initialize_loader
from train import Trainer
import cv2
import util
import torchvision
from PIL import Image
from model import find_batch_bounding_boxes, Label
def train_model(load_model=None, num_features=16):
experiment = {
'seed': 1234,
'model_kernel': 3,
'model_num_features': 10,
'model_dropout_rate': 0.01,
'train_class_weight': [.18, .02, .80], # BALL, ROBOT, OTHER
'train_learn_rate': 1e-2, # 1e-3,
'train_weight_decay': 1e-9,
'train_batch_size': 8, # 32, # 80, # 20,
'train_epochs': 7,
'colour_jitter': [0.0, 0.0, 0.0, 0.0], # brightness, contrast, saturation, hue
'output_folder': 'nam-training3',
}
# Save directory
output_folder = experiment['output_folder']
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# test multiple times for statistical significance
valid_final_losses = []
test_final_losses = []
test_precision = []
test_recall = []
for i in range(1):
if load_model is not None:
print(f'Loading previously trained model: {load_model}')
model = CNN(kernel=3, num_features=num_features)
model.load_state_dict(torch.load(load_model))
else:
model = CNN(
kernel=experiment['model_kernel'],
num_features=experiment['model_num_features'],
dropout=experiment['model_dropout_rate'])
model.apply(init_weights)
trainer = Trainer(model,
learn_rate=experiment['train_learn_rate'],
weight_decay=experiment['train_weight_decay'],
batch_size=experiment['train_batch_size'],
epochs=experiment['train_epochs'],
colour_jitter=experiment['colour_jitter'],
output_folder=experiment['output_folder'],
seed=experiment['seed'],
class_weights=experiment['train_class_weight'])
trainer.train()
valid_final_losses.append(trainer.valid_losses[-2])
test_final_losses.append(trainer.valid_losses[-1])
test_precision.append(trainer.precision)
test_recall.append(trainer.recall)
print(valid_final_losses)
print(test_final_losses)
print('valid mean loss:', np.mean(valid_final_losses), ', std:', np.std(valid_final_losses))
print('test mean loss: ', np.mean(test_final_losses), ', std:', np.std(test_final_losses))
print(test_precision)
print(test_recall)
print('test precision:', np.mean(test_precision), ', std:', np.std(test_precision))
print('test recall: ', np.mean(test_recall), ', std:', np.std(test_recall))
print(experiment)
torch.save(model.state_dict(), 'outputs/model')
def display_dataset(model_path, num_feat):
if model_path is not None:
model = CNN(kernel=3, num_features=num_feat)
model.load_state_dict(torch.load(model_path))
model.eval()
else:
model = None
[trainl, _, _], [traind, testd] = initialize_loader(6, num_workers=1, shuffle=False)
testd.visualize_images(delay=1200, model=model, start=0, scale=2)
def test_model(model_num = 11):
model = CNN(kernel=3, num_features=8, dropout=0.2)
model.cuda()
model.load_state_dict(torch.load('06-25-2021-small-model-3/model' + str(model_num)))
    trainer = Trainer(model,
                      learn_rate=0.01,
                      weight_decay=0,
                      batch_size=16,
                      epochs=0,
                      colour_jitter=[0, 0, 0, 0],
                      output_folder='outputs',
                      seed=1,
                      class_weights=[0, 0, 0])
trainer.test_model('test', 0)
def webcam():
model = CNN()
model.load_state_dict(torch.load('outputs/model'))
transform = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.Resize(150, interpolation=Image.BILINEAR),
torchvision.transforms.CenterCrop((150, 200)),
torchvision.transforms.ToTensor()
])
cap = cv2.VideoCapture(0)
while cap.isOpened():
ret, frame = cap.read()
if ret:
img = transform(frame)
img_batch = img.unsqueeze(0)
outputs, _ = model(img_batch)
bbxs = find_batch_bounding_boxes(outputs)[0]
img = util.draw_bounding_boxes(img, bbxs[Label.ROBOT.value], (0, 0, 255))
img = util.draw_bounding_boxes(img, bbxs[Label.BALL.value], (255, 0, 0))
util.stream_image(img, wait=25, scale=4)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
# test_model()
# display_dataset()
# train_model(load_model='outputs/model', num_features=16)
#train_model()
display_dataset('outputs/model_j1_feat17', num_feat=17)
|
the-stack_106_20658
|
import numpy as np
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import time
from neural_clf.controllers.clf_qp_net import CLF_QP_Net
from neural_clf.controllers.constrained_lqr import PVTOLSimpleMPC
from models.pvtol import (
control_affine_dynamics,
u_nominal,
n_controls,
n_dims,
low_m,
high_m,
low_I,
high_I,
r,
)
torch.set_default_dtype(torch.float64)
# Beautify plots
sns.set_theme(context="talk", style="white")
#################################################
#
# In this file, we'll simulate the PVTOL system
# with a number of different controllers and
# compare the performance of the controllers
#
#################################################
# First simulate the robust CLF QP
# Load the robust model from file
filename = "logs/pvtol_robust_clf_qp_multiple_scenario.pth.tar"
checkpoint = torch.load(filename)
nominal_scenario = {"m": low_m, "inertia": low_I}
scenarios = [
{"m": low_m, "inertia": low_I},
{"m": low_m, "inertia": high_I},
{"m": high_m, "inertia": low_I},
{"m": high_m, "inertia": high_I},
]
robust_clf_net = CLF_QP_Net(n_dims,
checkpoint['n_hidden'],
n_controls,
checkpoint['clf_lambda'],
checkpoint['relaxation_penalty'],
control_affine_dynamics,
u_nominal,
scenarios,
nominal_scenario)
robust_clf_net.load_state_dict(checkpoint['clf_net'])
# # Also load the non-robust model from file
# filename = "logs/pvtol_robust_clf_qp_single_scenario.pth.tar"
# checkpoint = torch.load(filename)
# nominal_scenario = {"m": low_m, "inertia": low_I}
# scenarios = [
# {"m": low_m, "inertia": low_I},
# ]
# nonrobust_clf_net = CLF_QP_Net(n_dims,
# checkpoint['n_hidden'],
# n_controls,
# checkpoint['clf_lambda'],
# checkpoint['relaxation_penalty'],
# control_affine_dynamics,
# u_nominal,
# scenarios,
# nominal_scenario)
# nonrobust_clf_net.load_state_dict(checkpoint['clf_net'])
# nonrobust_clf_net.use_QP = False
# Also set up the constrained lqr controller
lqr = PVTOLSimpleMPC(low_m, r, low_I, 0.001)
# Simulate some results
with torch.no_grad():
N_sim = 1
x_sim_start = torch.zeros(N_sim, n_dims)
x_sim_start[:, 1] = 0.0
x_sim_start[:, 2] = 1.0
x_sim_start[:, 4] = -2.0
x_sim_start[:, 5] = 1.0
# Get a random distribution of masses and inertias
ms = torch.Tensor(N_sim, 1).uniform_(low_m, high_m)
inertias = torch.Tensor(N_sim, 1).uniform_(low_I, high_I)
t_sim = 1
delta_t = 0.001
num_timesteps = int(t_sim // delta_t)
print("Simulating robust CLF QP controller...")
x_sim_rclfqp = torch.zeros(num_timesteps, N_sim, n_dims)
u_sim_rclfqp = torch.zeros(num_timesteps, N_sim, n_controls)
V_sim_rclfqp = torch.zeros(num_timesteps, N_sim, 1)
Vdot_sim_rclfqp = torch.zeros(num_timesteps, N_sim, 1)
x_sim_rclfqp[0, :, :] = x_sim_start
t_final_rclfqp = 0
rclfqp_runtime = 0.0
# try:
# for tstep in tqdm(range(1, num_timesteps)):
# # Get the current state
# x_current = x_sim_rclfqp[tstep - 1, :, :]
# # Get the control input at the current state
# ts = time.time()
# u, r, V, Vdot = robust_clf_net(x_current)
# tf = time.time()
# rclfqp_runtime += tf - ts
# u_sim_rclfqp[tstep, :, :] = u
# V_sim_rclfqp[tstep, :, 0] = V
# Vdot_sim_rclfqp[tstep, :, 0] = Vdot.squeeze()
# # Get the dynamics
# for i in range(N_sim):
# f_val, g_val = control_affine_dynamics(x_current[i, :].unsqueeze(0),
# m=ms[i],
# inertia=inertias[i])
# # Take one step to the future
# xdot = f_val + g_val @ u[i, :]
# x_sim_rclfqp[tstep, i, :] = x_current[i, :] + delta_t * xdot.squeeze()
# t_final_rclfqp = tstep
# except (Exception, KeyboardInterrupt):
# print("Controller failed")
#
# print(f"rCLF QP controller ran at {N_sim * num_timesteps / rclfqp_runtime} Hz")
# print("Simulating non-robust CLF QP controller...")
# x_sim_nclfqp = torch.zeros(num_timesteps, N_sim, n_dims)
# u_sim_nclfqp = torch.zeros(num_timesteps, N_sim, n_controls)
# V_sim_nclfqp = torch.zeros(num_timesteps, N_sim, 1)
# Vdot_sim_nclfqp = torch.zeros(num_timesteps, N_sim, 1)
# x_sim_nclfqp[0, :, :] = x_sim_start
# t_final_nclfqp = 0
# try:
# for tstep in tqdm(range(1, num_timesteps)):
# # Get the current state
# x_current = x_sim_nclfqp[tstep - 1, :, :]
# # Get the control input at the current state
# u, r, V, Vdot = nonrobust_clf_net(x_current)
# u_sim_nclfqp[tstep, :, :] = u
# V_sim_nclfqp[tstep, :, 0] = V
# Vdot_sim_nclfqp[tstep, :, 0] = Vdot.squeeze()
# # Get the dynamics
# for i in range(N_sim):
# f_val, g_val = control_affine_dynamics(x_current[i, :].unsqueeze(0),
# m=ms[i],
# inertia=inertias[i])
# # Take one step to the future
# xdot = f_val + g_val @ u[i, :]
# x_sim_nclfqp[tstep, i, :] = x_current[i, :] + delta_t * xdot.squeeze()
# t_final_nclfqp = tstep
# except (Exception, KeyboardInterrupt):
# print("Controller failed")
print("Simulating LQR controller...")
x_sim_lqr = torch.zeros(num_timesteps, N_sim, n_dims)
x_sim_lqr[0, :, :] = x_sim_start
u_sim_lqr = torch.zeros(num_timesteps, N_sim, n_controls)
V_sim_lqr = torch.zeros(num_timesteps, N_sim, 1)
Vdot_sim_lqr = torch.zeros(num_timesteps, N_sim, 1)
lrq_runtime = 0.0
try:
for tstep in tqdm(range(1, num_timesteps)):
# Get the current state
x_current = x_sim_lqr[tstep - 1, :, :]
# Measure the Lyapunov function value here
V, grad_V = robust_clf_net.compute_lyapunov(x_current)
V_sim_lqr[tstep, :, 0] = V
# Get the dynamics
for i in range(N_sim):
# Get the control input at the current state
ts = time.time()
u = lqr.step(x_current[i, :].numpy())
tf = time.time()
lrq_runtime += tf - ts
u_sim_lqr[tstep, :, :] = torch.tensor(u)
f_val, g_val = control_affine_dynamics(x_current[i, :].unsqueeze(0),
m=ms[i],
inertia=inertias[i])
# Take one step to the future
xdot = f_val + g_val @ u
Vdot_sim_lqr[tstep, :, 0] = (grad_V @ xdot.T).squeeze()
x_sim_lqr[tstep, i, :] = x_current[i, :] + delta_t * xdot.squeeze()
except (Exception, KeyboardInterrupt):
print("Controller failed")
print(f"Constrained LQR controller ran at {N_sim * num_timesteps / lrq_runtime} Hz")
fig, axs = plt.subplots(2, 2)
t = np.linspace(0, t_sim, num_timesteps)
ax1 = axs[0, 0]
ax1.plot([], c=sns.color_palette("pastel")[1], label="rCLF")
# ax1.plot([], c=sns.color_palette("pastel")[2], label="CLF")
ax1.plot([], c=sns.color_palette("pastel")[0], label="LQR")
ax1.plot(t[:t_final_rclfqp], x_sim_rclfqp[:t_final_rclfqp, :, 1],
c=sns.color_palette("pastel")[1])
# ax1.plot(t[:t_final_nclfqp], x_sim_nclfqp[:t_final_nclfqp, :, 1],
# c=sns.color_palette("pastel")[2])
ax1.plot(t, x_sim_lqr[:, :, 1], c=sns.color_palette("pastel")[0])
ax1.plot(t, t * 0.0 + checkpoint["safe_z"], c="g")
ax1.plot(t, t * 0.0 + checkpoint["unsafe_z"], c="r")
ax1.set_xlabel("$t$")
ax1.set_ylabel("$z$")
ax1.legend()
ax1.set_xlim([0, t_sim])
# ax1.set_ylim([-1, 1])
ax3 = axs[1, 1]
ax3.plot([], c=sns.color_palette("pastel")[0], label="LQR V")
ax3.plot([], c=sns.color_palette("pastel")[1], label="rCLF V")
# ax3.plot([], c=sns.color_palette("pastel")[2], label="CLF V")
ax3.plot(t[1:], V_sim_lqr[1:, :, 0],
c=sns.color_palette("pastel")[0])
ax3.plot(t[1:t_final_rclfqp], V_sim_rclfqp[1:t_final_rclfqp, :, 0],
c=sns.color_palette("pastel")[1])
# ax3.plot(t[1:t_final_nclfqp], V_sim_nclfqp[1:t_final_nclfqp, :, 0],
# c=sns.color_palette("pastel")[2])
ax3.plot(t, t * 0.0, c="k")
ax3.legend()
ax2 = axs[0, 1]
ax2.plot([], c=sns.color_palette("pastel")[0], label="LQR dV/dt")
ax2.plot([], c=sns.color_palette("pastel")[1], label="rCLF dV/dt")
# ax2.plot([], c=sns.color_palette("pastel")[2], label="CLF dV/dt")
ax2.plot(t[1:t_final_rclfqp], Vdot_sim_rclfqp[1:t_final_rclfqp, :, 0],
c=sns.color_palette("pastel")[1])
# ax2.plot(t[1:t_final_nclfqp], Vdot_sim_nclfqp[1:t_final_nclfqp, :, 0],
# c=sns.color_palette("pastel")[2])
ax2.plot(t[1:], Vdot_sim_lqr[1:, :, 0],
c=sns.color_palette("pastel")[0])
ax2.plot(t, t * 0.0, c="k")
ax2.legend()
ax4 = axs[1, 0]
ax4.plot([], c=sns.color_palette("pastel")[0], linestyle="-", label="LQR $u1$")
ax4.plot([], c=sns.color_palette("pastel")[0], linestyle=":", label="LQR $u2$")
ax4.plot([], c=sns.color_palette("pastel")[1], linestyle="-", label="rCLF $u1$")
ax4.plot([], c=sns.color_palette("pastel")[1], linestyle=":", label="rCLF $u2$")
# ax4.plot([], c=sns.color_palette("pastel")[2], linestyle="-", label="CLF $u1$")
# ax4.plot([], c=sns.color_palette("pastel")[2], linestyle=":", label="CLF $u2$")
ax4.plot()
ax4.plot(t[1:t_final_rclfqp], u_sim_rclfqp[1:t_final_rclfqp, :, 0],
c=sns.color_palette("pastel")[1], linestyle="-")
ax4.plot(t[1:t_final_rclfqp], u_sim_rclfqp[1:t_final_rclfqp, :, 1],
c=sns.color_palette("pastel")[1], linestyle=":")
# ax4.plot(t[1:t_final_nclfqp], u_sim_nclfqp[1:t_final_nclfqp, :, 0],
# c=sns.color_palette("pastel")[2], linestyle="-")
# ax4.plot(t[1:t_final_nclfqp], u_sim_nclfqp[1:t_final_nclfqp, :, 1],
# c=sns.color_palette("pastel")[2], linestyle=":")
ax4.plot(t[1:], u_sim_lqr[1:, :, 0],
c=sns.color_palette("pastel")[0], linestyle="-")
ax4.plot(t[1:], u_sim_lqr[1:, :, 1],
c=sns.color_palette("pastel")[0], linestyle=":")
ax4.legend()
fig.tight_layout()
plt.show()
|
the-stack_106_20659
|
#
# Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This module is largely used by other modules, though it just contains
# simple string utilities :)
import json
from copy import deepcopy
from couchbase_core._pyport import long, ulp, basestring
from couchbase.exceptions import InvalidArgumentException
# Some constants
STALE_UPDATE_BEFORE = "false"
STALE_UPDATE_AFTER = "update_after"
STALE_OK = "ok"
ONERROR_CONTINUE = "continue"
ONERROR_STOP = "stop"
class _Unspec(object):
def __nonzero__(self):
return False
# Py3
__bool__ = __nonzero__
def __str__(self):
return ""
def __repr__(self):
return "<Placeholder>"
UNSPEC = _Unspec()
def _bool_param_handler(input):
if isinstance(input, bool):
if input:
return "true"
else:
return "false"
if isinstance(input, basestring):
if input not in ("true", "false"):
raise InvalidArgumentException.pyexc("String for boolean must be "
"'true' or 'false'", input)
return input
try:
input + 0
if input:
return "true"
else:
return "false"
except TypeError:
raise InvalidArgumentException.pyexc("Boolean value must be boolean, "
"numeric, or a string of 'true' "
"or 'false'", input)
def _num_param_handler(input):
# Don't allow booleans:
if isinstance(input, bool):
raise InvalidArgumentException.pyexc("Cannot use booleans as numeric values",
input)
try:
return str(int(input))
except Exception as e:
raise InvalidArgumentException.pyexc(
"Expected a numeric argument", input, e)
def _string_param_common(input, do_quote=False):
# TODO, if we pass this to urlencode, do we ever need to quote?
# For the moment, i'm always forcing non-quote behavior
do_quote = False
s = None
if isinstance(input, basestring):
s = input
elif isinstance(input, bool):
raise InvalidArgumentException.pyexc(
"Can't use boolean as string", input)
elif isinstance(input, (int, long, float)):
# Basic numeric types:
s = str(input)
else:
raise InvalidArgumentException.pyexc("Expected simple numeric type or string ",
input)
if do_quote:
s = ulp.quote(s)
return s
def _string_param_handler(input):
return _string_param_common(input, do_quote=True)
def _generic_param_handler(input):
    # _string_param_handler() accepts no do_quote argument, so call the
    # shared helper directly with quoting disabled.
    return _string_param_common(input, do_quote=False)
def _stale_param_handler(input):
if input in (STALE_UPDATE_AFTER, STALE_OK, STALE_UPDATE_BEFORE):
return input
ret = _bool_param_handler(input)
if ret == "true":
ret = STALE_OK
return ret
def _onerror_param_handler(input):
if input not in (ONERROR_CONTINUE, ONERROR_STOP):
raise InvalidArgumentException.pyexc(
"on_error must be 'continue' or 'stop'", input)
return input
def _jval_param_handler(input):
try:
ret = json.dumps(input)
return _string_param_handler(ret)
except Exception as e:
raise InvalidArgumentException.pyexc(
"Couldn't convert value to JSON", input, e)
def _jarry_param_handler(input):
ret = _jval_param_handler(input)
if not ret.startswith('['):
raise InvalidArgumentException.pyexc(
"Value must be converted to JSON array", input)
return ret
# Some more constants. Yippie!
class Params(object):
# Random, unspecified value.
DESCENDING = "descending"
STARTKEY = "startkey"
STARTKEY_DOCID = "startkey_docid"
ENDKEY = "endkey"
ENDKEY_DOCID = "endkey_docid"
KEY = "key"
KEYS = "keys"
INCLUSIVE_END = "inclusive_end"
GROUP = "group"
GROUP_LEVEL = "group_level"
REDUCE = "reduce"
SKIP = "skip"
LIMIT = "limit"
ON_ERROR = "on_error"
STALE = "stale"
DEBUG = "debug"
CONNECTION_TIMEOUT = "connection_timeout"
FULL_SET = "full_set"
MAPKEY_SINGLE = "mapkey_single"
MAPKEY_MULTI = "mapkey_multi"
MAPKEY_RANGE = "mapkey_range"
DOCKEY_RANGE = "dockey_range"
START_RANGE = "start_range"
END_RANGE = "end_range"
_HANDLER_MAP = {
Params.DESCENDING: _bool_param_handler,
Params.STARTKEY: _jval_param_handler,
Params.STARTKEY_DOCID: _string_param_handler,
Params.ENDKEY: _jval_param_handler,
Params.ENDKEY_DOCID: _string_param_handler,
Params.FULL_SET: _bool_param_handler,
Params.GROUP: _bool_param_handler,
Params.GROUP_LEVEL: _num_param_handler,
Params.INCLUSIVE_END: _bool_param_handler,
Params.KEY: _jval_param_handler,
Params.KEYS: _jarry_param_handler,
Params.ON_ERROR: _onerror_param_handler,
Params.REDUCE: _bool_param_handler,
Params.STALE: _stale_param_handler,
Params.SKIP: _num_param_handler,
Params.LIMIT: _num_param_handler,
Params.DEBUG: _bool_param_handler,
Params.CONNECTION_TIMEOUT: _num_param_handler,
Params.START_RANGE: _jarry_param_handler,
Params.END_RANGE: _jarry_param_handler,
}
def _gendoc(param):
for k, v in Params.__dict__.items():
if param == v:
return "\n:data:`Params.{0}`".format(k)
def _rangeprop(k_sugar, k_start, k_end):
def getter(self):
return self._user_options.get(k_sugar, UNSPEC)
def setter(self, value):
self._set_range_common(k_sugar, k_start, k_end, value)
return property(getter, setter, fdel=None, doc=_gendoc(k_sugar))
def _genprop(p):
def getter(self):
return self._get_common(p)
def setter(self, value):
self._set_common(p, value)
return property(getter, setter, fdel=None, doc=_gendoc(p))
class QueryBase(object):
def _set_common(self, param, value, set_user=True):
# Invalidate encoded string
self._encoded = None
if value is UNSPEC:
self._real_options.pop(param, None)
if set_user:
self._user_options.pop(param, None)
return
handler = _HANDLER_MAP.get(param)
        if not handler:
            if not self.unrecognized_ok:
                raise InvalidArgumentException.pyexc(
                    "Unrecognized parameter. To use unrecognized parameters, "
                    "set 'unrecognized_ok' to True")
            self._extra_options[param] = _string_param_handler(value)
            return
if self.passthrough:
handler = _string_param_handler
self._real_options[param] = handler(value)
if set_user:
self._user_options[param] = value
def _get_common(self, param):
if param in self._user_options:
return self._user_options[param]
return self._real_options.get(param, UNSPEC)
def _set_range_common(self, k_sugar, k_start, k_end, value):
"""
Checks to see if the client-side convenience key is present, and if so
converts the sugar convenience key into its real server-side
equivalents.
:param string k_sugar: The client-side convenience key
:param string k_start: The server-side key specifying the beginning of
the range
:param string k_end: The server-side key specifying the end of the
range
"""
if not isinstance(value, (list, tuple, _Unspec)):
raise InvalidArgumentException.pyexc(
"Range specification for {0} must be a list, tuple or UNSPEC"
.format(k_sugar))
if self._user_options.get(k_start, UNSPEC) is not UNSPEC or (
self._user_options.get(k_end, UNSPEC) is not UNSPEC):
raise InvalidArgumentException.pyexc(
"Cannot specify {0} with either {1} or {2}"
.format(k_sugar, k_start, k_end))
if not value:
self._set_common(k_start, UNSPEC, set_user=False)
self._set_common(k_end, UNSPEC, set_user=False)
self._user_options[k_sugar] = UNSPEC
return
if len(value) not in (1, 2):
raise InvalidArgumentException.pyexc("Range specification "
"must have one or two elements",
value)
value = value[::]
if len(value) == 1:
value.append(UNSPEC)
for p, ix in ((k_start, 0), (k_end, 1)):
self._set_common(p, value[ix], set_user=False)
self._user_options[k_sugar] = value
STRING_RANGE_END = json.loads('"\u0FFF"')
"""
Highest acceptable unicode value
"""
def __init__(self, passthrough=False, unrecognized_ok=False, **params):
"""
Create a new Query object.
A Query object is used as a container for the various view options.
It can be used as a standalone object to encode queries but is typically
passed as the ``query`` value to :class:`~couchbase_core.views.iterator.View`.
:param boolean passthrough:
Whether *passthrough* mode is enabled
:param boolean unrecognized_ok:
Whether unrecognized options are acceptable. See
:ref:`passthrough_values`.
:param params:
Key-value pairs for view options. See :ref:`view_options` for
a list of acceptable options and their values.
:raise: :exc:`couchbase.exceptions.InvalidArgumentException` if a view option
or a combination of view options were deemed invalid.
"""
self.passthrough = passthrough
self.unrecognized_ok = unrecognized_ok
self._real_options = {}
self._user_options = {}
self._extra_options = {}
self._encoded = None
# String literal to pass along with the query
self._base_str = ""
self.update(**params)
def update(self, copy=False, **params):
"""
Chained assignment operator.
This may be used to quickly assign extra parameters to the
:class:`Query` object.
Example::
q = Query(reduce=True, full_sec=True)
# Someplace later
v = View(design, view, query=q.update(mapkey_range=["foo"]))
Its primary use is to easily modify the query object (in-place).
:param boolean copy:
If set to true, the original object is copied before new attributes
are added to it
:param params: Extra arguments. These must be valid query options.
:return: A :class:`Query` object. If ``copy`` was set to true, this
will be a new instance, otherwise it is the same instance on which
this method was called
"""
if copy:
self = deepcopy(self)
for k, v in params.items():
if not hasattr(self, k):
if not self.unrecognized_ok:
raise InvalidArgumentException.pyexc("Unknown option", k)
self._set_common(k, v)
else:
setattr(self, k, v)
return self
@classmethod
def from_any(cls, params, **ctor_opts):
"""
Creates a new Query object from input.
:param params: Parameter to convert to query
:type params: dict, string, or :class:`Query`
If ``params`` is a :class:`Query` object already, a deep copy is made
and a new :class:`Query` object is returned.
        If ``params`` is a string, then a :class:`Query` object is constructed
        from it. The string itself is not parsed, but rather prepended to
        any additional parameters (defined via the object's methods)
        with an additional ``&`` character.
If ``params`` is a dictionary, it is passed to the :class:`Query`
constructor.
:return: a new :class:`Query` object
:raise: :exc:`InvalidArgumentException` if the input is none of the acceptable
types mentioned above. Also raises any exceptions possibly thrown
by the constructor.
"""
if isinstance(params, cls):
return deepcopy(params)
elif isinstance(params, dict):
ctor_opts.update(**params)
            if cls is QueryBase:
                if ('bbox' in params or 'start_range' in params or
                        'end_range' in params):
                    return SpatialQuery(**ctor_opts)
                else:
                    return ViewQuery(**ctor_opts)
            # Called on a concrete subclass: construct that class directly
            return cls(**ctor_opts)
elif isinstance(params, basestring):
ret = cls()
ret._base_str = params
return ret
else:
raise InvalidArgumentException.pyexc(
"Params must be Query, dict, or string")
@classmethod
def from_string(cls, qstr):
"""Wrapper for :meth:`from_any`"""
return cls.from_any(qstr)
def _encode(self, omit_keys=False):
res_d = []
for k, v in self._real_options.items():
if v is UNSPEC:
continue
if omit_keys and k == "keys":
continue
if not self.passthrough:
k = ulp.quote(k)
v = ulp.quote(v)
res_d.append("{0}={1}".format(k, v))
for k, v in self._extra_options.items():
res_d.append("{0}={1}".format(k, v))
return '&'.join(res_d)
@property
def encoded(self):
"""
Returns an encoded form of the query
"""
if not self._encoded:
self._encoded = self._encode()
if self._base_str:
return '&'.join((self._base_str, self._encoded))
else:
return self._encoded
@property
def _long_query_encoded(self):
"""
Returns the (uri_part, post_data_part) for a long query.
"""
uristr = self._encode(omit_keys=True)
kstr = "{}"
klist = self._real_options.get('keys', UNSPEC)
if klist != UNSPEC:
kstr = '{{"keys":{0}}}'.format(klist)
return (uristr, kstr)
@property
def has_blob(self):
"""
Whether this query object is 'dirty'.
A 'dirty' object is one which
contains parameters unrecognized by the internal handling methods.
A dirty query may be constructed by using the ``passthrough``
or ``unrecognized_ok`` options, or by passing a string to
:meth:`from_any`
"""
return self._base_str or self.unrecognized_ok or self.passthrough
def __repr__(self):
return "Query:'{0}'".format(self.encoded)
# Common parameters:
stale = _genprop(Params.STALE)
skip = _genprop(Params.SKIP)
limit = _genprop(Params.LIMIT)
full_set = _genprop(Params.FULL_SET)
connection_timeout = _genprop(Params.CONNECTION_TIMEOUT)
debug = _genprop(Params.DEBUG)
on_error = _genprop(Params.ON_ERROR)
class ViewQuery(QueryBase):
descending = _genprop(Params.DESCENDING)
# Use the range parameters. They're easier
startkey = _genprop(Params.STARTKEY)
endkey = _genprop(Params.ENDKEY)
startkey_docid = _genprop(Params.STARTKEY_DOCID)
endkey_docid = _genprop(Params.ENDKEY_DOCID)
keys = _genprop(Params.KEYS)
key = _genprop(Params.KEY)
inclusive_end = _genprop(Params.INCLUSIVE_END)
reduce = _genprop(Params.REDUCE)
group = _genprop(Params.GROUP)
group_level = _genprop(Params.GROUP_LEVEL)
# Aliases:
mapkey_single = _genprop(Params.KEY)
mapkey_multi = _genprop(Params.KEYS)
mapkey_range = _rangeprop(Params.MAPKEY_RANGE,
Params.STARTKEY, Params.ENDKEY)
dockey_range = _rangeprop(Params.DOCKEY_RANGE,
Params.STARTKEY_DOCID,
Params.ENDKEY_DOCID)
class SpatialQuery(QueryBase):
start_range = _genprop(Params.START_RANGE)
end_range = _genprop(Params.END_RANGE)
class Query(ViewQuery):
pass
def make_options_string(input, unrecognized_ok=False, passthrough=False):
if not isinstance(input, QueryBase):
input = QueryBase.from_any(input, unrecognized_ok=unrecognized_ok,
passthrough=passthrough)
return input.encoded
def make_dvpath(doc, view):
return "_design/{0}/_view/{1}?".format(doc, view)
|
the-stack_106_20660
|
#!/usr/bin/env python3
import pyeapi
class my_switch():
def __init__(self, config_file_location, device):
# loads the config file
pyeapi.client.load_config(config_file_location)
self.node = pyeapi.connect_to(device)
self.hostname = self.node.enable('show hostname')[0]['result']['hostname']
self.running_config = self.node.enable('show running-config')
def create_vlan(self, vlan_number, vlan_name):
vlans = self.node.api('vlans')
vlans.create(vlan_number)
vlans.set_name(vlan_number, vlan_name)
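# A hypothetical usage sketch; the config path, connection name, and VLAN
# values are invented. load_config() expects an eapi.conf-style file and
# connect_to() selects a named connection defined in it.
if __name__ == '__main__':
    switch = my_switch('nodes.conf', 'veos01')
    print(switch.hostname)
    switch.create_vlan(100, 'engineering')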
|
the-stack_106_20661
|
#!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2018, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
__author__ = "Jonathan Madsen"
__copyright__ = "Copyright 2020, The Regents of the University of California"
__credits__ = ["Jonathan Madsen"]
__license__ = "MIT"
__version__ = "@PROJECT_VERSION@"
__maintainer__ = "Jonathan Madsen"
__email__ = "[email protected]"
__status__ = "Development"
__all__ = [
"util",
"base_decorator",
"auto_timer",
"auto_tuple",
"timer",
"rss_usage",
"marker",
]
from ..util import *
|
the-stack_106_20662
|
from flask_restful import Resource
from firebase_admin import firestore
from google.cloud import storage
from models import thumb_bucket
class MetaREST(Resource):
@staticmethod
def get():
# Make firestore client
fcl = firestore.client()
response = {}
# Gets the document, and transforms it to python dictionary
level_doc_ref = fcl.collection('meta').document("meta") # This is only the reference. Haven't pulled from the server
level_doc = level_doc_ref.get() # Here, data is actually pulled.
if level_doc.exists:
response = level_doc.to_dict()
        # Default the version entry to an empty string when it is missing
        response["version"] = response.get("version", "")
return response
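# A hedged wiring sketch; the '/meta' route and the helper name are
# assumptions, not part of the original project. flask_restful resources are
# exposed by registering them on an Api bound to a Flask app.
def _example_register(app):
    from flask_restful import Api
    api = Api(app)
    api.add_resource(MetaREST, '/meta')
    return api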
|