the-stack_106_21843
|
"""
Prepares documentation and sets up a Jekyll project into which the documentation goes.
Functions
=========
add_frontmatter(file_name, title, makenew=False)
    Adds basic frontmatter to a Markdown file that will be used in a Jekyll project.
"""
#def make_project
def add_frontmatter(file_name, title, makenew=False):
"""
    Adds basic frontmatter to a Markdown file that will be used in a Jekyll project.
Parameters
==========
file_name : String
Relative file path from where this method is called to the location of the file that will have frontmatter added.
title : String
Title of the page that will go into the Jekyll project.
makenew : Boolean (OPTIONAL)
If set to True, will create a new file with the frontmatter next to the original file with "_added_frontmatter" appended to its name. Otherwise, the method simply edits the original file.
Examples
========
Suppose we have the following directory:
data/
doc.md
    To write to a new file data/doc_added_frontmatter.md and add frontmatter:
    >>> from aide_document import jekyll
    >>> jekyll.add_frontmatter('data/doc.md', 'Document', True)
    The last parameter can be omitted if you just want to overwrite data/doc.md.
"""
with open(file_name, "r+") as oldfile:
# Creates new file and writes to it if specified
if makenew:
with open(file_name[:-3] + '_added_frontmatter.md', 'w') as newfile:
newfile.write('---\n' + 'title: ' + title + '\n' + '---\n')
newfile.write(oldfile.read())
# Writes to old file if unspecified
else:
content = oldfile.read()
oldfile.seek(0)
oldfile.write('---\n' + 'title: ' + title + '\n' + '---\n' + content)
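# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): exercises
# add_frontmatter() on a throwaway Markdown file; the file names below are
# made up for illustration only.
if __name__ == '__main__':
    with open('demo.md', 'w') as f:
        f.write('# Hello\n')
    add_frontmatter('demo.md', 'Demo Page', makenew=True)
    with open('demo_added_frontmatter.md') as f:
        print(f.read())
    # Prints:
    # ---
    # title: Demo Page
    # ---
    # # Hello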
|
the-stack_106_21844
|
from typing import cast, Dict, Set, Optional, Union
from spec_random import SPEC_RANDOM
from spec_basis import Rand, Kobj, KindSend, Lego, Syscall, Program, Executable
from spec_pack import pack_int
from spec_lego_simple import LegoSimple
from spec_lego_pointer import LegoPointer
from spec_lego_vector import RandVector
from spec_type_str import RandStr
from spec_type_buf import RandBuf
from util_bean import Bean, BeanRef
class RandLen(Rand):
offset: Union[int, float] # offset to the actual length
class KobjLen(Kobj):
pass
class KindSendLen(KindSend[RandLen]):
bits: int
ptr: BeanRef[LegoPointer]
# bean
def validate(self) -> None:
assert self.bits in {8, 16, 32, 64}
# debug
def note(self) -> str:
return self.ptr.bean.dump()
# chain
def link(self, ctxt: Syscall) -> None:
# use None as the root so we do not associate that lego with anything
self.ptr.bean.link(None, ctxt)
# memory
def length(self) -> Optional[int]:
return self.bits // 8
# builder
def mk_rand(self) -> RandLen:
return RandLen()
# operations: engage and remove
def engage_rand(self, rand: RandLen, prog: Program) -> None:
# prep
self.ptr.bean.engage(prog)
self.ptr.bean.add_rdep_rand(self.lego.bean)
# init
rand.offset = 0
def remove_rand(self, rand: RandLen, prog: Program) -> None:
# un-prep
self.ptr.bean.del_rdep_rand(self.lego.bean)
self.ptr.bean.remove(prog)
# operations: mutate and puzzle
def mutate_rand(self, rand: RandLen, prog: Program) -> None:
# [TOSS] change the underlying pointer
self.ptr.bean.mutate_rand(prog)
def puzzle_rand(self, rand: RandLen, prog: Program) -> None:
p = SPEC_RANDOM.random()
if p < 0.1:
# [DRAG] add an offset (absolute) to the length
rand.offset = SPEC_RANDOM.choice([
1, 8, 64, 512, 4096
])
elif p < 0.2:
# [DRAG] add an offset (relative) to the length
rand.offset = SPEC_RANDOM.choice([
0.1, 0.5, 0.9, 1.0,
])
else:
# [DRAG] change the underlying pointer
self.ptr.bean.puzzle_rand(prog)
# operations: update
def update_rand(self, rand: RandLen, prog: Program) -> None:
assert self.ptr.bean.rand is not None
# operations: migrate
def migrate_rand(
self,
rand: RandLen, orig: RandLen,
ctxt: Dict[Bean, Bean], hist: Set['Lego']
) -> None:
# first migrate the ptr
self.ptr.bean.migrate(
cast(KindSendLen,
cast(LegoSimple, orig.lego.bean).kind_send).ptr.bean,
ctxt, hist
)
# then migrate the rand value
rand.offset = orig.offset
# utils
def _measure(self, rand: RandLen) -> int:
ptr = self.ptr.bean.rand
assert ptr is not None
# get the object size
if not ptr.pick:
# null pointer gets size 0
return 0
mem = cast(BeanRef[Lego], self.ptr.bean.memv)
obj = mem.bean.rand
if isinstance(obj, RandStr):
size = len(obj.data) + 1
elif isinstance(obj, RandBuf):
size = len(obj.data)
elif isinstance(obj, RandVector):
size = len(obj.meta)
else:
raise RuntimeError('Invalid type for length measurement')
        # adjust for offset (only subtraction allowed, stopped at 0)
if isinstance(rand.offset, int):
if size >= rand.offset:
size -= rand.offset
elif isinstance(rand.offset, float):
size = int(size * (1 - rand.offset))
else:
raise RuntimeError('Invalid type for length offset')
return size
# expo
def expo_rand(self, rand: RandLen) -> str:
return str(self._measure(rand))
# blob
def blob_size_rand(self, rand: RandLen) -> int:
return cast(int, self.length())
def blob_hole_rand(self, rand: RandLen, inst: Executable) -> None:
        # although KindSendLen requires a ptr lego, it does not require
        # that the lego be on the heap (although the lego is indeed on
        # the heap in almost all cases)
pass
def blob_data_rand(self, rand: RandLen, inst: Executable) -> bytes:
return pack_int(self._measure(rand), self.bits)
def blob_fill_rand(self, rand: RandLen, inst: Executable) -> None:
# similar to the logic in blob_hole_rand(), the ptr lego may not
# necessarily be on heap, therefore, do not call fill on it
pass
# relationship
def rely_on_rand(self, rand: RandLen, prog: Program) -> Set[int]:
return set()
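# Hedged aside (not part of the original spec module): a standalone sketch of
# the offset arithmetic used in _measure() above. An int offset is subtracted
# from the measured size (skipped when it would overshoot), while a float
# offset shrinks the size proportionally.
def _apply_length_offset(size: int, offset: Union[int, float]) -> int:
    if isinstance(offset, int):
        return size - offset if size >= offset else size
    if isinstance(offset, float):
        return int(size * (1 - offset))
    raise RuntimeError('Invalid type for length offset')
# e.g. _apply_length_offset(4096, 512) == 3584 and _apply_length_offset(100, 0.5) == 50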
|
the-stack_106_21846
|
import unittest
from itertools import combinations
import json
import mock
import pytest
import uuid
from mlflow.protos.model_registry_pb2 import CreateRegisteredModel, \
UpdateRegisteredModel, DeleteRegisteredModel, ListRegisteredModels, \
GetRegisteredModel, GetLatestVersions, CreateModelVersion, UpdateModelVersion, \
DeleteModelVersion, GetModelVersion, GetModelVersionDownloadUri, SearchModelVersions, \
RenameRegisteredModel, TransitionModelVersionStage, SearchRegisteredModels
from mlflow.store.model_registry.rest_store import RestStore
from mlflow.utils.proto_json_utils import message_to_json
from mlflow.utils.rest_utils import MlflowHostCreds
@pytest.fixture(scope="class")
def request_fixture():
with mock.patch('requests.request') as request_mock:
        response = mock.MagicMock()
response.status_code = 200
response.text = '{}'
request_mock.return_value = response
yield request_mock
@pytest.mark.usefixtures("request_fixture")
class TestRestStore(unittest.TestCase):
def setUp(self):
self.creds = MlflowHostCreds('https://hello')
self.store = RestStore(lambda: self.creds)
def tearDown(self):
pass
def _args(self, host_creds, endpoint, method, json_body):
res = {'host_creds': host_creds,
'endpoint': "/api/2.0/preview/mlflow/%s" % endpoint,
'method': method}
if method == "GET":
res["params"] = json.loads(json_body)
else:
res["json"] = json.loads(json_body)
return res
def _verify_requests(self, http_request, endpoint, method, proto_message):
print(http_request.call_args_list)
json_body = message_to_json(proto_message)
http_request.assert_any_call(**(self._args(self.creds, endpoint, method, json_body)))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_create_registered_model(self, mock_http):
self.store.create_registered_model("model_1")
self._verify_requests(mock_http, "registered-models/create", "POST",
CreateRegisteredModel(name="model_1"))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_update_registered_model_name(self, mock_http):
name = "model_1"
new_name = "model_2"
self.store.rename_registered_model(name=name, new_name=new_name)
self._verify_requests(mock_http, "registered-models/rename", "POST",
RenameRegisteredModel(name=name, new_name=new_name))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_update_registered_model_description(self, mock_http):
name = "model_1"
description = "test model"
self.store.update_registered_model(name=name, description=description)
self._verify_requests(mock_http, "registered-models/update", "PATCH",
UpdateRegisteredModel(name=name, description=description))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_delete_registered_model(self, mock_http):
name = "model_1"
self.store.delete_registered_model(name=name)
self._verify_requests(mock_http, "registered-models/delete", "DELETE",
DeleteRegisteredModel(name=name))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_list_registered_model(self, mock_http):
self.store.list_registered_models(max_results=50, page_token=None)
self._verify_requests(mock_http, "registered-models/list", "GET",
ListRegisteredModels(page_token=None, max_results=50))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_search_registered_model(self, mock_http):
self.store.search_registered_models()
self._verify_requests(mock_http, "registered-models/search", "GET",
SearchRegisteredModels())
params_list = [
{"filter_string": "model = 'yo'"},
{"max_results": 400},
{"page_token": "blah"},
{"order_by": ["x", "Y"]}
]
        # test all combinations of params
for sz in [0, 1, 2, 3, 4]:
for combination in combinations(params_list, sz):
params = {k: v for d in combination for k, v in d.items()}
self.store.search_registered_models(**params)
if "filter_string" in params:
params["filter"] = params.pop("filter_string")
self._verify_requests(mock_http, "registered-models/search", "GET",
SearchRegisteredModels(**params))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_get_registered_model(self, mock_http):
name = "model_1"
self.store.get_registered_model(name=name)
self._verify_requests(mock_http, "registered-models/get", "GET",
GetRegisteredModel(name=name))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_get_latest_versions(self, mock_http):
name = "model_1"
self.store.get_latest_versions(name=name)
self._verify_requests(mock_http, "registered-models/get-latest-versions", "GET",
GetLatestVersions(name=name))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_get_latest_versions_with_stages(self, mock_http):
name = "model_1"
self.store.get_latest_versions(name=name, stages=["blaah"])
self._verify_requests(mock_http, "registered-models/get-latest-versions", "GET",
GetLatestVersions(name=name, stages=["blaah"]))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_create_model_version(self, mock_http):
run_id = uuid.uuid4().hex
self.store.create_model_version("model_1", "path/to/source", run_id)
self._verify_requests(mock_http, "model-versions/create", "POST",
CreateModelVersion(name="model_1", source="path/to/source",
run_id=run_id))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_transition_model_version_stage(self, mock_http):
name = "model_1"
version = "5"
self.store.transition_model_version_stage(name=name, version=version, stage="prod",
archive_existing_versions=True)
self._verify_requests(mock_http, "model-versions/transition-stage", "POST",
TransitionModelVersionStage(name=name, version=version,
stage="prod",
archive_existing_versions=True))
@mock.patch('mlflow.utils.rest_utils.http_request')
    def test_update_model_version_description(self, mock_http):
name = "model_1"
version = "5"
description = "test model version"
self.store.update_model_version(name=name, version=version, description=description)
self._verify_requests(mock_http, "model-versions/update", "PATCH",
UpdateModelVersion(name=name, version=version,
description="test model version"))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_delete_model_version(self, mock_http):
name = "model_1"
version = "12"
self.store.delete_model_version(name=name, version=version)
self._verify_requests(mock_http, "model-versions/delete", "DELETE",
DeleteModelVersion(name=name, version=version))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_get_model_version_details(self, mock_http):
name = "model_11"
version = "8"
self.store.get_model_version(name=name, version=version)
self._verify_requests(mock_http, "model-versions/get", "GET",
GetModelVersion(name=name, version=version))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_get_model_version_download_uri(self, mock_http):
name = "model_11"
version = "8"
self.store.get_model_version_download_uri(name=name, version=version)
self._verify_requests(mock_http, "model-versions/get-download-uri", "GET",
GetModelVersionDownloadUri(name=name, version=version))
@mock.patch('mlflow.utils.rest_utils.http_request')
def test_search_model_versions(self, mock_http):
self.store.search_model_versions(filter_string="name='model_12'")
self._verify_requests(mock_http, "model-versions/search", "GET",
SearchModelVersions(filter="name='model_12'"))
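# Hedged aside (illustration only, not part of the test suite): how the
# combination loop in test_search_registered_model builds its kwargs --
# itertools.combinations yields every subset of the single-key dicts and the
# dict comprehension flattens each subset into one call's keyword arguments.
def _merge_param_dicts_demo():
    params_list = [{"max_results": 400}, {"page_token": "blah"}]
    for combo in combinations(params_list, 2):
        merged = {k: v for d in combo for k, v in d.items()}
        assert merged == {"max_results": 400, "page_token": "blah"}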
|
the-stack_106_21847
|
import tensorflow as tf
EMB_SIZE = 10
def gen_conv(batch_input, out_channels, kernel_size, a):
# [batch, in_width, in_channels] => [batch, out_width, out_channels]
initializer = tf.random_normal_initializer(0, 0.02)
return tf.layers.conv1d(batch_input, out_channels, kernel_size=kernel_size, strides=1, padding="same", kernel_initializer=initializer)
def lrelu(x, a):
with tf.name_scope("lrelu"):
# adding these together creates the leak part and linear part
# then cancels them out by subtracting/adding an absolute value term
# leak: a*x/2 - a*abs(x)/2
# linear: x/2 + abs(x)/2
# this block looks like it has 2 inputs on the graph unless we do this
x = tf.identity(x)
return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
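# Quick numeric check of the lrelu identity above (illustration only): with
# a = 0.2, (0.5*(1+a))*x + (0.5*(1-a))*abs(x) reduces to max(x, a*x):
#   x =  2.0  ->  0.6*2.0  + 0.4*2.0 =  2.0   (the linear part)
#   x = -2.0  ->  0.6*-2.0 + 0.4*2.0 = -0.4   (the leak part, 0.2 * -2.0)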
def batchnorm(inputs):
return tf.layers.batch_normalization(inputs, axis=2, epsilon=1e-5, momentum=0.1, training=True, gamma_initializer=tf.random_normal_initializer(1.0, 0.02))
def create_model(model_inputs, a):
print("Generator input shape", model_inputs.shape)
layers = []
num_classes = 1
# embedding: [batch, sentence_len] => [batch, sentence_len, EMB_SIZE]
with tf.variable_scope("embedding"):
embeddings = tf.Variable(tf.random_normal([a.vocab_size, EMB_SIZE], dtype=tf.float32))
output = tf.nn.embedding_lookup(embeddings, model_inputs)
layers.append(output)
layer_specs = [
(50, 1), # encoder_1: [batch, sentence_len, EMB_SIZE] => [batch, 1, 50]
(50, 2), # encoder_2: [batch, sentence_len, EMB_SIZE] => [batch, 1, 50]
(50, 3), # encoder_3: [batch, sentence_len, EMB_SIZE] => [batch, 1, 50]
(50, 4), # encoder_4: [batch, sentence_len, EMB_SIZE] => [batch, 1, 50]
]
for out_channels, kernel_size in layer_specs:
with tf.variable_scope("encoder_%d" % (len(layers))):
convolved = gen_conv(layers[0], out_channels, kernel_size, a)
output = batchnorm(convolved)
rectified = lrelu(output, 0.2)
output = tf.layers.max_pooling1d(rectified, a.max_len, 1)
layers.append(output)
# [batch, 1, 200]
output = tf.concat(layers[1:], axis=2)
if a.dropout > 0.0:
output = tf.nn.dropout(output, keep_prob=1 - a.dropout)
# mlp: [batch, 1, 200] => [batch, num_classes]
with tf.variable_scope("mlp"):
initializer = tf.random_normal_initializer(0, 0.02)
output = tf.layers.conv1d(output, num_classes, kernel_size=1, strides=1, padding="valid", kernel_initializer=initializer)
output = tf.squeeze(output, axis=1)
layers.append(output)
return layers[-1]
|
the-stack_106_21848
|
'''
Original Code:
https://github.com/yysijie/st-gcn/blob/master/processor/io.py
'''
#!/usr/bin/env python
# pylint: disable=W0201
import sys
import argparse
import yaml
import numpy as np
# torch
import torch
import torch.nn as nn
# torchlight
import torchlight
from torchlight import str2bool
from torchlight import DictAction
from torchlight import import_class
import ipdb; pdb=ipdb.set_trace
class IO():
"""
IO Processor
"""
def __init__(self, argv=None):
self.load_arg(argv)
self.init_environment()
self.load_model()
self.load_weights()
self.gpu()
def load_arg(self, argv=None):
parser = self.get_parser()
        # load args from the config file
p = parser.parse_args(argv)
if p.config is not None:
# load config file
with open(p.config, 'r') as f:
                default_arg = yaml.load(f, Loader=yaml.FullLoader)
# update parser from config file
key = vars(p).keys()
for k in default_arg.keys():
if k not in key:
print('Unknown Arguments: {}'.format(k))
assert k in key
parser.set_defaults(**default_arg)
self.arg = parser.parse_args(argv)
def init_environment(self):
self.io = torchlight.IO(
self.arg.work_dir,
save_log=self.arg.save_log,
# print_log=self.arg.print_log
)
self.io.save_arg(self.arg)
# gpu
if self.arg.use_gpu:
gpus = torchlight.visible_gpu(self.arg.device)
torchlight.occupy_gpu(gpus)
self.gpus = gpus
self.dev = "cuda:0"
else:
self.dev = "cpu"
def load_model(self):
self.model = self.io.load_model(self.arg.model,
**(self.arg.model_args))
def load_weights(self):
if self.arg.weights:
self.model = self.io.load_weights(self.model, self.arg.weights,
self.arg.ignore_weights)
def gpu(self):
# move modules to gpu
self.model = self.model.to(self.dev)
for name, value in vars(self).items():
cls_name = str(value.__class__)
if cls_name.find('torch.nn.modules') != -1:
setattr(self, name, value.to(self.dev))
# model parallel
if self.arg.use_gpu and len(self.gpus) > 1:
self.model = nn.DataParallel(self.model, device_ids=self.gpus)
def start(self):
self.io.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
@staticmethod
def get_parser(add_help=False):
#region arguments yapf: disable
# parameter priority: command line > config > default
parser = argparse.ArgumentParser( add_help=add_help, description='IO Processor')
parser.add_argument('-w', '--work_dir', default='./work_dir/tmp', help='the work folder for storing results')
parser.add_argument('-c', '--config', default=None, help='path to the configuration file')
# processor
parser.add_argument('--use_gpu', type=str2bool, default=False, help='use GPUs or not')
parser.add_argument('--device', type=int, default=0, nargs='+', help='the indexes of GPUs for training or testing')
        # visualize and debug
parser.add_argument('--print_log', type=str2bool, default=True, help='print logging or not')
parser.add_argument('--save_log', type=str2bool, default=True, help='save logging or not')
# model
parser.add_argument('--model', default=None, help='the model will be used')
parser.add_argument('--model_args', action=DictAction, default=dict(), help='the arguments of model')
parser.add_argument('--weights', default=None, help='the weights for network initialization')
parser.add_argument('--ignore_weights', type=str, default=[], nargs='+', help='the name of weights which will be ignored in the initialization')
#endregion yapf: enable
# self
parser.add_argument('--video', '-v', type=str, dest='video_path', default="data/demo/video.mp4")
parser.add_argument('--pose_detector', '-p', type=int, default=0)
return parser
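# Hedged aside (standalone illustration, not part of the processor): the
# "command line > config > default" priority implemented by load_arg() above
# boils down to argparse layering -- parser defaults are weakest,
# parser.set_defaults(**config) overrides them, and command-line values win.
def _arg_priority_demo():
    demo = argparse.ArgumentParser()
    demo.add_argument('--work_dir', default='./work_dir/tmp')   # default layer
    demo.set_defaults(**{'work_dir': './from_config'})          # config layer
    assert demo.parse_args([]).work_dir == './from_config'
    assert demo.parse_args(['--work_dir', './cli']).work_dir == './cli'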
|
the-stack_106_21850
|
import json
from datetime import datetime
import pandas as pd
import scrapy
class PlayerSpider(scrapy.Spider):
# set the attributes for the spider
name = "player"
def __init__(self, **kwargs):
"""initialize the data"""
super().__init__(**kwargs)
# create player data frame
player_df = pd.DataFrame(
columns=['Account_ID', 'Name', 'Country_Code', 'Team_ID', 'Team_Name', 'Team_Tag', 'Date'])
player_df.to_csv('../Data/DotaPlayer.csv', index=False)
# read in the dfs and convert the date column to a pandas datetime column
self.player_df = pd.read_csv('../Data/DotaPlayer.csv', index_col=False, header=0)
self.player_df["Date"] = pd.to_datetime(self.player_df["Date"])
def start_requests(self):
"""start the data gathering"""
# first gather all pro players
yield scrapy.Request(url='https://api.opendota.com/api/proPlayers', callback=self.parse)
def parse(self, response):
"""the response are all pro players. This method saves these players in a data frame"""
response_list = json.loads(response.text)
for res in response_list:
date = datetime.now().replace(second=0, microsecond=0, hour=0, minute=0)
data = {'Account_ID': res['account_id'], 'Name': res['name'], 'Country_Code': res['country_code'],
'Team_ID': res['team_id'], 'Team_Name': res['team_name'], 'Team_Tag': res['team_tag'], 'Date': date}
# merge the result data frame with the current data frame
data_df = pd.DataFrame(data, index=[len(self.player_df)])
frames = [data_df, self.player_df]
self.player_df = pd.concat(frames)
# save the result data frame
self.player_df.to_csv('../Data/DotaPlayer.csv', index=False)
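# Hedged usage sketch (not part of the original spider module): runs the spider
# programmatically instead of via the `scrapy crawl player` CLI; assumes the
# ../Data directory exists so the CSV file can be written.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess()
    process.crawl(PlayerSpider)
    process.start()  # blocks until the crawl finishes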
|
the-stack_106_21851
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# @Author : Bismarckkk
# @Site : https://github.com/bismarckkk
# @File : rosNode.py
import sys
import logging
import cv2
import numpy as np
import rospy
from sensor_msgs.msg import Image
from radar_msgs.msg import points, point, view_control
from hp_limit_helper.msg import RobotsHP
from base import BaseNode, BaseImageSubscriber, BasePathHandler, BaseHpHandler, BaseMinimapShapeSubscriber, \
BaseCameraShapeSubscriber, BaseViewHandler
import config
"""
@warning: When the cvBridge option is enabled in the config file, the cv_bridge package is imported in place of the hand-rolled image message parser; please build the cv_bridge package for python3 yourself
"""
if config.isCvBridge:
import cv_bridge
bridge = cv_bridge.CvBridge()
id2RobotType = ['', 'Hero', 'Engineer', 'Infantry1', 'Infantry2', 'Infantry3', '', 'Sentry', 'Outpost', 'Base']
class RosNode(BaseNode):
"""
    @brief: RosNode class, inherits from BaseNode
    @details: Initializes the ros handle after startup and runs the ros backend with the spin() method
"""
def __init__(self):
super().__init__()
fixLogging(logging.INFO)
rospy.init_node('displayer')
def run(self):
logging.info('RosNode is running...')
rospy.spin()
"""
@brief: Reconnects the logging system taken over by ROS back to python
@href: https://github.com/ros/ros_comm/issues/1384
"""
def fixLogging(level=logging.WARNING):
console = logging.StreamHandler()
console.setLevel(level)
logging.getLogger('').addHandler(console)
formatter = logging.Formatter('%(levelname)-8s:%(name)-12s: %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
class RosImageSubscriber(BaseImageSubscriber):
"""
    @brief: ros camera subscriber class, inherits from BaseImageSubscriber
    @fn self.img_to_cv2: parses the image message via cv_bridge or the third-party code below to obtain a cv2 image
"""
def __init__(self, cfg: dict):
self.topic = cfg['topic']
self.sub = rospy.Subscriber(self.topic, Image, self.callback, 1)
self.size = cfg['size']
if len(self.size) == 0:
self.size = None
else:
self.size = tuple(self.size)
super().__init__(self.size)
def img_to_cv2(self, img_msg: Image):
"""
        @param img_msg: ros sensor_msgs/Image message
        @return: cv2 image
        @warning: this parsing code only supports bgr8 and rgb8 encodings
@href: https://answers.ros.org/question/350904/cv_bridge-throws-boost-import-error-in-python-3-and-ros-melodic/
"""
if config.isCvBridge:
return bridge.imgmsg_to_cv2(img_msg, 'bgr8')
else:
dtype = np.dtype("uint8")
dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')
image_opencv = np.ndarray(shape=(img_msg.height, img_msg.width, 3),
dtype=dtype, buffer=img_msg.data)
if img_msg.is_bigendian == (sys.byteorder == 'little'):
image_opencv = image_opencv.byteswap().newbyteorder()
if img_msg.encoding == 'bgr8':
pass
elif img_msg.encoding == 'rgb8':
image_opencv = image_opencv[:, :, [2, 1, 0]]
else:
raise ValueError('Unsupported encoding: ' + img_msg.encoding)
if self.size is not None:
image_opencv = cv2.resize(image_opencv, self.size)
return image_opencv
def callback(self, data, _):
self.queue.put(self.img_to_cv2(data))
class RosPathHandler(BasePathHandler):
def __init__(self, cfg: dict, name: str):
cfg['name'] = name
self.pub = rospy.Publisher(cfg['calibrationTopic'], points, queue_size=1)
if cfg['calibrationDefaultSubscribe'] and cfg['calibrationDefaultSubscribe'] != '':
self.sub = rospy.Subscriber(cfg['calibrationDefaultSubscribe'], points, self.callback, queue_size=1)
super().__init__(cfg)
def publish(self, data):
msg = points()
res = []
i = 0
for it in data:
p = point()
p.x = it[0]
p.y = it[1]
p.id = i
res.append(p)
i += 1
msg.data = res
self.pub.publish(msg)
def callback(self, msg):
data = [[p.x, p.y] for p in msg.data]
self.setPath(data)
class RosHpHandler(BaseHpHandler):
def __init__(self, cfg: dict):
self.hpSubscriber = rospy.Subscriber(cfg['hpSubscribe'], RobotsHP, self.hpSubscribe, queue_size=1)
self.hpLimitSubscriber = rospy.Subscriber(cfg['hpLimitSubscribe'], RobotsHP, self.hpLimitSubscribe, queue_size=1)
def hpSubscribe(self, msg: RobotsHP):
for it in msg.data:
if it.team == 0:
info = self.data['red']
else:
info = self.data['blue']
robotType = id2RobotType[it.number]
if robotType in info.keys():
info[robotType]['hp'] = it.hp
else:
info[robotType] = {'hp': it.hp, 'hpLimit': it.hp}
self.sendInfo()
def hpLimitSubscribe(self, msg: RobotsHP):
for it in msg.data:
if it.team == 0:
info = self.data['red']
else:
info = self.data['blue']
robotType = id2RobotType[it.number]
if robotType in info.keys():
info[robotType]['hpLimit'] = it.hp
else:
info[robotType] = {'hp': it.hp, 'hpLimit': it.hp}
self.sendInfo()
class RosMinimapShapeSubscriber(BaseMinimapShapeSubscriber):
def __init__(self, cfg):
if cfg.minimapTopic is not None:
self.subscriber = rospy.Subscriber(cfg.minimapTopic, points, self.callback, queue_size=1)
def callback(self, msg: points):
data = [{
'id': msg.id,
'color': msg.color,
'text': msg.text,
'data': [[p.x, p.y] for p in msg.data]
}]
self.sendInfo(data)
class RosCameraShapeSubscriber(BaseCameraShapeSubscriber):
def __init__(self, cfg, cam):
if cfg['shapeTopic'] is not None:
self.camera = cam
self.subscriber = rospy.Subscriber(cfg['shapeTopic'], points, self.callback, queue_size=1)
def callback(self, msg: points):
data = [{
'id': msg.id,
'color': msg.color,
'text': msg.text,
'data': [[p.x, p.y] for p in msg.data]
}]
self.sendInfo(data)
class RosViewControl(BaseViewHandler):
def __init__(self, topic):
        if topic is not None and topic != '':
self.subscriber = rospy.Subscriber(topic, view_control, self.callback, queue_size=1)
def callback(self, msg: view_control):
self.setView(msg.camera, msg.cameraFullScreen, msg.scale, msg.x, msg.y)
imageSubscribers = {}
for cam, cfg in config.cameraConfig.items():
imageSubscribers[cam] = RosImageSubscriber(cfg)
calibrateHandlers = {}
for cam, cfg in config.cameraConfig.items():
if cfg['calibrationTopic'] != '':
calibrateHandlers[cam] = RosPathHandler(cfg, cam)
rosCameraShapeSubscribers = {}
for cam, cfg in config.cameraConfig.items():
if cfg['shapeTopic'] != '':
rosCameraShapeSubscribers[cam] = RosCameraShapeSubscriber(cfg, cam)
rosHpHandler = RosHpHandler(config.judgeSystem)
rosMinimapShapeSubscriber = RosMinimapShapeSubscriber(config)
rosViewControl = RosViewControl(config.viewControlTopic)
|
the-stack_106_21854
|
# Copyright (c) 2014 Olli Wang. All right reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import xml.etree.ElementTree as ET
from svg2nvg import definitions
from svg2nvg import generator
# A list of tag names that should be ignored when parsing.
ignored_tags = ('comment', 'desc', 'title', 'namedview')
# A list of supported path commands and the number of parameters each command
# requires.
path_commands = (('A', 7), ('C', 6), ('H', 1), ('L', 2), ('M', 2), ('Q', 4),
('S', 4), ('T', 2), ('V', 1), ('Z', 0))
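# Illustration (not part of the original file): given the table above, a path
# string such as "M10 20L30 40Z" tokenizes into M(10, 20), L(30, 40) and Z,
# since M and L each take two parameters and Z takes none.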
def attribute(method):
"""Decorator for parsing element attributes.
Methods with this decorator must return a dictionary with interested
attributes. The dictionary will then be passed to corresponded generator
method as parameters.
"""
def inner(*args, **kwargs):
self = args[0]
result = method(*args, **kwargs)
if result:
func = getattr(self.generator, method.__name__.rsplit('_')[-1])
func(**result)
return result
return inner
def element(method):
"""Decorator for parsing a element.
This decorator simply wraps the method between generator's begin_element()
and end_element() calls with the tag name as the parameter.
"""
def inner(*args, **kwargs):
self = args[0]
element_tag = get_element_tag(args[1])
self.generator.begin_element(element_tag)
method(*args, **kwargs)
self.generator.end_element(element_tag)
return inner
def get_element_tag(element):
"""Returns the tag name string without namespace of the passed element."""
return element.tag.rsplit('}')[1]
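# Hedged aside (standalone sketch, not part of svg2nvg): the @attribute
# decorator above dispatches on the method name -- the wrapped parser method
# returns a dict, and the decorator forwards it as keyword arguments to the
# generator method named after the suffix following the last underscore,
# e.g. __parse_fill(...) calls self.generator.fill(**result).
def _attribute_dispatch_demo():
    class _Gen(object):
        def fill(self, **kwargs):
            return kwargs
    class _Parser(object):
        generator = _Gen()
        @attribute
        def __parse_fill(self, element):
            return {'fill': '#ff0000'}
    assert _Parser()._Parser__parse_fill(None) == {'fill': '#ff0000'}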
class SVGParser(object):
def __init__(self, context='context'):
self.context = context
self.stmts = list()
@attribute
def __parse_bounds(self, element):
args = dict()
args['x'] = element.attrib.get('x', 0)
args['y'] = element.attrib.get('y', 0)
args['width'] = element.attrib.get('width', 0)
args['height'] = element.attrib.get('height', 0)
return args
@element
def __parse_circle(self, element):
self.generator.circle(**element.attrib)
self.__parse_fill(element)
self.__parse_stroke(element)
def __parse_element(self, element):
tag = get_element_tag(element)
if tag in ignored_tags:
return
        # Determines the method for parsing the passed element.
method_name = '_' + self.__class__.__name__ + '__parse_%s' % tag.lower()
try:
method = getattr(self, method_name)
except AttributeError:
print('Error: %r element is not supported' % tag)
exit(1)
else:
method(element)
@element
def __parse_ellipse(self, element):
self.generator.ellipse(**element.attrib)
self.__parse_fill(element)
self.__parse_stroke(element)
@attribute
def __parse_fill(self, element):
args = dict()
if 'fill' not in element.attrib:
fill = '#000000'
else:
fill = element.attrib['fill']
if fill == 'none' or fill == 'transparent':
return args
# Expands three-digit shorthand of hex color.
if fill.startswith("#") and len(fill) == 4:
fill = '#%c%c%c%c%c%c' % (fill[1], fill[1], fill[2], fill[2],
fill[3], fill[3])
args['fill'] = fill
args['fill-opacity'] = float(element.attrib.get('opacity', 1)) * \
float(element.attrib.get('fill-opacity', 1))
return args
@element
def __parse_g(self, element):
# Gathers all group attributes at current level.
self.group_attrib.append(element.attrib)
group_attrib = dict()
for attrib in self.group_attrib:
group_attrib.update(attrib)
# Applies group attributes to child elements.
for child in element:
child.attrib.update(group_attrib)
self.__parse_element(child)
# Removes the group attributes at current level.
self.group_attrib.pop()
@element
def __parse_line(self, element):
self.generator.line(element.attrib['x1'], element.attrib['y1'],
element.attrib['x2'], element.attrib['y2'])
        self.__parse_fill(element)
        self.__parse_stroke(element)
@element
def __parse_lineargradient(self, element):
self.generator.definitions[element.get('id')] = \
definitions.LinearGradientDefinition(element)
@element
def __parse_path(self, element):
def execute_command(command, parameters):
if not command:
return
for path_command in path_commands:
if path_command[0] == command.upper():
break
else:
print("Path command %r is not supported." % command)
parameter_count = path_command[1]
if parameter_count == 0:
if parameters:
print("Path command %r should not take parameters: %s" % \
(command, parameters))
exit(1)
self.generator.path_command(command)
else:
# Checks if the number of parameters matched.
if (len(parameters) % parameter_count) != 0:
print("Path command %r should take %d parameters instead "
"of %d" % (command, parameter_count, len(parameters)))
exit(1)
while parameters:
self.generator.path_command(command,
*parameters[:parameter_count])
parameters = parameters[parameter_count:]
parameters = list()
command = None
found_decimal_separator = False
parameter_string = list()
commands = tuple(c[0] for c in path_commands) + \
tuple(c[0].lower() for c in path_commands)
self.generator.begin_path_commands()
for char in element.attrib['d']:
if char in ['\n', '\t']:
continue
elif char in commands: # found command
if parameter_string:
parameters.append(''.join(parameter_string))
parameter_string = list()
execute_command(command, parameters)
command = char
parameters = list()
found_decimal_separator = False
elif char in [' ', ',', '-']:
if parameter_string:
parameters.append(''.join(parameter_string))
parameter_string = list()
found_decimal_separator = False
if char in ['-']:
parameter_string.append(char)
found_decimal_separator = False
elif char == '.':
if found_decimal_separator:
parameters.append(''.join(parameter_string))
parameter_string = list()
parameter_string.append(char)
else:
found_decimal_separator = True
parameter_string.append(char)
elif command is not None:
parameter_string.append(char)
if parameter_string:
parameters.append(''.join(parameter_string))
parameter_string = list()
execute_command(command, parameters)
self.generator.end_path_commands()
self.__parse_fill(element)
self.__parse_stroke(element)
@element
def __parse_polygon(self, element):
self.generator.polygon(**element.attrib)
self.__parse_fill(element)
self.__parse_stroke(element)
@element
def __parse_polyline(self, element):
self.generator.polyline(**element.attrib)
self.__parse_fill(element)
self.__parse_stroke(element)
@element
def __parse_rect(self, element):
self.__parse_transform(element)
args = self.__parse_bounds(element)
self.generator.rect(**args)
self.__parse_fill(element)
self.__parse_stroke(element)
@attribute
def __parse_stroke(self, element):
args = dict()
if 'stroke' not in element.attrib:
return dict()
stroke = element.attrib['stroke']
if stroke == 'none' or stroke == 'transparent':
return dict()
args['stroke'] = stroke
args['stroke-opacity'] = float(element.attrib.get('opacity', 1)) * \
float(element.attrib.get('stroke-opacity', 1))
for attrib in ['linecap', 'linejoin', 'miterlimit']:
attrib = 'stroke-%s' % attrib
if attrib in element.attrib:
args[attrib] = element.attrib[attrib]
if 'stroke-width' in element.attrib:
args['stroke-width'] = element.attrib['stroke-width']
if float(args['stroke-width']) < 1:
return dict()
return args
@attribute
def __parse_transform(self, element):
if 'transform' not in element.attrib:
return dict()
return {'transform': element.attrib['transform']}
def __parse_tree(self, tree):
root = tree.getroot()
root_tag = get_element_tag(root)
if root_tag != 'svg':
print("Error: the root tag must be svg instead of %r" % root_tag)
exit(1)
del self.stmts[:] # clears the cached statements.
try:
self.canvas_width = root.attrib['width']
self.canvas_height = root.attrib['height']
except KeyError:
view_box = root.attrib['viewBox'].split(' ')
self.canvas_width = view_box[2]
self.canvas_height = view_box[3]
self.generator = generator.Generator(self.stmts, self.context)
self.group_attrib = list()
for child in root:
self.__parse_element(child)
def get_content(self):
return '\n'.join(self.stmts)
def get_header_file_content(self, filename, nanovg_include_path,
uses_namespace=False, prototype_only=False):
basename = os.path.splitext(os.path.basename(filename))[0]
guard_constant = 'SVG2NVG_%s_H_' % basename.upper()
function_name = 'Render%s' % basename.title().replace('_', '')
result = '#ifndef %s\n' % guard_constant
result += '#define %s\n\n' % guard_constant
if nanovg_include_path:
result += '#include "%s"\n\n' % nanovg_include_path
if uses_namespace:
result += 'namespace svg2nvg {\n\n'
prototype = 'void %s(NVGcontext* %s)' % (function_name, self.context)
if prototype_only:
result += '%s;\n\n' % prototype
else:
result += 'static %s {\n' % prototype
for stmt in self.stmts:
result += ' %s\n' % stmt
result += '}\n\n'
if uses_namespace:
result += '} // namespace svg2nvg\n\n'
result += '#endif // %s\n' % guard_constant
return result
def get_source_file_content(self, filename, nanovg_include_path,
uses_namespace=False,
header_include_path=None):
result = ''
basename = os.path.splitext(os.path.basename(filename))[0]
if header_include_path is None:
header_include_path = ''
header_name = '%s.h' % basename
header_include_path = os.path.join(header_include_path, header_name)
result += '#include "%s"\n\n' % header_include_path
if nanovg_include_path:
result += '#include "%s"\n\n' % nanovg_include_path
if uses_namespace:
result += 'namespace svg2nvg {\n\n'
function_name = 'Render%s' % basename.title().replace('_', '')
result += 'void %s(NVGcontext* %s) {\n' % (function_name, self.context)
for stmt in self.stmts:
result += ' %s\n' % stmt
result += '}\n\n'
if uses_namespace:
result += '} // namespace svg2nvg\n'
return result
def parse_file(self, filename):
try:
tree = ET.parse(filename)
        except IOError:
print('Error: cannot open SVG file at path: %s' % filename)
exit(1)
else:
self.__parse_tree(tree)
def parse_string(self, string):
tree = ET.fromstring(string)
self.__parse_tree(tree)
|
the-stack_106_21856
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-GPU tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.contrib.distribute.python import values
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
GPU_TEST = "test_gpu" in sys.argv[0]
class MirroredTwoDeviceDistributionTest(strategy_test_lib.DistributionTestBase):
def _get_distribution_strategy(self):
devices = ["/device:CPU:0", "/device:GPU:0"]
if GPU_TEST:
self.assertGreater(context.num_gpus(), 0)
if context.num_gpus() > 1:
devices = ["/device:GPU:0", "/device:GPU:1"]
print(self.id().split(".")[-1], "devices:", ", ".join(devices))
return mirrored_strategy.MirroredStrategy(devices)
def testMinimizeLossEager(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_minimize_loss_eager(self._get_distribution_strategy())
def testMinimizeLossGraph(self):
soft_placement = not GPU_TEST
print("testMinimizeLossGraph soft_placement:", soft_placement)
self._test_minimize_loss_graph(
self._get_distribution_strategy(), soft_placement=soft_placement)
def testMapReduce(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_map_reduce(self._get_distribution_strategy())
def testDeviceIndex(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_device_index(self._get_distribution_strategy())
def testTowerId(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_tower_id(self._get_distribution_strategy())
def testNumTowers(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self.assertEqual(2, self._get_distribution_strategy().num_towers)
@test_util.run_in_graph_and_eager_modes()
def testCallAndMergeExceptions(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_call_and_merge_exceptions(self._get_distribution_strategy())
@test_util.run_in_graph_and_eager_modes()
def testRunRegroupError(self):
def run_fn(device_id):
# Generates a list with different lengths on different devices.
# Will fail in _regroup() (if more than one device).
return list(range(device_id))
dist = self._get_distribution_strategy()
with dist.scope(), self.assertRaises(AssertionError):
dist.call_for_each_tower(run_fn, dist.worker_device_index)
@test_util.run_in_graph_and_eager_modes()
def testReduceToCpu(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
def run_fn(device_id):
return device_id
dist = self._get_distribution_strategy()
with dist.scope():
result = dist.call_for_each_tower(run_fn, dist.worker_device_index)
reduced = dist.reduce("sum", result, destinations="/device:CPU:0")
unwrapped = dist.unwrap(reduced)
self.assertEqual(1, len(unwrapped))
expected = sum(range(len(dist.worker_devices)))
self.assertEqual(expected, self.evaluate(unwrapped[0]))
@test_util.with_c_api
class MirroredStrategyVariableCreationTest(test.TestCase):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
def _skip_eager_if_gpus_less_than(self, num_gpus):
if context.num_gpus() < num_gpus and context.executing_eagerly():
self.skipTest("Enough GPUs not available for this test in eager mode.")
@test_util.run_in_graph_and_eager_modes(config=config)
def testSingleVariable(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
# This variable should be created only once across the threads because of
# special variable_creator functions used by `dist.call_for_each_tower`.
v = variable_scope.variable(1.0, name="foo")
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
self.assertEquals("foo:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testUnnamedVariable(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
v = variable_scope.variable(1.0)
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
# Default name of "Variable" will be used.
self.assertEquals("Variable:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testMultipleVariables(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
vs = []
for i in range(5):
vs.append(variable_scope.variable(1.0, name="foo" + str(i)))
distribute_lib.get_tower_context().merge_call(lambda _: _)
return vs
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
for i, v in enumerate(result):
self.assertIsInstance(v, values.MirroredVariable)
self.assertEquals("foo" + str(i) + ":0", v.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testMultipleVariablesWithSameCanonicalName(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
vs = []
vs.append(variable_scope.variable(1.0, name="foo/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar_1"))
vs.append(variable_scope.variable(1.0, name="foo/bar_1"))
distribute_lib.get_tower_context().merge_call(lambda _: _)
return vs
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
for v in result:
self.assertIsInstance(v, values.MirroredVariable)
self.assertEquals(4, len(result))
self.assertEquals("foo/bar:0", result[0].name)
self.assertEquals("foo_1/bar:0", result[1].name)
self.assertEquals("foo_1/bar_1:0", result[2].name)
self.assertEquals("foo/bar_1:0", result[3].name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testVariableWithSameCanonicalNameAcrossThreads(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn(device_id):
v = variable_scope.variable(1.0, name="foo_" + str(device_id))
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(
model_fn, dist.worker_device_index, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
# The resulting mirrored variable will use the name from the first device.
self.assertEquals("foo_0:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testWithLayers(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn(features):
with variable_scope.variable_scope("common"):
layer1 = core.Dense(1)
layer1(features)
layer2 = core.Dense(1)
layer2(features)
# This will pause the current thread, and execute the other thread.
distribute_lib.get_tower_context().merge_call(lambda _: _)
layer3 = core.Dense(1)
layer3(features)
return [(layer1.kernel, layer1.bias),
(layer2.kernel, layer2.bias),
(layer3.kernel, layer3.bias)]
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
features = dist.distribute_dataset(
lambda: dataset_ops.Dataset.from_tensors([[1.]]).repeat(10)
).make_one_shot_iterator().get_next()
with dist.scope():
result = dist.call_for_each_tower(
model_fn, features, run_concurrently=False)
suffixes = ["", "_1", "_2"]
for (kernel, bias), suffix in zip(result, suffixes):
self.assertIsInstance(kernel, values.MirroredVariable)
self.assertEquals("common/dense" + suffix + "/kernel:0", kernel.name)
self.assertIsInstance(bias, values.MirroredVariable)
self.assertEquals("common/dense" + suffix + "/bias:0", bias.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testWithGetVariableAndVariableScope(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
v0 = variable_scope.get_variable("var-thread0", [1])
with variable_scope.variable_scope("common"):
v1 = variable_scope.get_variable("var-thread1", [1])
# This will pause the current thread, and execute the other thread.
distribute_lib.get_tower_context().merge_call(lambda _: _)
v2 = variable_scope.get_variable("var-thread2", [1])
return v0, v1, v2
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
with variable_scope.variable_scope("main"):
v = variable_scope.get_variable("var-main0", [1])
self.assertEquals("main/var-main0:0", v.name)
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(3, len(result))
v0, v1, v2 = result
self.assertIsInstance(v0, values.MirroredVariable)
self.assertEquals("main/var-thread0:0", v0.name)
self.assertIsInstance(v1, values.MirroredVariable)
self.assertEquals("main/common/var-thread1:0", v1.name)
self.assertIsInstance(v2, values.MirroredVariable)
self.assertEquals("main/common/var-thread2:0", v2.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testThreeDevices(self):
self._skip_eager_if_gpus_less_than(2)
def model_fn():
v = variable_scope.variable(1.0, name="foo")
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
self.assertEquals("foo:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testNonMatchingVariableCreation(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn(name):
v = variable_scope.variable(1.0, name=name)
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
names = values.DistributedValues({
"/device:CPU:0": "foo",
"/device:GPU:0": "bar"
})
with self.assertRaises(RuntimeError):
_ = dist.call_for_each_tower(model_fn, names, run_concurrently=False)
@test_util.run_in_graph_and_eager_modes(config=config)
def testTowerLocalVariable(self):
self._skip_eager_if_gpus_less_than(1)
all_v_sum = {}
all_v_mean = {}
def model_fn(device_id):
tower_context = distribute_lib.get_tower_context()
with tower_context.tower_local_var_scope("sum"):
v_sum = variable_scope.variable(1.0)
with tower_context.tower_local_var_scope("mean"):
v_mean = variable_scope.variable(4.0)
self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
self.assertTrue(isinstance(v_mean, values.TowerLocalVariable))
updates = [v_sum.assign_add(2.0 + device_id),
v_mean.assign(6.0 * device_id)]
all_v_sum[device_id] = v_sum
all_v_mean[device_id] = v_mean
return updates, v_sum, v_mean
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
# Create "sum" and "mean" versions of TowerLocalVariables.
ret_ops, ret_v_sum, ret_v_mean = dist.call_for_each_tower(
model_fn, dist.worker_device_index, run_concurrently=False)
# Should see the same wrapping instance in all towers.
self.assertIs(all_v_sum[0], ret_v_sum)
self.assertIs(all_v_mean[0], ret_v_mean)
      for i in range(1, dist.num_towers):
        self.assertIs(all_v_sum[0], all_v_sum[i])
        self.assertIs(all_v_mean[0], all_v_mean[i])
# Apply updates
self.evaluate(variables.global_variables_initializer())
self.evaluate([y for x in ret_ops for y in dist.unwrap(x)])
expected_sum = 0.0
expected_mean = 0.0
for i, d in enumerate(dist.worker_devices):
# Should see different values on different devices.
v_sum_value = self.evaluate(ret_v_sum.get(d).read_value())
v_mean_value = self.evaluate(ret_v_mean.get(d).read_value())
expected = i + 3.0
self.assertEqual(expected, v_sum_value)
expected_sum += expected
expected = i * 6.0
self.assertEqual(expected, v_mean_value)
expected_mean += expected
expected_mean /= len(dist.worker_devices)
# Without get(device), should return the value you get by
# applying the reduction across all towers (whether you use
# fetch(), get(), or nothing).
self.assertEqual(expected_sum, self.evaluate(dist.fetch(ret_v_sum)))
self.assertEqual(expected_mean, self.evaluate(dist.fetch(ret_v_mean)))
self.assertEqual(expected_sum, self.evaluate(ret_v_sum.get()))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean.get()))
if not context.executing_eagerly():
self.assertEqual(expected_sum, self.evaluate(ret_v_sum))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean))
# NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
# testing this in eager mode.
def testNameScope(self):
def model_fn():
with ops.name_scope("foo"):
a = constant_op.constant(1.0, name="a")
distribute_lib.get_tower_context().merge_call(lambda _: _)
b = constant_op.constant(1.0, name="b")
return a, b
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
with ops.name_scope("main"):
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = dist.unwrap(v)
self.assertEquals("main/foo/" + name + ":0", v0.name)
self.assertEquals("main/tower_1/foo/" + name + ":0", v1.name)
def testWithDefaultName(self):
def model_fn():
with ops.name_scope(None, "foo"):
a = constant_op.constant(1.0, name="a")
distribute_lib.get_tower_context().merge_call(lambda _: _)
b = constant_op.constant(2.0, name="b")
return a, b
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = dist.unwrap(v)
self.assertEquals("foo/" + name + ":0", v0.name)
self.assertEquals("tower_1/foo/" + name + ":0", v1.name)
def testDynamicRnnVariables(self):
def model_fn():
inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])
cell_fw = rnn_cell_impl.LSTMCell(300)
cell_bw = rnn_cell_impl.LSTMCell(300)
(outputs, _) = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs,
dtype=dtypes.float32)
return outputs
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
# Two variables are created by the RNN layer.
self.assertEquals(2, len(result))
for v in result:
self.assertIsInstance(v, values.DistributedValues)
_, v1 = dist.unwrap(v)
self.assertStartsWith(v1.name, "tower_1/")
if __name__ == "__main__":
test.main()
|
the-stack_106_21857
|
"""Support for mobile_app push notifications."""
import asyncio
import logging
import async_timeout
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
BaseNotificationService,
)
from homeassistant.const import (
HTTP_ACCEPTED,
HTTP_CREATED,
HTTP_OK,
HTTP_TOO_MANY_REQUESTS,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.util.dt as dt_util
from .const import (
ATTR_APP_DATA,
ATTR_APP_ID,
ATTR_APP_VERSION,
ATTR_DEVICE_NAME,
ATTR_OS_VERSION,
ATTR_PUSH_RATE_LIMITS,
ATTR_PUSH_RATE_LIMITS_ERRORS,
ATTR_PUSH_RATE_LIMITS_MAXIMUM,
ATTR_PUSH_RATE_LIMITS_RESETS_AT,
ATTR_PUSH_RATE_LIMITS_SUCCESSFUL,
ATTR_PUSH_TOKEN,
ATTR_PUSH_URL,
DATA_CONFIG_ENTRIES,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
def push_registrations(hass):
"""Return a dictionary of push enabled registrations."""
targets = {}
for webhook_id, entry in hass.data[DOMAIN][DATA_CONFIG_ENTRIES].items():
data = entry.data
app_data = data[ATTR_APP_DATA]
if ATTR_PUSH_TOKEN in app_data and ATTR_PUSH_URL in app_data:
device_name = data[ATTR_DEVICE_NAME]
if device_name in targets:
_LOGGER.warning("Found duplicate device name %s", device_name)
continue
targets[device_name] = webhook_id
return targets
# pylint: disable=invalid-name
def log_rate_limits(hass, device_name, resp, level=logging.INFO):
"""Output rate limit log line at given level."""
if ATTR_PUSH_RATE_LIMITS not in resp:
return
rate_limits = resp[ATTR_PUSH_RATE_LIMITS]
resetsAt = rate_limits[ATTR_PUSH_RATE_LIMITS_RESETS_AT]
resetsAtTime = dt_util.parse_datetime(resetsAt) - dt_util.utcnow()
rate_limit_msg = (
"mobile_app push notification rate limits for %s: "
"%d sent, %d allowed, %d errors, "
"resets in %s"
)
_LOGGER.log(
level,
rate_limit_msg,
device_name,
rate_limits[ATTR_PUSH_RATE_LIMITS_SUCCESSFUL],
rate_limits[ATTR_PUSH_RATE_LIMITS_MAXIMUM],
rate_limits[ATTR_PUSH_RATE_LIMITS_ERRORS],
str(resetsAtTime).split(".")[0],
)
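# Illustration of the payload shape log_rate_limits() expects (the keys are the
# imported ATTR_* constants; their concrete string values live in .const):
#
#   resp = {
#       ATTR_PUSH_RATE_LIMITS: {
#           ATTR_PUSH_RATE_LIMITS_SUCCESSFUL: 1,
#           ATTR_PUSH_RATE_LIMITS_MAXIMUM: 150,
#           ATTR_PUSH_RATE_LIMITS_ERRORS: 0,
#           ATTR_PUSH_RATE_LIMITS_RESETS_AT: "2021-01-01T00:00:00+00:00",
#       }
#   }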
async def async_get_service(hass, config, discovery_info=None):
"""Get the mobile_app notification service."""
session = async_get_clientsession(hass)
return MobileAppNotificationService(session)
class MobileAppNotificationService(BaseNotificationService):
"""Implement the notification service for mobile_app."""
def __init__(self, session):
"""Initialize the service."""
self._session = session
@property
def targets(self):
"""Return a dictionary of registered targets."""
return push_registrations(self.hass)
async def async_send_message(self, message="", **kwargs):
"""Send a message to the Lambda APNS gateway."""
data = {ATTR_MESSAGE: message}
if kwargs.get(ATTR_TITLE) is not None:
# Remove default title from notifications.
if kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT:
data[ATTR_TITLE] = kwargs.get(ATTR_TITLE)
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = push_registrations(self.hass).values()
if kwargs.get(ATTR_DATA) is not None:
data[ATTR_DATA] = kwargs.get(ATTR_DATA)
for target in targets:
entry = self.hass.data[DOMAIN][DATA_CONFIG_ENTRIES][target]
entry_data = entry.data
app_data = entry_data[ATTR_APP_DATA]
push_token = app_data[ATTR_PUSH_TOKEN]
push_url = app_data[ATTR_PUSH_URL]
data[ATTR_PUSH_TOKEN] = push_token
reg_info = {
ATTR_APP_ID: entry_data[ATTR_APP_ID],
ATTR_APP_VERSION: entry_data[ATTR_APP_VERSION],
}
if ATTR_OS_VERSION in entry_data:
reg_info[ATTR_OS_VERSION] = entry_data[ATTR_OS_VERSION]
data["registration_info"] = reg_info
try:
with async_timeout.timeout(10):
response = await self._session.post(push_url, json=data)
result = await response.json()
if response.status in [HTTP_OK, HTTP_CREATED, HTTP_ACCEPTED]:
log_rate_limits(self.hass, entry_data[ATTR_DEVICE_NAME], result)
continue
fallback_error = result.get("errorMessage", "Unknown error")
fallback_message = (
f"Internal server error, please try again later: {fallback_error}"
)
message = result.get("message", fallback_message)
if "message" in result:
if message[-1] not in [".", "?", "!"]:
message += "."
message += (
" This message is generated externally to Home Assistant."
)
if response.status == HTTP_TOO_MANY_REQUESTS:
_LOGGER.warning(message)
log_rate_limits(
self.hass, entry_data[ATTR_DEVICE_NAME], result, logging.WARNING
)
else:
_LOGGER.error(message)
except asyncio.TimeoutError:
_LOGGER.error("Timeout sending notification to %s", push_url)
|
the-stack_106_21858
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch XLM RoBERTa xl,xxl model."""
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_xlm_roberta_xl import XLMRobertaXLConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "xlm-roberta-xlarge"
_CONFIG_FOR_DOC = "XLMRobertaXLConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/xlm-roberta-xl",
"facebook/xlm-roberta-xxl",
# See all RoBERTa models at https://huggingface.co/models?filter=xlm-roberta-xl
]
class XLMRobertaXLEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# Unlike BertEmbeddings, no LayerNorm is applied to the summed embeddings here; in this XL variant,
# normalization happens inside the encoder layers (pre-LayerNorm architecture).
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# If token_type_ids was not passed, fall back to the all-zeros buffer registered in the constructor
# (the usual case when it is auto-generated). The registered buffer helps users trace the model
# without passing token_type_ids and solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings.create_position_ids_from_inputs_embeds
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
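# Illustrative sketch, not part of the original model code: how position ids are derived from
# input ids so that padded positions keep the padding index. The toy tensor and the pad id of 1
# are invented for demonstration; see create_position_ids_from_input_ids at the end of this file.
def _example_position_ids():
    toy_input_ids = torch.tensor([[5, 7, 9, 1, 1]])  # assume 1 is the pad token id
    # Non-pad tokens receive positions padding_idx + 1, padding_idx + 2, ...; pads stay at padding_idx.
    return create_position_ids_from_input_ids(toy_input_ids, padding_idx=1)  # tensor([[2, 3, 4, 1, 1]])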
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->XLMRobertaXL
class XLMRobertaXLSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(
config, "position_embedding_type", "absolute"
)
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the XLMRobertaXLModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
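# Illustrative sketch, not part of the original model code: the head-splitting reshape performed
# by transpose_for_scores above, with invented toy dimensions.
def _example_head_split(batch=2, seq=4, num_heads=8, head_size=16):
    hidden = torch.randn(batch, seq, num_heads * head_size)
    x = hidden.view(batch, seq, num_heads, head_size)
    x = x.permute(0, 2, 1, 3)  # -> (batch, num_heads, seq, head_size)
    return x.shape  # torch.Size([2, 8, 4, 16])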
class XLMRobertaXLSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
class XLMRobertaXLAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.self = XLMRobertaXLSelfAttention(config, position_embedding_type=position_embedding_type)
self.output = XLMRobertaXLSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
intermediate = self.self_attn_layer_norm(hidden_states)
self_outputs = self.self(
intermediate,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
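# Illustrative sketch, not part of the original model code: the pre-LayerNorm residual pattern used
# by XLMRobertaXLAttention above — the block input is normalized before self-attention and the
# residual is added without a trailing LayerNorm (unlike BERT's post-LayerNorm attention block).
# `norm` and `sublayer` are stand-ins for self_attn_layer_norm and the attention + output pair.
def _example_pre_layernorm_residual(hidden_states, norm, sublayer):
    return sublayer(norm(hidden_states)) + hidden_states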
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class XLMRobertaXLIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class XLMRobertaXLOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
class XLMRobertaXLLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = XLMRobertaXLAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = XLMRobertaXLAttention(config, position_embedding_type="absolute")
self.intermediate = XLMRobertaXLIntermediate(config)
self.output = XLMRobertaXLOutput(config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.LayerNorm(attention_output)
intermediate_output = self.intermediate(intermediate_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
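# Illustrative sketch, not part of the original model code: what apply_chunking_to_forward does
# conceptually for the feed-forward above — split the sequence dimension into chunks, apply the
# feed-forward to each chunk, and concatenate the results, trading speed for lower peak memory.
def _example_chunked_feed_forward(ff, attention_output, chunk_size=64, seq_len_dim=1):
    if chunk_size == 0:
        return ff(attention_output)
    chunks = attention_output.split(chunk_size, dim=seq_len_dim)
    return torch.cat([ff(chunk) for chunk in chunks], dim=seq_len_dim)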
class XLMRobertaXLEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([XLMRobertaXLLayer(config) for _ in range(config.num_hidden_layers)])
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
hidden_states = self.LayerNorm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class XLMRobertaXLPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class XLMRobertaXLPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = XLMRobertaXLConfig
base_model_prefix = "roberta"
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def update_keys_to_ignore(self, config, del_keys_to_ignore):
"""Remove some keys from ignore list"""
if not config.tie_word_embeddings:
# must make a new list, or the class variable gets modified!
self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
self._keys_to_ignore_on_load_missing = [
k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
]
XLM_ROBERTA_XL_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
etc.). This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to
general usage and behavior.
Parameters:
config ([`XLMRobertaXLConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
XLM_ROBERTA_XL_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`RobertaTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare XLM-RoBERTa-xlarge Model transformer outputting raw hidden-states without any specific head on top.",
XLM_ROBERTA_XL_START_DOCSTRING,
)
class XLMRobertaXLModel(XLMRobertaXLPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in *Attention is
all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin. To behave as a decoder the model needs to be initialized with the `is_decoder`
argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to be initialized with
both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as
an input to the forward pass. .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->XLMRobertaXL
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = XLMRobertaXLEmbeddings(config)
self.encoder = XLMRobertaXLEncoder(config)
self.pooler = XLMRobertaXLPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
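# Illustrative sketch, not part of the original library code: minimal usage of the bare model,
# assuming the checkpoint name below is available locally or on the Hugging Face Hub.
def _example_bare_model_usage():
    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-roberta-xl")
    model = AutoModel.from_pretrained("facebook/xlm-roberta-xl")
    inputs = tokenizer("Hello world", return_tensors="pt")
    outputs = model(**inputs)
    return outputs.last_hidden_state.shape  # (1, sequence_length, hidden_size)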
@add_start_docstrings(
"""XLM-RoBERTa-xlarge Model with a `language modeling` head on top for CLM fine-tuning.""",
XLM_ROBERTA_XL_START_DOCSTRING,
)
class XLMRobertaXLForCausalLM(XLMRobertaXLPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
self.lm_head = XLMRobertaXLLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
Returns:
Example:
```python
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained("roberta-base", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings(
"""XLM-RoBERTa-xlarge Model with a `language modeling` head on top.""", XLM_ROBERTA_XL_START_DOCSTRING
)
class XLMRobertaXLForMaskedLM(XLMRobertaXLPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
self.lm_head = XLMRobertaXLLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
kwargs (`Dict[str, any]`, optional, defaults to *{}*):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class XLMRobertaXLLMHead(nn.Module):
"""XLM-Roberta-xlarge Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
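# Illustrative sketch, not part of the original library code: filling a <mask> token with the
# masked-LM head defined above, assuming the checkpoint name below resolves via the Auto classes.
def _example_masked_lm_usage():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-roberta-xl")
    model = XLMRobertaXLForMaskedLM.from_pretrained("facebook/xlm-roberta-xl")
    inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
    logits = model(**inputs).logits
    mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
    return tokenizer.decode(logits[0, mask_positions].argmax(dim=-1))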
@add_start_docstrings(
"""
XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top
of the pooled output) e.g. for GLUE tasks.
""",
XLM_ROBERTA_XL_START_DOCSTRING,
)
class XLMRobertaXLForSequenceClassification(XLMRobertaXLPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
self.classifier = XLMRobertaXLClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
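# Illustrative sketch, not part of the original library code: how the loss type is inferred above
# when config.problem_type is unset — one label means regression (MSE), integer labels with more
# than one class mean single-label classification (cross-entropy), anything else is treated as
# multi-label classification (BCE with logits).
def _example_infer_problem_type(num_labels, labels):
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"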
@add_start_docstrings(
"""
XLM-Roberta-xlarge Model with a multiple choice classification head on top (a linear layer on top of the pooled
output and a softmax) e.g. for RocStories/SWAG tasks.
""",
XLM_ROBERTA_XL_START_DOCSTRING,
)
class XLMRobertaXLForMultipleChoice(XLMRobertaXLPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = XLMRobertaXLModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(
XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
XLM-Roberta-xlarge Model with a token classification head on top (a linear layer on top of the hidden-states
output) e.g. for Named-Entity-Recognition (NER) tasks.
""",
XLM_ROBERTA_XL_START_DOCSTRING,
)
class XLMRobertaXLForTokenClassification(XLMRobertaXLPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
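# Illustrative sketch, not part of the original library code: the "active loss" masking used above,
# shown on an invented mini-batch — labels at padded positions are replaced with the loss function's
# ignore_index (-100 by default) so that CrossEntropyLoss skips them.
def _example_active_label_masking():
    labels = torch.tensor([[5, 2, 7]])          # last token is padding in this toy example
    attention_mask = torch.tensor([[1, 1, 0]])
    loss_fct = CrossEntropyLoss()
    active = attention_mask.view(-1) == 1
    return torch.where(active, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels))
    # -> tensor([5, 2, -100])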
class XLMRobertaXLClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
XLM-Roberta-xlarge Model with a span classification head on top for extractive question-answering tasks like SQuAD
(a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
XLM_ROBERTA_XL_START_DOCSTRING,
)
class XLMRobertaXLForQuestionAnswering(XLMRobertaXLPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(XLM_ROBERTA_XL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split can add an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# Sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
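# Illustrative sketch, not part of the original library code: turning the start/end logits produced
# above into an answer string for the first example in a batch. All arguments are stand-ins for the
# tensors and tokenizer a caller would already have.
def _example_span_from_logits(start_logits, end_logits, input_ids, tokenizer):
    start = int(start_logits[0].argmax())
    end = int(end_logits[0].argmax())
    return tokenizer.decode(input_ids[0, start : end + 1])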
# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
    input_ids (torch.Tensor): token ids, with padding positions equal to `padding_idx`.
    padding_idx (int): the id used for padding tokens.
    past_key_values_length (int, optional): offset to add when decoding with cached key/values.
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
|
the-stack_106_21859
|
# coding: utf-8
# Like v2, and in contrast to v1, this version removes the cumprod from the forward pass.
# In addition, it uses a different conditional loss function than v2: the loss is computed as the
# average over all samples, instead of first averaging the cross entropy within each task and then
# averaging equally across tasks. As a result, each task is weighted naturally by the number of
# samples used to train it, without setting task weights manually (see the illustrative sketch below).
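# Illustrative sketch, not part of the original script (plain Python, invented loss values): the
# averaging difference described above. Averaging within each task first weights every task equally;
# averaging over all samples at once weights each task by its sample count, which is what this
# version of the conditional loss does. The exact implementation lives in helper_files/losses.py.
def _example_loss_averaging(per_task_losses=([0.2, 0.4], [0.6, 0.6, 0.6, 0.6])):
    task_means = [sum(task) / len(task) for task in per_task_losses]
    equal_task_weighting = sum(task_means) / len(task_means)               # 0.45
    all_samples = [value for task in per_task_losses for value in task]
    sample_level_weighting = sum(all_samples) / len(all_samples)           # 0.5
    return equal_task_weighting, sample_level_weighting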
# Imports
import os
import json
import pandas as pd
import time
import torch
import torch.nn as nn
import argparse
import sys
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data import SubsetRandomSampler
# ### from local .py files
from helper_files.trainingeval import (iteration_logging, epoch_logging,
aftertraining_logging, save_predictions,
create_logfile)
from helper_files.trainingeval import compute_per_class_mae, compute_selfentropy_for_mae
from helper_files.resnet34 import BasicBlock
from helper_files.dataset import levels_from_labelbatch
from helper_files.losses import loss_conditional_v2
from helper_files.helper import set_all_seeds, set_deterministic
from helper_files.plotting import plot_training_loss, plot_mae, plot_accuracy
from helper_files.plotting import plot_per_class_mae
from helper_files.dataset import get_labels_from_loader
from helper_files.parser import parse_cmdline_args
# Argparse helper
parser = argparse.ArgumentParser()
args = parse_cmdline_args(parser)
##########################
# Settings and Setup
##########################
NUM_WORKERS = args.numworkers
LEARNING_RATE = args.learningrate
NUM_EPOCHS = args.epochs
BATCH_SIZE = args.batchsize
SKIP_TRAIN_EVAL = args.skip_train_eval
SAVE_MODELS = args.save_models
if args.cuda >= 0 and torch.cuda.is_available():
DEVICE = torch.device(f'cuda:{args.cuda}')
else:
DEVICE = torch.device('cpu')
if args.seed == -1:
RANDOM_SEED = None
else:
RANDOM_SEED = args.seed
PATH = args.outpath
if not os.path.exists(PATH):
os.mkdir(PATH)
cuda_device = DEVICE
if torch.cuda.is_available():
cuda_version = torch.version.cuda
else:
cuda_version = 'NA'
info_dict = {
'settings': {
'script': os.path.basename(__file__),
'pytorch version': torch.__version__,
'cuda device': str(cuda_device),
'cuda version': cuda_version,
'random seed': RANDOM_SEED,
'learning rate': LEARNING_RATE,
'num epochs': NUM_EPOCHS,
'batch size': BATCH_SIZE,
'output path': PATH,
'training logfile': os.path.join(PATH, 'training.log')}
}
create_logfile(info_dict)
# Deterministic CUDA & cuDNN behavior and random seeds
#set_deterministic()
set_all_seeds(RANDOM_SEED)
###################
# Dataset
###################
if args.dataset == 'mnist':
from helper_files.constants import MNIST_INFO as DATASET_INFO
from torchvision.datasets import MNIST as PyTorchDataset
from helper_files.dataset import mnist_train_transform as train_transform
from helper_files.dataset import mnist_validation_transform as validation_transform
elif args.dataset == 'morph2':
from helper_files.constants import MORPH2_INFO as DATASET_INFO
from helper_files.dataset import Morph2Dataset as PyTorchDataset
from helper_files.dataset import morph2_train_transform as train_transform
from helper_files.dataset import morph2_validation_transform as validation_transform
elif args.dataset == 'morph2-balanced':
from helper_files.constants import MORPH2_BALANCED_INFO as DATASET_INFO
from helper_files.dataset import Morph2Dataset as PyTorchDataset
from helper_files.dataset import morph2_train_transform as train_transform
from helper_files.dataset import morph2_validation_transform as validation_transform
elif args.dataset == 'afad-balanced':
from helper_files.constants import AFAD_BALANCED_INFO as DATASET_INFO
from helper_files.dataset import AFADDataset as PyTorchDataset
from helper_files.dataset import afad_train_transform as train_transform
from helper_files.dataset import afad_validation_transform as validation_transform
elif args.dataset == 'aes':
from helper_files.constants import AES_INFO as DATASET_INFO
from helper_files.dataset import AesDataset as PyTorchDataset
from helper_files.dataset import aes_train_transform as train_transform
from helper_files.dataset import aes_validation_transform as validation_transform
else:
raise ValueError('Dataset choice not supported')
###################
# Dataset
###################
if args.dataset == 'mnist':
NUM_CLASSES = 10
GRAYSCALE = True
RESNET34_AVGPOOLSIZE = 1
train_dataset = PyTorchDataset(root='./datasets',
train=True,
download=True,
transform=train_transform())
valid_dataset = PyTorchDataset(root='./datasets',
train=True,
transform=validation_transform(),
download=False)
test_dataset = PyTorchDataset(root='./datasets',
train=False,
transform=validation_transform(),
download=False)
train_indices = torch.arange(1000, 60000)
valid_indices = torch.arange(0, 1000)
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=False, # SubsetRandomSampler shuffles
drop_last=True,
num_workers=NUM_WORKERS,
sampler=train_sampler)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS,
sampler=valid_sampler)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS)
else:
GRAYSCALE = False
RESNET34_AVGPOOLSIZE = 4
if args.dataset_train_csv_path:
DATASET_INFO['TRAIN_CSV_PATH'] = args.dataset_train_csv_path
if args.dataset_valid_csv_path:
DATASET_INFO['VALID_CSV_PATH'] = args.dataset_valid_csv_path
if args.dataset_test_csv_path:
DATASET_INFO['TEST_CSV_PATH'] = args.dataset_test_csv_path
if args.dataset_img_path:
DATASET_INFO['IMAGE_PATH'] = args.dataset_img_path
df = pd.read_csv(DATASET_INFO['TRAIN_CSV_PATH'], index_col=0)
classes = df[DATASET_INFO['CLASS_COLUMN']].values
del df
train_labels = torch.tensor(classes, dtype=torch.float)
NUM_CLASSES = torch.unique(train_labels).size()[0]
del classes
train_dataset = PyTorchDataset(csv_path=DATASET_INFO['TRAIN_CSV_PATH'],
img_dir=DATASET_INFO['IMAGE_PATH'],
transform=train_transform())
test_dataset = PyTorchDataset(csv_path=DATASET_INFO['TEST_CSV_PATH'],
img_dir=DATASET_INFO['IMAGE_PATH'],
transform=validation_transform())
valid_dataset = PyTorchDataset(csv_path=DATASET_INFO['VALID_CSV_PATH'],
img_dir=DATASET_INFO['IMAGE_PATH'],
transform=validation_transform())
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=NUM_WORKERS)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS)
info_dict['dataset'] = DATASET_INFO
info_dict['settings']['num classes'] = NUM_CLASSES
##########################
# MODEL
##########################
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes, grayscale):
self.num_classes = num_classes
self.inplanes = 64
if grayscale:
in_dim = 1
else:
in_dim = 3
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(in_dim, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(RESNET34_AVGPOOLSIZE)
self.fc = nn.Linear(512, (self.num_classes-1))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, (2. / n)**.5)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = self.fc(x)
logits = logits.view(-1, (self.num_classes-1))
probas = torch.sigmoid(logits)
return logits, probas
def resnet34(num_classes, grayscale):
"""Constructs a ResNet-34 model."""
model = ResNet(block=BasicBlock,
layers=[3, 4, 6, 3],
num_classes=num_classes,
grayscale=grayscale)
return model
###########################################
# Initialize Cost, Model, and Optimizer
###########################################
model = resnet34(NUM_CLASSES, GRAYSCALE)
model.to(DEVICE)
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE,
momentum=0.9)
else:
raise ValueError('--optimizer must be "adam" or "sgd"')
if args.scheduler:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
verbose=True)
start_time = time.time()
best_mae, best_rmse, best_epoch = 999, 999, -1
info_dict['training'] = {
'num epochs': NUM_EPOCHS,
'iter per epoch': len(train_loader),
'minibatch loss': [],
'epoch train mae': [],
'epoch train rmse': [],
'epoch train acc': [],
'epoch valid mae': [],
'epoch valid rmse': [],
'epoch valid acc': [],
    'best running mae': np.inf,
    'best running rmse': np.inf,
'best running acc': 0.,
'best running epoch': -1
}
for epoch in range(1, NUM_EPOCHS+1):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
# FORWARD AND BACK PROP
logits, probas = model(features)
# ### Ordinal loss
loss = loss_conditional_v2(logits, targets, NUM_CLASSES)
# ##--------------------------------------------------------------------###
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ITERATION LOGGING
iteration_logging(info_dict=info_dict, batch_idx=batch_idx,
loss=loss, train_dataset=train_dataset,
frequency=50, epoch=epoch)
# EPOCH LOGGING
# function saves best model as best_model.pt
best_mae = epoch_logging(info_dict=info_dict,
model=model, train_loader=train_loader,
valid_loader=valid_loader,
which_model='conditional',
loss=loss, epoch=epoch, start_time=start_time,
skip_train_eval=SKIP_TRAIN_EVAL)
if args.scheduler:
scheduler.step(info_dict['training']['epoch valid rmse'][-1])
# ####### AFTER TRAINING EVALUATION
# function saves last model as last_model.pt
info_dict['last'] = {}
aftertraining_logging(model=model, which='last', info_dict=info_dict,
train_loader=train_loader,
valid_loader=valid_loader, test_loader=test_loader,
which_model='conditional',
start_time=start_time)
info_dict['best'] = {}
aftertraining_logging(model=model, which='best', info_dict=info_dict,
train_loader=train_loader,
valid_loader=valid_loader, test_loader=test_loader,
which_model='conditional',
start_time=start_time)
# ######### MAKE PLOTS ######
plot_training_loss(info_dict=info_dict, averaging_iterations=100)
plot_mae(info_dict=info_dict)
plot_accuracy(info_dict=info_dict)
# ######### PER-CLASS MAE PLOT #######
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
drop_last=False,
num_workers=NUM_WORKERS)
for best_or_last in ('best', 'last'):
model.load_state_dict(torch.load(
os.path.join(info_dict['settings']['output path'], f'{best_or_last}_model.pt')))
names = {0: 'train',
1: 'test'}
for i, data_loader in enumerate([train_loader, test_loader]):
true_labels = get_labels_from_loader(data_loader)
# ######### SAVE PREDICTIONS ######
all_probas, all_predictions = save_predictions(model=model,
which=best_or_last,
which_model='conditional',
info_dict=info_dict,
data_loader=data_loader,
prefix=names[i])
errors, counts = compute_per_class_mae(actual=true_labels.numpy(),
predicted=all_predictions.numpy())
info_dict[f'per-class mae {names[i]} ({best_or_last} model)'] = errors
#actual_selfentropy_best, best_selfentropy_best =\
# compute_selfentropy_for_mae(errors_best)
#info_dict['test set mae self-entropy'] = actual_selfentropy_best.item()
#info_dict['ideal test set mae self-entropy'] = best_selfentropy_best.item()
plot_per_class_mae(info_dict)
# ######## CLEAN UP ########
json.dump(info_dict, open(os.path.join(PATH, 'info_dict.json'), 'w'), indent=4)
if not SAVE_MODELS:
os.remove(os.path.join(PATH, 'best_model.pt'))
os.remove(os.path.join(PATH, 'last_model.pt'))
|
the-stack_106_21864
|
from datetime import datetime, timedelta
from time import sleep
import requests
import napalm
import yaml
import socket
import time
import re
MONITOR_INTERVAL = 15
DISCOVERY_INTERVAL = 300
def get_version(device, facts):
if device["os"] == "iosxe":
re_version_pattern = r"Version (.*),"
version_match = re.search(re_version_pattern, facts["os_version"])
if version_match:
return version_match.group(1)
else:
return "--"
return facts["os_version"]
def get_devices():
print("\n\n----> Retrieving devices ...", end="")
response = requests.get("http://127.0.0.1:5001/devices")
if response.status_code != 200:
print(f" !!! Failed to retrieve devices from server: {response.reason}")
return {}
print(" Devices successfully retrieved")
return response.json()
def discovery():
# 'discovery' of devices means reading them from the devices.yaml file
print(
"\n\n----- Discovery devices from inventory ---------------------"
)
with open("devices.yaml", "r") as yaml_in:
yaml_devices = yaml_in.read()
devices = yaml.safe_load(yaml_devices)
existing_devices = get_devices()
for device in devices:
try:
device["ip_address"] = socket.gethostbyname(device["hostname"])
except (socket.error, socket.gaierror) as e:
print(f" !!! Error attempting to get ip address for device {device['hostname']}: {e}")
device["ip_address"] = ""
if device["name"] in existing_devices:
existing_devices[device["name"]]["ip_address"] = device["ip_address"]
device = existing_devices[device["name"]]
else:
device["availability"] = False
device["response_time"] = 0.0
device["model"] = ""
device["os_version"] = ""
device["last_heard"] = ""
update_device(device)
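# Example of the devices.yaml layout assumed by discovery() above (field names are
# inferred from how this script uses each device dict; adapt to your inventory):
#
#   - name: core-rtr-01
#     hostname: core-rtr-01.example.com
#     os: iosxe
#     username: admin
#     password: secret
#     ssh_port: 22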
def update_device(device):
print(f"----> Updating device status via REST API: {device['name']}", end="")
rsp = requests.put("http://127.0.0.1:5001/devices", params={"name": device["name"]}, json=device)
if rsp.status_code != 204:
print(
f"{str(datetime.now())[:-3]}: Error posting to /devices, response: {rsp.status_code}, {rsp.content}"
)
print(f" !!! Unsuccessful attempt to update device status via REST API: {device['name']}")
else:
print(f" Successfully updated device status via REST API: {device['name']}")
def get_device_facts(device):
try:
if device["os"] == "ios" or device["os"] == "iosxe":
driver = napalm.get_network_driver("ios")
elif device["os"] == "nxos-ssh":
driver = napalm.get_network_driver("nxos_ssh")
elif device["os"] == "nxos":
driver = napalm.get_network_driver("nxos")
else:
driver = napalm.get_network_driver(device["os"]) # try this, it will probably fail
napalm_device = driver(
hostname=device["hostname"],
username=device["username"],
password=device["password"],
optional_args={"port": device["ssh_port"]},
)
napalm_device.open()
time_start = time.time()
facts = napalm_device.get_facts()
response_time = time.time() - time_start
device["os_version"] = get_version(device, facts)
device["model"] = facts["model"]
device["availability"] = True
device["response_time"] = response_time
device["last_heard"] = str(datetime.now())[:-3]
    except Exception as e:
print(f" !!! Failed to get facts for device {device['name']}: {e}")
device["availability"] = False
def main():
last_discovery = datetime.now()-timedelta(days=1)
while True:
if (datetime.now() - last_discovery).total_seconds() > DISCOVERY_INTERVAL:
discovery()
last_discovery = datetime.now()
devices = get_devices()
for device in devices.values():
get_device_facts(device)
update_device(device)
sleep(MONITOR_INTERVAL)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("\n\nExiting device-monitor")
exit()
|
the-stack_106_21866
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0010_change_page_owner_to_null_on_delete'),
]
operations = [
migrations.CreateModel(
name='WelcomePage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WelcomePageEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('title', models.CharField(max_length=255, null=True, blank=True)),
('url', models.CharField(max_length=4096, null=True, blank=True)),
('show_divider_underneath', models.BooleanField(default=False)),
('page', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailcore.Page', null=True)),
('welcome_page', modelcluster.fields.ParentalKey(related_name='wtf', to='welcome_snippet.WelcomePage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
]
|
the-stack_106_21867
|
"""
================================
LASA Handwriting with ProMPs
================================
The LASA Handwriting dataset learned with ProMPs. The dataset consists of
2D handwriting motions. The first and third column of the plot represent
demonstrations and the second and fourth column show the imitated ProMPs
with covariances.
"""
import numpy as np
from bolero.datasets import load_lasa
from bolero.representation import ProMPBehavior
from bolero.representation.promp_behavior import plot_covariance
import matplotlib.pyplot as plt
print(__doc__)
def load(idx):
X, _, _, _, _ = load_lasa(idx)
y = X.transpose(2, 1, 0)
x = np.linspace(0, 1, 1000)
return (x, y)
def learn(x, y):
traj = ProMPBehavior(
1.0, 1.0 / 999.0, n_features, learn_covariance=True, use_covar=True)
traj.init(4, 4)
traj.imitate(y.transpose(2, 1, 0))
return traj
def draw(x, y, traj, idx, axs):
h = int(idx / width)
w = int(idx % width) * 2
axs[h, w].plot(y.transpose(2, 1, 0)[0], y.transpose(2, 1, 0)[1])
mean, _, covar = traj.trajectory()
axs[h, w + 1].plot(mean[:, 0], mean[:, 1])
plot_covariance(axs[h, w + 1], mean, covar.reshape(-1, 4, 4))
axs[h, w + 1].set_xlim(axs[h, w].get_xlim())
axs[h, w + 1].set_ylim(axs[h, w].get_ylim())
axs[h, w].get_yaxis().set_visible(False)
axs[h, w].get_xaxis().set_visible(False)
axs[h, w + 1].get_yaxis().set_visible(False)
axs[h, w + 1].get_xaxis().set_visible(False)
n_features = 30 # how many weights shall be used
num_shapes = 10
width = 2
height = 5
fig, axs = plt.subplots(int(height), int(width * 2))
for i in range(num_shapes):
x, y = load(i)
traj = learn(x, y)
draw(x, y, traj, i, axs)
plt.show()
|
the-stack_106_21868
|
import argparse
import os
from dataManipulation import *
from utils import summary, summary_raw, get_support_from_mcmc
from vbpi import VBPI
from utils import namenum
import time
import torch
import numpy as np
import datetime
import math
parser = argparse.ArgumentParser()
######### Load arguments
parser.add_argument('--date', default=None, help='Specify the experiment date. i.e. 2021-05-17')
######### Data arguments
parser.add_argument('--dataset', required=True, help=' DS1 | DS2 | DS3 | DS4 | DS5 | DS6 | DS7 | DS8 ')
parser.add_argument('--empFreq', default=False, action='store_true', help='emprical frequence for KL computation')
######### Model arguments
parser.add_argument('--flow_type', type=str, default='identity', help=' identity | planar | realnvp ')
parser.add_argument('--psp', type=bool, default=True, help=' turn on psp branch length feature, default=True')
parser.add_argument('--nf', type=int, default=2, help=' branch length feature embedding dimension ')
parser.add_argument('--sh', type=int, nargs='+', default=[100], help=' list of the hidden layer sizes for permutation invariant flow ')
parser.add_argument('--Lnf', type=int, default=5, help=' number of layers for permutation invariant flow ')
######### Optimizer arguments
parser.add_argument('--stepszTree', type=float, default=0.001, help=' step size for tree topology parameters ')
parser.add_argument('--stepszBranch', type=float, default=0.001, help=' stepsz for branch length parameters ')
parser.add_argument('--maxIter', type=int, default=400000, help=' number of iterations for training, default=400000')
parser.add_argument('--invT0', type=float, default=0.001, help=' initial inverse temperature for annealing schedule, default=0.001')
parser.add_argument('--nwarmStart', type=float, default=100000, help=' number of warm start iterations, default=100000')
parser.add_argument('--nParticle', type=int, default=10, help='number of particles for variational objectives, default=10')
parser.add_argument('--ar', type=float, default=0.75, help='step size anneal rate, default=0.75')
parser.add_argument('--af', type=int, default=20000, help='step size anneal frequency, default=20000')
parser.add_argument('--tf', type=int, default=1000, help='monitor frequency during training, default=1000')
parser.add_argument('--lbf', type=int, default=5000, help='lower bound test frequency, default=5000')
parser.add_argument('--seed_val', type=int, default=11, help='seed value')
args = parser.parse_args()
args.result_folder = 'results/' + args.dataset
if not os.path.exists(args.result_folder):
os.makedirs(args.result_folder)
args.save_to_path = args.result_folder + '/'
if args.flow_type != 'identity':
args.save_to_path = args.save_to_path + args.flow_type + '_' + str(args.Lnf)
else:
args.save_to_path += 'base'
args.save_to_path = args.save_to_path + '_' + str(datetime.date.today()) + '.pt'
print('Model with the following settings: {}'.format(args))
###### Load Data
print('\nLoading Data set: {} ......'.format(args.dataset))
run_time = -time.time()
tree_dict_ufboot, tree_names_ufboot = summary_raw(args.dataset, 'data/' + args.dataset + '/ufboot/')
data, taxa = loadData('data/' + args.dataset + '/' + args.dataset + '.fasta', 'fasta')
run_time += time.time()
print('Support loaded in {:.1f} seconds'.format(run_time))
rootsplit_supp_dict, subsplit_supp_dict = get_support_from_mcmc(taxa, tree_dict_ufboot, tree_names_ufboot)
del tree_dict_ufboot, tree_names_ufboot
L = 1000 # n_particles
model_list = []
for model_seed in [4, 15, 23, 42, 108]:
model = VBPI(taxa, rootsplit_supp_dict, subsplit_supp_dict, data, pden=np.ones(4)/4., subModel=('JC', 1.0),
emp_tree_freq=None, feature_dim=args.nf, hidden_sizes=args.sh, num_of_layers_nf=args.Lnf,
flow_type=args.flow_type)
# Load Model State and Sample Trees
model_fname = args.result_folder + "/" + "m" + str(model_seed) + "_" + args.flow_type + '_' + str(args.Lnf) + ".pt"
model.load_from(model_fname)
model_list.append(model)
S = len(model_list)
print('Models are loaded. \tNumber of models (S): ', len(model_list), "\tNumber of particles (L): ", L)
########
t_start = time.time()
print("\nSampling trees and branch lengths...")
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
########
t_start = time.time()
print("\nCalculating IWELBO Original...")
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
lower_bounds = []
for s in range(S):
cur_model = model_list[s]
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
lb = cur_model.lower_bound(n_particles=L, n_runs=1)
lower_bounds.append(lb)
iwelbo_orig = np.mean(lower_bounds)
print("Time taken by lower bound computation: ", str(time.time() - t_start))
print("IWELBO: ", iwelbo_orig)
print("Mean LB: ", np.mean(lower_bounds), "\tStd LB: ", np.std(lower_bounds),
"\tMin LB: ", np.min(lower_bounds), "\tMax LB: ", np.max(lower_bounds))
print("LBs: ", lower_bounds)
########
t_start = time.time()
print("\nSampling trees and branch lengths...")
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
lower_bounds = []
for s in range(S):
cur_model = model_list[s]
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
samp_trees, samp_log_branch, log_q_branch, logq_tree, log_q_joint, log_ll, log_p_prior, log_p_joint \
= cur_model.sample_trees_and_branch_lengths(n_particles=L)
lower_bounds.append(torch.logsumexp(log_ll + log_p_prior + cur_model.log_p_tau - log_q_branch - logq_tree - math.log(L), 0).item())
iwelbo = np.mean(lower_bounds)
print("Time taken by lower bound computation: ", str(time.time() - t_start))
print("IWELBO: ", iwelbo)
print("Mean LB: ", np.mean(lower_bounds), "\tStd LB: ", np.std(lower_bounds),
"\tMin LB: ", np.min(lower_bounds), "\tMax LB: ", np.max(lower_bounds))
print("LBs: ", lower_bounds)
########
t_start = time.time()
print("\nCalculating IWELBO Presampled...")
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
# Sample trees and get corresponding branch length parameters
tree_list, params_list, samp_log_branch_base_list = [], [], []
final_samp_log_branch_list, final_logq_branch_list = [], []
log_p_joint_list = []
log_ll_list, log_p_prior_list = [], []
for s in range(S):
cur_model = model_list[s]
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
# Sample Trees
samp_trees = [cur_model.tree_model.sample_tree() for particle in range(L)]
[namenum(tree, cur_model.taxa) for tree in samp_trees]
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
samp_log_branch, logq_branch, neigh_ss_idxes, mean, std \
= cur_model.branch_model.mean_std_encoder(samp_trees, return_details=True)
tree_list.append(samp_trees)
params_list.append([mean, std, neigh_ss_idxes])
samp_log_branch_base_list.append(samp_log_branch)
samp_log_branch, logq_branch = cur_model.branch_model.invariant_flow(samp_log_branch, logq_branch, neigh_ss_idxes)
final_samp_log_branch_list.append(samp_log_branch)
final_logq_branch_list.append(logq_branch)
log_ll = torch.stack([cur_model.phylo_model.loglikelihood(log_branch, tree)
for log_branch, tree in zip(*[samp_log_branch, samp_trees])])
logp_prior = cur_model.phylo_model.logprior(samp_log_branch)
log_p_joint_list.append(log_ll + logp_prior + cur_model.log_p_tau)
log_ll_list.append(log_ll)
log_p_prior_list.append(logp_prior)
print("Time taken by sampling: ", str(time.time() - t_start))
# Evaluate the lower bound V1 (using final values)
t_start = time.time()
lower_bounds = []
for s in range(S):
cur_model = model_list[s]
samp_trees = tree_list[s]
samp_log_branch = final_samp_log_branch_list[s]
logq_branch = final_logq_branch_list[s]
logq_tree = torch.stack([cur_model.logq_tree(tree) for tree in samp_trees])
log_ll = torch.stack([cur_model.phylo_model.loglikelihood(log_branch, tree)
for log_branch, tree in zip(*[samp_log_branch, samp_trees])])
log_p_prior = cur_model.phylo_model.logprior(samp_log_branch)
lower_bounds.append(torch.logsumexp(log_ll + log_p_prior + cur_model.log_p_tau - logq_tree - logq_branch - math.log(L), 0).item())
iwelbo = np.mean(lower_bounds)
print("Time taken by lower bound computation: ", str(time.time() - t_start))
print("IWELBO: ", iwelbo)
print("Mean LB: ", np.mean(lower_bounds), "\tStd LB: ", np.std(lower_bounds),
"\tMin LB: ", np.min(lower_bounds), "\tMax LB: ", np.max(lower_bounds))
print("LBs: ", lower_bounds)
# Evaluate the lower bound V2 (use base values to reach final values)
lower_bounds = []
with torch.no_grad():
for s in range(S):
cur_model = model_list[s]
samp_trees = tree_list[s]
mean, std, neigh_ss_idxes = params_list[s]
samp_log_branch_base = samp_log_branch_base_list[s]
log_p_joint = log_p_joint_list[s]
logq_branch = torch.sum(-0.5 * math.log(2 * math.pi) - std - (0.5 * (samp_log_branch_base - mean) ** 2) / (std.exp() ** 2), -1)
samp_log_branch, logq_branch = cur_model.branch_model.invariant_flow(samp_log_branch_base, logq_branch, neigh_ss_idxes)
logq_tree = torch.stack([cur_model.logq_tree(tree) for tree in samp_trees])
log_q_joint = logq_tree + logq_branch
lower_bounds.append(torch.logsumexp(log_p_joint - log_q_joint - math.log(L), 0).item())
iwelbo = np.mean(lower_bounds)
print("Time taken by lower bound computation: ", str(time.time() - t_start))
print("IWELBO: ", iwelbo)
print("Mean LB: ", np.mean(lower_bounds), "\tStd LB: ", np.std(lower_bounds),
"\tMin LB: ", np.min(lower_bounds), "\tMax LB: ", np.max(lower_bounds))
print("LBs: ", lower_bounds)
########
t_start = time.time()
print("\nCalculating MISELBO Presampled...")
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
lower_bounds = []
with torch.no_grad():
for s in range(S):
cur_model = model_list[s]
samp_trees = tree_list[s]
samp_log_branch_base = samp_log_branch_base_list[s]
log_p_joint = log_p_joint_list[s]
log_q_part = []
for s_prime in range(S):
cur_model_s_prime = model_list[s_prime]
mean, std, neigh_ss_idxes = params_list[s_prime]
logq_branch = torch.sum(-0.5 * math.log(2 * math.pi) - std - (0.5 * (samp_log_branch_base - mean) ** 2) / (std.exp() ** 2), -1)
samp_log_branch, logq_branch = cur_model_s_prime.branch_model.invariant_flow(samp_log_branch_base, logq_branch, neigh_ss_idxes)
logq_tree = torch.stack([cur_model_s_prime.logq_tree(tree) for tree in samp_trees])
log_q_joint = logq_tree + logq_branch
log_q_part.append(log_q_joint)
denominator = torch.logsumexp(torch.stack(log_q_part) - math.log(S), 0)
lower_bounds.append(torch.logsumexp(log_p_joint - denominator - math.log(L), 0).item())
miselbo = np.mean(lower_bounds)
print("Time taken by lower bound computation: ", str(time.time() - t_start))
print("MISELBO: ", miselbo)
print("Mean LB: ", np.mean(lower_bounds), "\tStd LB: ", np.std(lower_bounds),
"\tMin LB: ", np.min(lower_bounds), "\tMax LB: ", np.max(lower_bounds))
print("LBs: ", lower_bounds)
#########
print("\nSummary:")
print("\tFinal IWELBO: ", iwelbo)
print("\tFinal MISELBO: ", miselbo)
print("\tDifference (MISELBO - IWELBO): ", miselbo - iwelbo)
if miselbo - iwelbo < 0:
print("WARNING! The difference is negative! ", miselbo - iwelbo)
print("\n\tIWELBO_orig: ", iwelbo_orig)
print("\tDifference (IWELBO_orig - IWELBO): ", iwelbo_orig - iwelbo)
if iwelbo_orig - iwelbo != 0:
print("WARNING! The difference is non-zero! ", iwelbo_orig - iwelbo)
########
t_start = time.time()
print("\nCalculating JSD Presampled...")
torch.manual_seed(args.seed_val)
np.random.seed(args.seed_val)
lower_bounds = []
with torch.no_grad():
for s in range(S):
cur_model = model_list[s]
samp_trees = tree_list[s]
samp_log_branch_base = samp_log_branch_base_list[s]
log_qs = 0
log_q_part = []
for s_prime in range(S):
cur_model_s_prime = model_list[s_prime]
mean, std, neigh_ss_idxes = params_list[s_prime]
logq_branch = torch.sum(-0.5 * math.log(2 * math.pi) - std - (0.5 * (samp_log_branch_base - mean) ** 2) / (std.exp() ** 2), -1)
samp_log_branch, logq_branch = cur_model_s_prime.branch_model.invariant_flow(samp_log_branch_base, logq_branch, neigh_ss_idxes)
logq_tree = torch.stack([cur_model_s_prime.logq_tree(tree) for tree in samp_trees])
log_q_joint = logq_tree + logq_branch
log_q_part.append(log_q_joint)
if s_prime == s:
log_qs = log_q_joint
log_q_mixture = torch.logsumexp(torch.stack(log_q_part), 0)
lower_bounds.append(torch.mean(log_qs - log_q_mixture).item())
jsd = np.log(S) + np.mean(lower_bounds)
print("Time taken by lower bound computation: ", str(time.time() - t_start))
print("JSD: ", jsd)
print("Range: [0, ", np.log(S), "]")
if jsd < 0 or jsd > np.log(S):
print("WARNING! The JSD is out of range! ", jsd, ". [0, ", np.log(S), "]")
print("LBs: ", lower_bounds)
|
the-stack_106_21869
|
#!/usr/bin/env python3
# ver 0.1 - coding python by Hyuntae Jung on 4/1/2018
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='average files from .npy or text files which are the same size like (value, *)s' )
## args
parser.add_argument('-i', '--input', nargs='?',
help='input 1D profile file generated by .txt, .npy, else')
parser.add_argument('-num', '--number', default=0, nargs='?', type=int,
help='index of files like args.input.[0:N-1]')
parser.add_argument('-o', '--output', default='INPUT', nargs='?',
help='output file of averaged 1D profile (.avg)')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.2')
# read args
args = parser.parse_args()
# check args
print(" input arguments: {0}".format(args))
## import modules
import hjung
from hjung import *
import numpy as np
import copy
# default for args
if 'INPUT' in args.output:
new_name = args.input.replace('.npy','')
new_name = new_name.replace('.txt','')
args.output = new_name + '.avg'
else:
args.output = args.output + '.avg'
## timer
start_proc, start_prof = hjung.time.init()
## read input file
for ifile in range(args.number):
filename = args.input + '.' + str(ifile)
if '.npy' in filename:
data = np.load(filename)
else:
data = np.loadtxt(filename)
#print(data.shape)
#print(len(data))
if 'data_collect' not in locals():
data_collect = np.zeros((args.number,len(data))) # we only use numbers in the first column
data_collect[ifile] = copy.copy(data[:,0])
data_mean = np.mean(data_collect,axis=0)
data_std = np.std(data_collect,axis=0)
data_prof = np.column_stack((data_mean,data_std))
## save averaged profile
print("="*30)
np.savetxt(args.output, data_prof, fmt='%f')
np.save(args.output, data_prof)
print("Finished saving averages.")
## timer
hjung.time.end_print(start_proc, start_prof)
|
the-stack_106_21870
|
#!/usr/local/bin/python3
"""
Unicoder
Version:
1.0.1
Copyright:
2019, Tony Smith (@smittytone)
License:
MIT (terms attached to this repo)
"""
##########################################################################
# Program library imports #
##########################################################################
import sys
##########################################################################
# Application-specific constants #
##########################################################################
APP_VERSION = "1.0.1"
##########################################################################
# Functions #
##########################################################################
def de_code(code, mode):
"""
Process the specified code.
Args:
        code (str): The UTF-8 character code (eg. 'U+10348')
        mode (bool): Whether to print Squirrel-style output (True) or a plain hex string (False)
Returns:
bool: Whether the op was successful (True) or not (False)
"""
if code:
# Remove the 'U+', if present
code_head = code[:2]
if code_head == "U+": code = code[2:]
# Pad the hex with zeroes as needed
if len(code) % 2 != 0: code = "0" + code
if len(code) == 2: code = "00" + code
# How many bytes are required for the string?
num_bytes = 1
code_val = int(code, 16)
        if 0x80 <= code_val <= 0x7FF: num_bytes = 2
if 0x800 <= code_val <= 0xFFFF: num_bytes = 3
if code_val >= 0x10000: num_bytes = 4
        if code_val > 0x10FFFF:
print("ERROR -- Invalid UTF-8 code supplied (U+" + code + ")")
return False
if num_bytes == 1:
byte_1 = int(code[-2:], 16)
print(output([byte_1], mode))
if num_bytes == 2:
            byte_1 = 0xC0 | ((code_val & 0x07C0) >> 6)
byte_2 = 0x80 | (code_val & 0x3F)
print(output([byte_1, byte_2], mode))
if num_bytes == 3:
byte_1 = 0xE0 | ((code_val & 0xF000) >> 12)
byte_2 = 0x80 | ((code_val & 0x0FC0) >> 6)
byte_3 = 0x80 | (code_val & 0x3F)
print(output([byte_1, byte_2, byte_3], mode))
if num_bytes == 4:
            byte_1 = 0xF0 | ((code_val & 0x1C0000) >> 18)
byte_2 = 0x80 | ((code_val & 0x03F000) >> 12)
byte_3 = 0x80 | ((code_val & 0x000FC0) >> 6)
byte_4 = 0x80 | (code_val & 0x3F)
print(output([byte_1, byte_2, byte_3, byte_4], mode))
return True
return False
def output(the_bytes, as_squirrel=True):
"""
Format the output string.
Args:
the_bytes (list): The individual integer byte values.
as_squirrel (bool): Should we output as Squirrel code? Default: True
Returns:
str: The formatted output.
"""
out_str = "local unicodeString=\""
end_str = "\";"
if as_squirrel is False:
out_str = ""
end_str = ""
for a_byte in the_bytes:
out_str += (("\\x" if as_squirrel is True else "") + "{0:02X}".format(a_byte))
return out_str + end_str
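# Example usage (illustrative): de_code("U+20AC", True) encodes the euro sign and prints
#   local unicodeString="\xE2\x82\xAC";
# while de_code("U+20AC", False) prints the bare hex string E282AC.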
def show_help():
'''
Display Unicoder's help information.
'''
print("Unicoder " + APP_VERSION)
print(" ")
print("Unicoder converts UTF-8 character codes, eg. 'U+20AC' for €, to hex strings")
print("that can be transferred between systems, eg. in JSON.")
print(" ")
print("Usage:")
print(" unicoder.py [-h][-j] [<UTF-8_code_1> <UTF-8_code_2> ... <UTF-8_code_n>]")
print(" ")
print("Options:")
print(" -j / --justhex - Print output as plain hex values, eg. 'EA20AC'.")
print(" -h / --help - Print Unicoder help information (this screen).")
print(" ")
##########################################################################
# Main entry point #
##########################################################################
if __name__ == '__main__':
if len(sys.argv) > 1:
squirrel_mode = True
for index, item in enumerate(sys.argv):
if index > 0:
if item in ("-h", "--help"):
show_help()
sys.exit(0)
elif item in ("-j", "--justhex"):
squirrel_mode = False
elif item[0] == "-":
print("ERROR -- unknown option specified (" + item + ")")
sys.exit(1)
for index, item in enumerate(sys.argv):
if index > 0 and item[0] != "-":
result = de_code(item, squirrel_mode)
if result is False:
sys.exit(1)
else:
print("ERROR -- no UTF-8 chracter specified (eg. 'U+20AC')")
sys.exit(1)
|
the-stack_106_21871
|
# PyChain Ledger
################################################################################
# You’ll make the following updates to the provided Python file for this
# Challenge, which already contains the basic `PyChain` ledger structure that
# you created throughout the module:
# Step 1: Create a Record Data Class
# * Create a new data class named `Record`. This class will serve as the
# blueprint for the financial transaction records that the blocks of the ledger
# will store.
# Step 2: Modify the Existing Block Data Class to Store Record Data
# * Change the existing `Block` data class by replacing the generic `data`
# attribute with a `record` attribute that’s of type `Record`.
# Step 3: Add Relevant User Inputs to the Streamlit Interface
# * Create additional user input areas in the Streamlit application. These
# input areas should collect the relevant information for each financial record
# that you’ll store in the `PyChain` ledger.
# Step 4: Test the PyChain Ledger by Storing Records
# * Test your complete `PyChain` ledger.
################################################################################
# Imports
import streamlit as st
from dataclasses import dataclass, field
from typing import Any, List
import datetime as datetime
import pandas as pd
import hashlib
################################################################################
# Step 1:
# Create a Record Data Class
# Define a new Python data class named `Record`. Give this new class a
# formalized data structure that consists of the `sender`, `receiver`, and
# `amount` attributes. To do so, complete the following steps:
# 1. Define a new class named `Record`.
# 2. Add the `@dataclass` decorator immediately before the `Record` class
# definition.
# 3. Add an attribute named `sender` of type `str`.
# 4. Add an attribute named `receiver` of type `str`.
# 5. Add an attribute named `amount` of type `float`.
# Note that you’ll use this new `Record` class as the data type of your `record` attribute in the next section.
# @TODO
# Create a Record Data Class that consists of the `sender`, `receiver`, and
# `amount` attributes
@dataclass
class Record:
sender: str
receiver: str
amount: float
################################################################################
# Step 2:
# Modify the Existing Block Data Class to Store Record Data
# Rename the `data` attribute in your `Block` class to `record`, and then set
# it to use an instance of the new `Record` class that you created in the
# previous section. To do so, complete the following steps:
# 1. In the `Block` class, rename the `data` attribute to `record`.
# 2. Set the data type of the `record` attribute to `Record`.
@dataclass
class Block:
# @TODO
# Rename the `data` attribute to `record`, and set the data type to `Record`
record: Record
creator_id: int
prev_hash: str = "0"
    timestamp: str = field(default_factory=lambda: datetime.datetime.utcnow().strftime("%H:%M:%S"))
nonce: int = 0
def hash_block(self):
sha = hashlib.sha256()
record = str(self.record).encode()
sha.update(record)
creator_id = str(self.creator_id).encode()
sha.update(creator_id)
timestamp = str(self.timestamp).encode()
sha.update(timestamp)
prev_hash = str(self.prev_hash).encode()
sha.update(prev_hash)
nonce = str(self.nonce).encode()
sha.update(nonce)
return sha.hexdigest()
@dataclass
class PyChain:
chain: List[Block]
difficulty: int = 4
def proof_of_work(self, block):
calculated_hash = block.hash_block()
num_of_zeros = "0" * self.difficulty
while not calculated_hash.startswith(num_of_zeros):
block.nonce += 1
calculated_hash = block.hash_block()
print("Wining Hash", calculated_hash)
return block
def add_block(self, candidate_block):
block = self.proof_of_work(candidate_block)
self.chain += [block]
def is_valid(self):
block_hash = self.chain[0].hash_block()
for block in self.chain[1:]:
if block_hash != block.prev_hash:
print("Blockchain is invalid!")
return False
block_hash = block.hash_block()
print("Blockchain is Valid")
return True
################################################################################
# Streamlit Code
# Adds the cache decorator for Streamlit
@st.cache(allow_output_mutation=True)
def setup():
print("Initializing Chain")
return PyChain([Block("Genesis", 0)])
st.markdown("# PyChain")
st.markdown("## Store a Transaction Record in the PyChain")
pychain = setup()
################################################################################
# Step 3:
# Add Relevant User Inputs to the Streamlit Interface
# Code additional input areas for the user interface of your Streamlit
# application. Create these input areas to capture the sender, receiver, and
# amount for each transaction that you’ll store in the `Block` record.
# To do so, complete the following steps:
# 1. Delete the `input_data` variable from the Streamlit interface.
# 2. Add an input area where you can get a value for `sender` from the user.
# 3. Add an input area where you can get a value for `receiver` from the user.
# 4. Add an input area where you can get a value for `amount` from the user.
# 5. As part of the Add Block button functionality, update `new_block` so that `Block` consists of an attribute named `record`, which is set equal to a `Record` that contains the `sender`, `receiver`, and `amount` values. The updated `Block`should also include the attributes for `creator_id` and `prev_hash`.
# @TODO:
# Delete the `input_data` variable from the Streamlit interface.
# input_data = st.text_input("Block Data")
# @TODO:
# Add an input area where you can get a value for `sender` from the user.
input_sender = st.text_input("Input the Sender")
# @TODO:
# Add an input area where you can get a value for `receiver` from the user.
input_receiver = st.text_input("Input the Receiver")
# @TODO:
# Add an input area where you can get a value for `amount` from the user.
input_amount = st.text_input("Input the Amount")
if st.button("Add Block"):
prev_block = pychain.chain[-1]
prev_block_hash = prev_block.hash_block()
# @TODO
# Update `new_block` so that `Block` consists of an attribute named `record`
# which is set equal to a `Record` that contains the `sender`, `receiver`,
# and `amount` values
new_block = Block(
record = Record(sender = input_sender, receiver = input_receiver, amount = input_amount),
creator_id=42,
prev_hash=prev_block_hash
)
pychain.add_block(new_block)
st.balloons()
################################################################################
# Streamlit Code (continues)
st.markdown("## The PyChain Ledger")
pychain_df = pd.DataFrame(pychain.chain).astype(str)
st.write(pychain_df)
difficulty = st.sidebar.slider("Block Difficulty", 1, 5, 2)
pychain.difficulty = difficulty
st.sidebar.write("# Block Inspector")
selected_block = st.sidebar.selectbox(
"Which block would you like to see?", pychain.chain
)
st.sidebar.write(selected_block)
if st.button("Validate Chain"):
st.write(pychain.is_valid())
################################################################################
# Step 4:
# Test the PyChain Ledger by Storing Records
# Test your complete `PyChain` ledger and user interface by running your
# Streamlit application and storing some mined blocks in your `PyChain` ledger.
# Then test the blockchain validation process by using your `PyChain` ledger.
# To do so, complete the following steps:
# 1. In the terminal, navigate to the project folder where you've coded the
# Challenge.
# 2. In the terminal, run the Streamlit application by
# using `streamlit run pychain.py`.
# 3. Enter values for the sender, receiver, and amount, and then click the "Add
# Block" button. Do this several times to store several blocks in the ledger.
# 4. Verify the block contents and hashes in the Streamlit drop-down menu.
# Take a screenshot of the Streamlit application page, which should detail a
# blockchain that consists of multiple blocks. Include the screenshot in the
# `README.md` file for your Challenge repository.
# 5. Test the blockchain validation process by using the web interface.
# Take a screenshot of the Streamlit application page, which should indicate
# the validity of the blockchain. Include the screenshot in the `README.md`
# file for your Challenge repository.
|
the-stack_106_21872
|
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x >= 0) -> pc' = 1
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 -> pc' = 2
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
# pc = 2 -> same_x & y' = y + 1
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(2, mgr.GE(x_y, i_20))
loc2 = Location(env, mgr.TRUE())
loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(0, mgr.Equals(x_y, mgr.Times(y, y)))
h_y = Hint("h_y5", env, frozenset([y]), symbs)
h_y.set_locs([loc0])
res.append(h_y)
return frozenset(res)
|
the-stack_106_21874
|
#!/usr/bin/env python3
"""tests for rummikub.py"""
import os
import re
import random
import string
from subprocess import getstatusoutput
prg = './rummikub.py'
# --------------------------------------------------
def test_exists():
"""exists"""
assert os.path.isfile(prg)
# --------------------------------------------------
def test_usage():
"""usage"""
for flag in ['-h', '--help']:
rv, out = getstatusoutput('{} {}'.format(prg, flag))
assert rv == 0
assert re.match("usage", out, re.IGNORECASE)
# --------------------------------------------------
def test_nothing_found():
"""runs"""
for seed in [0, 10]:
rv, out = getstatusoutput('{} -s {}'.format(prg, seed))
assert rv == 0
assert out == 'Found no sets.'
# --------------------------------------------------
def test_runs1():
"""runs"""
rv, out = getstatusoutput('{} --seed 1'.format(prg))
assert rv == 0
sets = parse_sets(out)
assert len(sets) == 1
assert ('B3 K3 Y3') in sets
# --------------------------------------------------
def test_runs2():
"""runs"""
rv, out = getstatusoutput('{} -s2'.format(prg))
print(out)
assert rv == 0
sets = parse_sets(out)
assert len(sets) == 3
assert ('K8 K9 K10') in sets
assert ('K9 K10 K11') in sets
assert ('K8 K9 K10 K11') in sets
# --------------------------------------------------
def test_runs5():
"""runs"""
rv, out = getstatusoutput('{} -s 5'.format(prg))
print(out)
assert rv == 0
sets = parse_sets(out)
assert len(sets) == 5
assert ('K11 K12 K13') in sets
assert ('B2 B3 B4') in sets
assert ('B3 B4 B5') in sets
assert ('B2 K2 Y2') in sets
assert ('B2 B3 B4 B5') in sets
# --------------------------------------------------
def parse_sets(out):
"""parse_sets"""
sets = list()
regex = re.compile(r'\d+:\s+(.+)')
for line in out.splitlines():
match = regex.search(line)
if match:
sets.append(match.group(1))
return sets
|
the-stack_106_21876
|
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Lease management for machines leased from the Machine Provider.
Keeps a list of machine types which should be leased from the Machine Provider
and the list of machines of each type currently leased.
Swarming integration with Machine Provider
==========================================
handlers_backend.py contains a cron job which looks at each MachineType and
ensures there are at least as many MachineLeases in the datastore which refer
to that MachineType as the target_size in MachineType specifies by numbering
them 0 through target_size - 1. If there are MachineLease entities numbered
target_size or greater which refer to that MachineType, those MachineLeases
are marked as drained.
Each MachineLease manages itself. A cron job in handlers_backend.py will trigger
self-management jobs for each entity. If there is no associated lease and the
MachineLease is not drained, issue a request to the Machine Provider for a
matching machine. If there is an associated request, check the status of that
request. If it is fulfilled, ensure the existence of a BotInfo entity (see
server/bot_management.py) corresponding to the machine provided for the lease.
Include the lease ID and lease_expiration_ts as fields in the BotInfo. If it
is expired, clear the associated lease. If there is no associated lease and
the MachineLease is drained, delete the MachineLease entity.
TODO(smut): If there is an associated request and the MachineLease is drained,
release the lease immediately (as long as the bot is not mid-task).
"""
import base64
import collections
import datetime
import json
import logging
import math
from google.appengine.api import app_identity
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
from protorpc.remote import protojson
import ts_mon_metrics
from components import datastore_utils
from components import machine_provider
from components import pubsub
from components import utils
from server import bot_groups_config
from server import bot_management
from server import task_queues
from server import task_request
from server import task_result
from server import task_pack
from server import task_scheduler
# Name of the topic the Machine Provider is authorized to publish
# lease information to.
PUBSUB_TOPIC = 'machine-provider'
# Name of the pull subscription to the Machine Provider topic.
PUBSUB_SUBSCRIPTION = 'machine-provider'
class MachineLease(ndb.Model):
"""A lease request for a machine from the Machine Provider.
Key:
id: A string in the form <machine type id>-<number>.
kind: MachineLease. Is a root entity.
"""
# Bot ID for the BotInfo created for this machine.
bot_id = ndb.StringProperty(indexed=False)
# Request ID used to generate this request.
client_request_id = ndb.StringProperty(indexed=True)
# DateTime indicating when the bot first connected to the server.
connection_ts = ndb.DateTimeProperty()
# Whether or not this MachineLease should issue lease requests.
drained = ndb.BooleanProperty(indexed=True)
# Number of seconds ahead of lease_expiration_ts to release leases.
early_release_secs = ndb.IntegerProperty(indexed=False)
# Hostname of the machine currently allocated for this request.
hostname = ndb.StringProperty()
# DateTime indicating when the instruction to join the server was sent.
instruction_ts = ndb.DateTimeProperty()
# Duration to lease for.
lease_duration_secs = ndb.IntegerProperty(indexed=False)
# DateTime indicating lease expiration time.
lease_expiration_ts = ndb.DateTimeProperty()
# Lease ID assigned by Machine Provider.
lease_id = ndb.StringProperty(indexed=False)
# ndb.Key for the MachineType this MachineLease is created for.
machine_type = ndb.KeyProperty()
# machine_provider.Dimensions describing the machine.
mp_dimensions = msgprop.MessageProperty(
machine_provider.Dimensions, indexed=False)
# Last request number used.
request_count = ndb.IntegerProperty(default=0, required=True)
# Base string to use as the request ID.
request_id_base = ndb.StringProperty(indexed=False)
# Task ID for the termination task scheduled for this machine.
termination_task = ndb.StringProperty(indexed=False)
class MachineType(ndb.Model):
"""A type of machine which should be leased from the Machine Provider.
Key:
id: A human-readable name for this machine type.
kind: MachineType. Is a root entity.
"""
# Description of this machine type for humans.
description = ndb.StringProperty(indexed=False)
# Number of seconds ahead of lease_expiration_ts to release leases.
early_release_secs = ndb.IntegerProperty(indexed=False)
# Whether or not to attempt to lease machines of this type.
enabled = ndb.BooleanProperty(default=True)
# Duration to lease each machine for.
lease_duration_secs = ndb.IntegerProperty(indexed=False)
# machine_provider.Dimensions describing the machine.
mp_dimensions = msgprop.MessageProperty(
machine_provider.Dimensions, indexed=False)
# Target number of machines of this type to have leased at once.
target_size = ndb.IntegerProperty(indexed=False, required=True)
class MachineTypeUtilization(ndb.Model):
"""Utilization numbers for a MachineType.
Key:
id: Name of the MachineType these utilization numbers are associated with.
kind: MachineTypeUtilization. Is a root entity.
"""
# Number of busy bots created from this machine type.
busy = ndb.IntegerProperty(indexed=False)
# Number of idle bots created from this machine type.
idle = ndb.IntegerProperty(indexed=False)
# DateTime indicating when busy/idle numbers were last computed.
last_updated_ts = ndb.DateTimeProperty()
@ndb.transactional_tasklet
def create_machine_lease(machine_lease_key, machine_type):
"""Creates a MachineLease from the given MachineType and MachineLease key.
Args:
machine_lease_key: ndb.Key for a MachineLease entity.
machine_type: MachineType entity.
"""
machine_lease = yield machine_lease_key.get_async()
if machine_lease:
return
yield MachineLease(
key=machine_lease_key,
lease_duration_secs=machine_type.lease_duration_secs,
early_release_secs=machine_type.early_release_secs,
machine_type=machine_type.key,
mp_dimensions=machine_type.mp_dimensions,
# Deleting and recreating the MachineLease needs a unique base request ID,
# otherwise it will hit old requests.
request_id_base='%s-%s' % (machine_lease_key.id(), utils.time_time()),
).put_async()
@ndb.transactional_tasklet
def update_machine_lease(machine_lease_key, machine_type):
"""Updates the given MachineLease from the given MachineType.
Args:
machine_lease_key: ndb.Key for a MachineLease entity.
machine_type: MachineType entity.
"""
machine_lease = yield machine_lease_key.get_async()
if not machine_lease:
logging.error('MachineLease not found:\nKey: %s', machine_lease_key)
return
if machine_lease.lease_expiration_ts:
put = False
if machine_lease.early_release_secs != machine_type.early_release_secs:
machine_lease.early_release_secs = machine_type.early_release_secs
put = True
if machine_lease.lease_duration_secs != machine_type.lease_duration_secs:
machine_lease.lease_duration_secs = machine_type.lease_duration_secs
put = True
if machine_lease.mp_dimensions != machine_type.mp_dimensions:
machine_lease.mp_dimensions = machine_type.mp_dimensions
put = True
if put:
yield machine_lease.put_async()
@ndb.tasklet
def ensure_entity_exists(machine_type, n):
"""Ensures the nth MachineLease for the given MachineType exists.
Args:
machine_type: MachineType entity.
n: The MachineLease index.
"""
machine_lease_key = ndb.Key(
MachineLease, '%s-%s' % (machine_type.key.id(), n))
machine_lease = yield machine_lease_key.get_async()
if not machine_lease:
yield create_machine_lease(machine_lease_key, machine_type)
return
# If there is a MachineLease, we may need to update it if the MachineType's
# lease properties have changed. It's only safe to update it if the current
# lease is fulfilled (indicated by the presence of lease_expiration_ts) so
# the changes only go into effect for the next lease request.
if machine_lease.lease_expiration_ts and (
machine_lease.early_release_secs != machine_type.early_release_secs
or machine_lease.lease_duration_secs != machine_type.lease_duration_secs
or machine_lease.mp_dimensions != machine_type.mp_dimensions
):
yield update_machine_lease(machine_lease_key, machine_type)
def machine_type_pb2_to_entity(pb2):
"""Creates a MachineType entity from the given bots_pb2.MachineType.
Args:
pb2: A proto.bots_pb2.MachineType proto.
Returns:
A MachineType entity.
"""
return MachineType(
id=pb2.name,
description=pb2.description,
early_release_secs=pb2.early_release_secs,
enabled=True,
lease_duration_secs=pb2.lease_duration_secs,
mp_dimensions=protojson.decode_message(
machine_provider.Dimensions,
json.dumps(dict(pair.split(':', 1) for pair in pb2.mp_dimensions)),
),
target_size=pb2.target_size,
)
def get_target_size(schedule, machine_type, current, default, now=None):
"""Returns the current target size for the MachineType.
Args:
schedule: A proto.bots_pb2.Schedule proto.
machine_type: ID of the key for the MachineType to get a target size for.
current: The current target_size. Used to ensure load-based target size
recommendations don't drop too quickly.
default: A default to return if now is not within any of config's intervals
or the last-known utilization is not set.
now: datetime.datetime to use as the time to check what the MachineType's
target size currently is. Defaults to use the current time if unspecified.
Returns:
Target size.
"""
now = now or utils.utcnow()
# The validator ensures the given time will fall in at most one interval,
# because intervals are not allowed to intersect. So just search linearly
# for a matching interval.
# TODO(smut): Improve linear search if we end up with many intervals.
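  # Interval times are parsed as 'HH:MM' strings below, and days_of_the_week
  # is compared against datetime.weekday(), so 0 means Monday. Example
  # (illustrative): start='09:00', end='17:00', days_of_the_week=[0, 1, 2].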
for i in schedule.daily:
# If the days of the week given by this interval do not include the current
# day, move on to the next interval. If no days of the week are given by
# this interval at all, then the interval applies every day.
if i.days_of_the_week and now.weekday() not in i.days_of_the_week:
continue
# Get the start and end times of this interval relative to the current day.
h, m = map(int, i.start.split(':'))
start = datetime.datetime(now.year, now.month, now.day, h, m)
h, m = map(int, i.end.split(':'))
end = datetime.datetime(now.year, now.month, now.day, h, m)
if start <= now <= end:
return i.target_size
# Fall back on load-based scheduling. This allows combining scheduled changes
# with load-based changes occurring outside any explicitly given intervals.
# Only one load-based schedule is supported.
if schedule.load_based:
utilization = ndb.Key(MachineTypeUtilization, machine_type).get()
if not utilization:
return default
logging.info(
'Last known utilization for MachineType %s: %s/%s (computed at %s)',
machine_type,
utilization.busy,
utilization.busy + utilization.idle,
utilization.last_updated_ts,
)
    # Target 50% more than the number of busy bots, but not more than the
# configured maximum and not less than the configured minimum. In order
# to prevent drastic drops, do not allow the target size to fall below 99%
# of current capacity. Note that this dampens scale downs as a function of
# the frequency with which this function runs, which is currently every
# minute controlled by cron job. Tweak these numbers if the cron frequency
# changes.
# TODO(smut): Tune this algorithm.
# TODO(smut): Move algorithm parameters to luci-config.
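    # Illustrative example (hypothetical numbers): with busy=100, current=120,
    # minimum_size=50 and maximum_size=200, target = ceil(100 * 1.5) = 150,
    # which exceeds 0.99 * 120 and lies within [50, 200], so 150 is returned.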
target = int(math.ceil(utilization.busy * 1.5))
if target >= schedule.load_based[0].maximum_size:
return schedule.load_based[0].maximum_size
if target < int(0.99 * current):
target = int(0.99 * current)
if target < schedule.load_based[0].minimum_size:
target = schedule.load_based[0].minimum_size
return target
return default
def ensure_entities_exist(max_concurrent=50):
"""Ensures MachineType entities are correct, and MachineLease entities exist.
Updates MachineType entities based on the config and creates corresponding
MachineLease entities.
Args:
max_concurrent: Maximum number of concurrent asynchronous requests.
"""
now = utils.utcnow()
# Seconds and microseconds are too granular for determining scheduling.
now = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute)
# Generate a few asynchronous requests at a time in order to prevent having
# too many in flight at a time.
futures = []
machine_types = bot_groups_config.fetch_machine_types().copy()
for machine_type in MachineType.query():
# Check the MachineType in the datastore against its config.
# If it no longer exists, just disable it here. If it exists but
# doesn't match, update it.
config = machine_types.pop(machine_type.key.id(), None)
# If there is no config, disable the MachineType.
if not config:
if machine_type.enabled:
machine_type.enabled = False
futures.append(machine_type.put_async())
logging.info('Disabling deleted MachineType: %s', machine_type)
continue
put = False
# Re-enable disabled MachineTypes.
if not machine_type.enabled:
logging.info('Enabling MachineType: %s', machine_type)
machine_type.enabled = True
put = True
# Handle scheduled config changes.
if config.schedule:
target_size = get_target_size(
config.schedule,
machine_type.key.id(),
machine_type.target_size,
config.target_size,
now=now,
)
if machine_type.target_size != target_size:
logging.info(
'Adjusting target_size (%s -> %s) for MachineType: %s',
machine_type.target_size,
target_size,
machine_type,
)
machine_type.target_size = target_size
put = True
# If the MachineType does not match the config, update it. Copy the values
# of certain fields so we can compare the MachineType to the config to check
# for differences in all other fields.
config = machine_type_pb2_to_entity(config)
config.target_size = machine_type.target_size
if machine_type != config:
logging.info('Updating MachineType: %s', config)
machine_type = config
put = True
# If there's anything to update, update it once here.
if put:
futures.append(machine_type.put_async())
# If the MachineType isn't enabled, don't create MachineLease entities.
if not machine_type.enabled:
continue
# Ensure the existence of MachineLease entities.
cursor = 0
while cursor < machine_type.target_size:
while len(futures) < max_concurrent and cursor < machine_type.target_size:
futures.append(ensure_entity_exists(machine_type, cursor))
cursor += 1
ndb.Future.wait_any(futures)
# We don't bother checking success or failure. If a transient error
# like TransactionFailed or DeadlineExceeded is raised and an entity
# is not created, we will just create it the next time this is called,
# converging to the desired state eventually.
futures = [future for future in futures if not future.done()]
# Create MachineTypes that never existed before.
# The next iteration of this cron job will create their MachineLeases.
if machine_types:
machine_types = machine_types.values()
while machine_types:
num_futures = len(futures)
if num_futures < max_concurrent:
futures.extend([
machine_type_pb2_to_entity(machine_type).put_async()
for machine_type in machine_types[:max_concurrent - num_futures]
])
machine_types = machine_types[max_concurrent - num_futures:]
ndb.Future.wait_any(futures)
futures = [future for future in futures if not future.done()]
if futures:
ndb.Future.wait_all(futures)
@ndb.transactional_tasklet
def drain_entity(key):
"""Drains the given MachineLease.
Args:
key: ndb.Key for a MachineLease entity.
"""
machine_lease = yield key.get_async()
if not machine_lease:
logging.error('MachineLease does not exist\nKey: %s', key)
return
if machine_lease.drained:
return
logging.info(
'Draining MachineLease:\nKey: %s\nHostname: %s',
key,
machine_lease.hostname,
)
machine_lease.drained = True
yield machine_lease.put_async()
@ndb.tasklet
def ensure_entity_drained(machine_lease):
"""Ensures the given MachineLease is drained.
Args:
machine_lease: MachineLease entity.
"""
if machine_lease.drained:
return
yield drain_entity(machine_lease.key)
def drain_excess(max_concurrent=50):
"""Marks MachineLeases beyond what is needed by their MachineType as drained.
Args:
max_concurrent: Maximum number of concurrent asynchronous requests.
"""
futures = []
for machine_type in MachineType.query():
for machine_lease in MachineLease.query(
MachineLease.machine_type == machine_type.key,
):
try:
index = int(machine_lease.key.id().rsplit('-', 1)[-1])
except ValueError:
logging.error(
            'MachineLease index could not be deciphered\nKey: %s',
machine_lease.key,
)
continue
# Drain MachineLeases where the MachineType is not enabled or the index
# exceeds the target_size given by the MachineType. Since MachineLeases
# are created in contiguous blocks, only indices 0 through target_size - 1
# should exist.
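      # Illustration (hypothetical): with target_size=3 only suffixes 0, 1 and
      # 2 should exist, so a lease keyed '<machine_type>-3', or any lease of a
      # disabled type, is drained below.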
if not machine_type.enabled or index >= machine_type.target_size:
if len(futures) == max_concurrent:
ndb.Future.wait_any(futures)
futures = [future for future in futures if not future.done()]
futures.append(ensure_entity_drained(machine_lease))
if futures:
ndb.Future.wait_all(futures)
def schedule_lease_management():
"""Schedules task queues to process each MachineLease."""
now = utils.utcnow()
for machine_lease in MachineLease.query():
# If there's no connection_ts, we're waiting on a bot so schedule the
# management job to check on it. If there is a connection_ts, then don't
# schedule the management job until it's time to release the machine.
if (not machine_lease.connection_ts
or machine_lease.drained
or machine_lease.lease_expiration_ts <= now + datetime.timedelta(
seconds=machine_lease.early_release_secs)):
if not utils.enqueue_task(
'/internal/taskqueue/machine-provider-manage',
'machine-provider-manage',
params={
'key': machine_lease.key.urlsafe(),
},
):
logging.warning(
'Failed to enqueue task for MachineLease: %s', machine_lease.key)
@ndb.transactional
def clear_lease_request(key, request_id):
"""Clears information about given lease request.
Args:
key: ndb.Key for a MachineLease entity.
request_id: ID of the request to clear.
"""
machine_lease = key.get()
if not machine_lease:
logging.error('MachineLease does not exist\nKey: %s', key)
return
if not machine_lease.client_request_id:
return
if request_id != machine_lease.client_request_id:
# Already cleared and incremented?
logging.warning(
'Request ID mismatch for MachineLease: %s\nExpected: %s\nActual: %s',
key,
request_id,
machine_lease.client_request_id,
)
return
machine_lease.bot_id = None
machine_lease.client_request_id = None
machine_lease.connection_ts = None
machine_lease.hostname = None
machine_lease.instruction_ts = None
machine_lease.lease_expiration_ts = None
machine_lease.lease_id = None
machine_lease.termination_task = None
machine_lease.put()
@ndb.transactional
def clear_termination_task(key, task_id):
"""Clears the termination task associated with the given lease request.
Args:
key: ndb.Key for a MachineLease entity.
task_id: ID for a termination task.
"""
machine_lease = key.get()
if not machine_lease:
logging.error('MachineLease does not exist\nKey: %s', key)
return
if not machine_lease.termination_task:
return
if task_id != machine_lease.termination_task:
logging.error(
'Task ID mismatch\nKey: %s\nExpected: %s\nActual: %s',
key,
task_id,
        machine_lease.termination_task,
)
return
machine_lease.termination_task = None
machine_lease.put()
@ndb.transactional
def associate_termination_task(key, hostname, task_id):
"""Associates a termination task with the given lease request.
Args:
key: ndb.Key for a MachineLease entity.
hostname: Hostname of the machine the termination task is for.
task_id: ID for a termination task.
"""
machine_lease = key.get()
if not machine_lease:
logging.error('MachineLease does not exist\nKey: %s', key)
return
if hostname != machine_lease.hostname:
logging.error(
'Hostname mismatch\nKey: %s\nExpected: %s\nActual: %s',
key,
hostname,
machine_lease.hostname,
)
return
if machine_lease.termination_task:
return
logging.info(
'Associating termination task\nKey: %s\nHostname: %s\nTask ID: %s',
key,
machine_lease.hostname,
task_id,
)
machine_lease.termination_task = task_id
machine_lease.put()
@ndb.transactional
def log_lease_fulfillment(
key, request_id, hostname, lease_expiration_ts, lease_id):
"""Logs lease fulfillment.
Args:
key: ndb.Key for a MachineLease entity.
request_id: ID of the request being fulfilled.
hostname: Hostname of the machine fulfilling the request.
lease_expiration_ts: UTC seconds since epoch when the lease expires.
lease_id: ID of the lease assigned by Machine Provider.
"""
machine_lease = key.get()
if not machine_lease:
logging.error('MachineLease does not exist\nKey: %s', key)
return
if request_id != machine_lease.client_request_id:
logging.error(
'Request ID mismatch\nKey: %s\nExpected: %s\nActual: %s',
key,
request_id,
machine_lease.client_request_id,
)
return
if (hostname == machine_lease.hostname
and lease_expiration_ts == machine_lease.lease_expiration_ts
and lease_id == machine_lease.lease_id):
return
machine_lease.hostname = hostname
machine_lease.lease_expiration_ts = datetime.datetime.utcfromtimestamp(
lease_expiration_ts)
machine_lease.lease_id = lease_id
machine_lease.put()
@ndb.transactional
def update_client_request_id(key):
"""Sets the client request ID used to lease a machine.
Args:
key: ndb.Key for a MachineLease entity.
"""
machine_lease = key.get()
if not machine_lease:
logging.error('MachineLease does not exist\nKey: %s', key)
return
if machine_lease.drained:
logging.info('MachineLease is drained\nKey: %s', key)
return
if machine_lease.client_request_id:
return
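  # The client request ID takes the form '<request_id_base>-<request_count>',
  # e.g. 'mt-0-1500000000-3' (hypothetical values; the base is built in
  # create_machine_lease from the lease key id and a timestamp).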
machine_lease.request_count += 1
machine_lease.client_request_id = '%s-%s' % (
machine_lease.request_id_base, machine_lease.request_count)
machine_lease.put()
@ndb.transactional
def delete_machine_lease(key):
"""Deletes the given MachineLease if it is drained and has no active lease.
Args:
key: ndb.Key for a MachineLease entity.
"""
machine_lease = key.get()
if not machine_lease:
return
if not machine_lease.drained:
logging.warning('MachineLease not drained: %s', key)
return
if machine_lease.client_request_id:
return
key.delete()
@ndb.transactional
def associate_bot_id(key, bot_id):
"""Associates a bot with the given machine lease.
Args:
key: ndb.Key for a MachineLease entity.
bot_id: ID for a bot.
"""
machine_lease = key.get()
if not machine_lease:
logging.error('MachineLease does not exist\nKey: %s', key)
return
if machine_lease.bot_id == bot_id:
return
machine_lease.bot_id = bot_id
machine_lease.put()
def ensure_bot_info_exists(machine_lease):
"""Ensures a BotInfo entity exists and has Machine Provider-related fields.
Args:
machine_lease: MachineLease instance.
"""
if machine_lease.bot_id == machine_lease.hostname:
return
bot_info = bot_management.get_info_key(machine_lease.hostname).get()
if not (
bot_info
and bot_info.lease_id
and bot_info.lease_expiration_ts
and bot_info.machine_type
):
logging.info(
'Creating BotEvent\nKey: %s\nHostname: %s\nBotInfo: %s',
machine_lease.key,
machine_lease.hostname,
bot_info,
)
bot_management.bot_event(
event_type='bot_leased',
bot_id=machine_lease.hostname,
external_ip=None,
authenticated_as=None,
dimensions=None,
state=None,
version=None,
quarantined=False,
maintenance_msg=None,
task_id='',
task_name=None,
lease_id=machine_lease.lease_id,
lease_expiration_ts=machine_lease.lease_expiration_ts,
machine_type=machine_lease.machine_type.id(),
machine_lease=machine_lease.key.id(),
)
# Occasionally bot_management.bot_event fails to store the BotInfo so
# verify presence of Machine Provider fields. See https://crbug.com/681224.
bot_info = bot_management.get_info_key(machine_lease.hostname).get()
if not (
bot_info
and bot_info.lease_id
and bot_info.lease_expiration_ts
and bot_info.machine_type
and bot_info.machine_lease
):
# If associate_bot_id isn't called, cron will try again later.
logging.error(
'Failed to put BotInfo\nKey: %s\nHostname: %s\nBotInfo: %s',
machine_lease.key,
machine_lease.hostname,
bot_info,
)
return
logging.info(
'Put BotInfo\nKey: %s\nHostname: %s\nBotInfo: %s',
machine_lease.key,
machine_lease.hostname,
bot_info,
)
associate_bot_id(machine_lease.key, machine_lease.hostname)
@ndb.transactional
def associate_instruction_ts(key, instruction_ts):
"""Associates an instruction time with the given machine lease.
Args:
key: ndb.Key for a MachineLease entity.
instruction_ts: DateTime indicating when the leased machine was instructed.
"""
machine_lease = key.get()
if not machine_lease:
logging.error('MachineLease does not exist\nKey: %s', key)
return
if machine_lease.instruction_ts:
return
machine_lease.instruction_ts = instruction_ts
machine_lease.put()
def send_connection_instruction(machine_lease):
"""Sends an instruction to the given machine to connect to the server.
Args:
machine_lease: MachineLease instance.
"""
now = utils.utcnow()
response = machine_provider.instruct_machine(
machine_lease.client_request_id,
'https://%s' % app_identity.get_default_version_hostname(),
)
if not response:
logging.error(
'MachineLease instruction got empty response:\nKey: %s\nHostname: %s',
machine_lease.key,
machine_lease.hostname,
)
elif not response.get('error'):
associate_instruction_ts(machine_lease.key, now)
elif response['error'] == 'ALREADY_RECLAIMED':
# Can happen if lease duration is very short or there is a significant delay
# in creating the BotInfo or instructing the machine. Consider it an error.
logging.error(
'MachineLease expired before machine connected:\nKey: %s\nHostname: %s',
machine_lease.key,
machine_lease.hostname,
)
clear_lease_request(machine_lease.key, machine_lease.client_request_id)
else:
logging.warning(
'MachineLease instruction error:\nKey: %s\nHostname: %s\nError: %s',
machine_lease.key,
machine_lease.hostname,
response['error'],
)
@ndb.transactional
def associate_connection_ts(key, connection_ts):
"""Associates a connection time with the given machine lease.
Args:
key: ndb.Key for a MachineLease entity.
connection_ts: DateTime indicating when the bot first connected.
"""
machine_lease = key.get()
if not machine_lease:
logging.error('MachineLease does not exist\nKey: %s', key)
return
if machine_lease.connection_ts:
return
machine_lease.connection_ts = connection_ts
machine_lease.put()
def check_for_connection(machine_lease):
"""Checks for a bot_connected event.
Args:
machine_lease: MachineLease instance.
"""
assert machine_lease.instruction_ts
# Technically this query is wrong because it looks at events in reverse
# chronological order. The connection time we find here is actually the
# most recent connection when we want the earliest. However, this function
# is only called for new bots and stops being called once the connection
# time is recorded, so the connection time we record should end up being the
# first connection anyways. Iterating in the correct order would require
# building a new, large index.
for event in bot_management.get_events_query(machine_lease.bot_id, True):
# We don't want to find a bot_connected event from before we sent the
# connection instruction (e.g. in the event of hostname reuse), so do not
# look at events from before the connection instruction was sent.
if event.ts < machine_lease.instruction_ts:
break
if event.event_type == 'bot_connected':
logging.info(
'Bot connected:\nKey: %s\nHostname: %s\nTime: %s',
machine_lease.key,
machine_lease.hostname,
event.ts,
)
associate_connection_ts(machine_lease.key, event.ts)
ts_mon_metrics.on_machine_connected_time(
(event.ts - machine_lease.instruction_ts).total_seconds(),
fields={
'machine_type': machine_lease.machine_type.id(),
},
)
return
# The bot hasn't connected yet. If it's dead or missing, release the lease.
# At this point we have sent the connection instruction so the bot could still
# connect after we release the lease but before Machine Provider actually
# deletes the bot. Therefore we also schedule a termination task if releasing
# the bot. That way, if the bot connects, it will just shut itself down.
bot_info = bot_management.get_info_key(machine_lease.hostname).get()
if not bot_info:
logging.error(
'BotInfo missing:\nKey: %s\nHostname: %s',
machine_lease.key,
machine_lease.hostname,
)
task = task_request.create_termination_task(
machine_lease.hostname, wait_for_capacity=True)
task_scheduler.schedule_request(task, secret_bytes=None)
if release(machine_lease):
clear_lease_request(machine_lease.key, machine_lease.client_request_id)
return
if bot_info.is_dead:
logging.warning(
'Bot failed to connect in time:\nKey: %s\nHostname: %s',
machine_lease.key,
machine_lease.hostname,
)
task = task_request.create_termination_task(
machine_lease.hostname, wait_for_capacity=True)
task_scheduler.schedule_request(task, secret_bytes=None)
if release(machine_lease):
cleanup_bot(machine_lease)
def cleanup_bot(machine_lease):
"""Cleans up entities after a bot is removed."""
bot_root_key = bot_management.get_root_key(machine_lease.hostname)
# The bot is being removed, remove it from the task queues.
task_queues.cleanup_after_bot(bot_root_key)
bot_management.get_info_key(machine_lease.hostname).delete()
clear_lease_request(machine_lease.key, machine_lease.client_request_id)
def last_shutdown_ts(hostname):
"""Returns the time the given bot posted a final bot_shutdown event.
The bot_shutdown event is only considered if it is the last recorded event.
Args:
hostname: Hostname of the machine.
Returns:
datetime.datetime or None if the last recorded event is not bot_shutdown.
"""
bot_event = bot_management.get_events_query(hostname, True).get()
if bot_event and bot_event.event_type == 'bot_shutdown':
return bot_event.ts
def release(machine_lease):
"""Releases the given lease.
Args:
machine_lease: MachineLease instance.
Returns:
True if the lease was released, False otherwise.
"""
response = machine_provider.release_machine(machine_lease.client_request_id)
if response.get('error'):
error = machine_provider.LeaseReleaseRequestError.lookup_by_name(
response['error'])
if error not in (
machine_provider.LeaseReleaseRequestError.ALREADY_RECLAIMED,
machine_provider.LeaseReleaseRequestError.NOT_FOUND,
):
logging.error(
'Lease release failed\nKey: %s\nRequest ID: %s\nError: %s',
machine_lease.key,
response['client_request_id'],
response['error'],
)
return False
logging.info(
      'MachineLease released:\nKey: %s\nHostname: %s',
machine_lease.key,
machine_lease.hostname,
)
return True
def handle_termination_task(machine_lease):
"""Checks the state of the termination task, releasing the lease if completed.
Args:
machine_lease: MachineLease instance.
"""
assert machine_lease.termination_task
task_result_summary = task_pack.unpack_result_summary_key(
machine_lease.termination_task).get()
if task_result_summary.state in task_result.State.STATES_EXCEPTIONAL:
logging.info(
'Termination failed:\nKey: %s\nHostname: %s\nTask ID: %s\nState: %s',
machine_lease.key,
machine_lease.hostname,
machine_lease.termination_task,
task_result.State.to_string(task_result_summary.state),
)
clear_termination_task(machine_lease.key, machine_lease.termination_task)
return
if task_result_summary.state == task_result.State.COMPLETED:
# There is a race condition where the bot reports the termination task as
# completed but hasn't exited yet. The last thing it does before exiting
# is post a bot_shutdown event. Check for the presence of a bot_shutdown
# event which occurred after the termination task was completed.
shutdown_ts = last_shutdown_ts(machine_lease.bot_id)
if not shutdown_ts or shutdown_ts < task_result_summary.completed_ts:
logging.info(
'Machine terminated but not yet shut down:\nKey: %s\nHostname: %s',
machine_lease.key,
machine_lease.hostname,
)
return
if release(machine_lease):
cleanup_bot(machine_lease)
def handle_early_release(machine_lease):
"""Handles the early release of a leased machine.
Args:
machine_lease: MachineLease instance.
"""
assert not machine_lease.termination_task, machine_lease.termination_task
early_expiration_ts = machine_lease.lease_expiration_ts - datetime.timedelta(
seconds=machine_lease.early_release_secs)
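  # e.g. with lease_expiration_ts at 12:00 and early_release_secs=600
  # (hypothetical values), the machine becomes eligible for release at 11:50.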
if machine_lease.drained or early_expiration_ts <= utils.utcnow():
logging.info(
'MachineLease ready to be released:\nKey: %s\nHostname: %s',
machine_lease.key,
machine_lease.hostname,
)
task = task_request.create_termination_task(
machine_lease.hostname, wait_for_capacity=True)
task_result_summary = task_scheduler.schedule_request(
task, secret_bytes=None)
associate_termination_task(
machine_lease.key, machine_lease.hostname, task_result_summary.task_id)
def manage_leased_machine(machine_lease):
"""Manages a leased machine.
Args:
machine_lease: MachineLease instance with client_request_id, hostname,
lease_expiration_ts set.
"""
assert machine_lease.client_request_id, machine_lease.key
assert machine_lease.hostname, machine_lease.key
assert machine_lease.lease_expiration_ts, machine_lease.key
# Handle a newly leased machine.
if not machine_lease.bot_id:
ensure_bot_info_exists(machine_lease)
# Once BotInfo is created, send the instruction to join the server.
if not machine_lease.instruction_ts:
send_connection_instruction(machine_lease)
return
# Once the instruction is sent, check for connection.
if not machine_lease.connection_ts:
check_for_connection(machine_lease)
# Handle an expired lease.
if machine_lease.lease_expiration_ts <= utils.utcnow():
logging.info(
'MachineLease expired:\nKey: %s\nHostname: %s',
machine_lease.key,
machine_lease.hostname,
)
cleanup_bot(machine_lease)
return
# Handle an active lease with a termination task scheduled.
# TODO(smut): Check if the bot got terminated by some other termination task.
if machine_lease.termination_task:
logging.info(
'MachineLease pending termination:\nKey: %s\nHostname: %s\nTask ID: %s',
machine_lease.key,
machine_lease.hostname,
machine_lease.termination_task,
)
handle_termination_task(machine_lease)
return
# Handle a lease ready for early release.
if machine_lease.early_release_secs or machine_lease.drained:
handle_early_release(machine_lease)
return
def handle_lease_request_error(machine_lease, response):
"""Handles an error in the lease request response from Machine Provider.
Args:
machine_lease: MachineLease instance.
response: Response returned by components.machine_provider.lease_machine.
"""
error = machine_provider.LeaseRequestError.lookup_by_name(response['error'])
if error in (
machine_provider.LeaseRequestError.DEADLINE_EXCEEDED,
machine_provider.LeaseRequestError.TRANSIENT_ERROR,
):
logging.warning(
'Transient failure: %s\nRequest ID: %s\nError: %s',
machine_lease.key,
response['client_request_id'],
response['error'],
)
else:
logging.error(
'Lease request failed\nKey: %s\nRequest ID: %s\nError: %s',
machine_lease.key,
response['client_request_id'],
response['error'],
)
clear_lease_request(machine_lease.key, machine_lease.client_request_id)
def handle_lease_request_response(machine_lease, response):
"""Handles a successful lease request response from Machine Provider.
Args:
machine_lease: MachineLease instance.
response: Response returned by components.machine_provider.lease_machine.
"""
assert not response.get('error')
state = machine_provider.LeaseRequestState.lookup_by_name(response['state'])
if state == machine_provider.LeaseRequestState.FULFILLED:
if not response.get('hostname'):
# Lease has already expired. This shouldn't happen, but it indicates the
# lease expired faster than we could tell it even got fulfilled.
logging.error(
          'Request expired\nKey: %s\nRequest ID: %s\nExpired: %s',
machine_lease.key,
machine_lease.client_request_id,
response['lease_expiration_ts'],
)
clear_lease_request(machine_lease.key, machine_lease.client_request_id)
else:
logging.info(
'Request fulfilled: %s\nRequest ID: %s\nHostname: %s\nExpires: %s',
machine_lease.key,
machine_lease.client_request_id,
response['hostname'],
response['lease_expiration_ts'],
)
log_lease_fulfillment(
machine_lease.key,
machine_lease.client_request_id,
response['hostname'],
int(response['lease_expiration_ts']),
response['request_hash'],
)
elif state == machine_provider.LeaseRequestState.DENIED:
logging.warning(
'Request denied: %s\nRequest ID: %s',
machine_lease.key,
machine_lease.client_request_id,
)
clear_lease_request(machine_lease.key, machine_lease.client_request_id)
def manage_pending_lease_request(machine_lease):
"""Manages a pending lease request.
Args:
machine_lease: MachineLease instance with client_request_id set.
"""
assert machine_lease.client_request_id, machine_lease.key
logging.info(
'Sending lease request: %s\nRequest ID: %s',
machine_lease.key,
machine_lease.client_request_id,
)
response = machine_provider.lease_machine(
machine_provider.LeaseRequest(
dimensions=machine_lease.mp_dimensions,
# TODO(smut): Vary duration so machines don't expire all at once.
duration=machine_lease.lease_duration_secs,
request_id=machine_lease.client_request_id,
),
)
if response.get('error'):
handle_lease_request_error(machine_lease, response)
return
handle_lease_request_response(machine_lease, response)
def manage_lease(key):
"""Manages a MachineLease.
Args:
key: ndb.Key for a MachineLease entity.
"""
machine_lease = key.get()
if not machine_lease:
return
# Manage a leased machine.
if machine_lease.lease_expiration_ts:
manage_leased_machine(machine_lease)
return
# Lease expiration time is unknown, so there must be no leased machine.
assert not machine_lease.hostname, key
assert not machine_lease.termination_task, key
# Manage a pending lease request.
if machine_lease.client_request_id:
manage_pending_lease_request(machine_lease)
return
# Manage an uninitiated lease request.
if not machine_lease.drained:
update_client_request_id(key)
return
# Manage an uninitiated, drained lease request.
delete_machine_lease(key)
def compute_utilization():
"""Computes bot utilization per machine type."""
# A query that requires multiple batches may produce duplicate results. To
# ensure each bot is only counted once, map machine types to [busy, idle]
# sets of bots.
machine_types = collections.defaultdict(lambda: [set(), set()])
def process(bot):
bot_id = bot.key.parent().id()
if bot.task_id:
machine_types[bot.machine_type][0].add(bot_id)
machine_types[bot.machine_type][1].discard(bot_id)
else:
machine_types[bot.machine_type][0].discard(bot_id)
machine_types[bot.machine_type][1].add(bot_id)
# Expectation is ~2000 entities, so batching is valuable but local caching is
# not. Can't use a projection query because 'cannot use projection on a
# property with an equality filter'.
now = utils.utcnow()
q = bot_management.BotInfo.query()
q = bot_management.filter_availability(
q, quarantined=None, in_maintenance=None, is_dead=False, is_busy=None,
is_mp=True)
q.map(process, batch_size=128, use_cache=False)
# The number of machine types isn't very large, in the few tens, so no need to
# rate limit parallelism yet.
futures = []
for machine_type, (busy, idle) in machine_types.iteritems():
busy = len(busy)
idle = len(idle)
logging.info('Utilization for %s: %s/%s', machine_type, busy, busy + idle)
# TODO(maruel): This should be a single entity.
# TODO(maruel): Historical data would be useful.
obj = MachineTypeUtilization(
id=machine_type, busy=busy, idle=idle, last_updated_ts=now)
futures.append(obj.put_async())
for f in futures:
f.get_result()
def set_global_metrics():
"""Set global Machine Provider-related ts_mon metrics."""
# Consider utilization metrics over 2 minutes old to be outdated.
outdated = utils.utcnow() - datetime.timedelta(minutes=2)
payload = {}
for machine_type in MachineType.query():
data = {
'enabled': machine_type.enabled,
'target_size': machine_type.target_size,
}
utilization = ndb.Key(MachineTypeUtilization, machine_type.key.id()).get()
if utilization and utilization.last_updated_ts > outdated:
data['busy'] = utilization.busy
data['idle'] = utilization.idle
payload[machine_type.key.id()] = data
ts_mon_metrics.set_global_metrics('mp', payload)
|
the-stack_106_21878
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
from nslocalized import *
def test_read_utf8_no_bom():
"""Test that we can read UTF-8 strings files."""
data='''\
/* Test string */
"åéîøü" = "ÅÉÎØÜ";
'''.encode('utf-8')
with io.BytesIO(data) as f:
st = StringTable.read(f)
assert st['åéîøü'] == 'ÅÉÎØÜ'
assert st.lookup('åéîøü').comment == 'Test string'
def test_read_encodings():
"""Test that we can read UTF-8 and UTF-16 strings files."""
text = '''\ufeff\
/* Test string */
"åéîøü" = "ÅÉÎØÜ";
'''
for encoding in ['utf_8', 'utf_16_be', 'utf_16_le']:
data = text.encode(encoding)
with io.BytesIO(data) as f:
st = StringTable.read(f)
assert st['åéîøü'] == 'ÅÉÎØÜ'
assert st.lookup('åéîøü').comment == 'Test string'
def test_escapes():
"""Test that we can read escaped strings properly."""
text = '''\
/* C escapes */
"\\a\\b\\f\\n\\r\\t\\v" = "abfnrtv";
/* Octal escapes */
"\\101" = "A";
/* Hex escapes */
"\\x42" = "B";
/* BMP escapes */
"\\u2030" = "PER MILLE";
/* Full Unicode escapes */
"\\U0001f600" = "GRINNING FACE";
/* Quotes */
"This is \\"quoted\\" text." = "This is “quoted” text.";
/* Backslashes and others */
"This \\\\ is a backslash. This \\* is an asterisk." = "Backslash test";
'''
with io.BytesIO(text.encode('utf_8')) as f:
st = StringTable.read(f)
assert st['\a\b\f\n\r\t\v'] == 'abfnrtv'
assert st['A'] == 'A'
assert st['B'] == 'B'
assert st['‰'] == 'PER MILLE'
assert st['\U0001f600'] == 'GRINNING FACE'
assert st['This is "quoted" text.'] == "This is “quoted” text."
assert st['This \\ is a backslash. This * is an asterisk.'] == "Backslash test"
def test_writing():
"""Test that we can write strings files."""
text='''\ufeff\
/* Try some accents åéîøü */
"åéîøü" = "ÅÉÎØÜ";
/* And some escapes */
"\\a\\b\\f\\n\\r\\t\\v" = "\\101 \\x42 \\u2030 \\U0001f600";
/* And some more escapes */
"\x03\u200e\u202a\ufe05\U000e0101" = "\\" \\' \\*";
'''
with io.BytesIO(text.encode('utf_16_be')) as f:
st = StringTable.read(f)
# We do this by testing that we can round-trip; note that some of the escaped
# things above will be un-escaped(!)
for encoding in ['utf_8', 'utf_16_be', 'utf_16_le']:
with io.BytesIO() as f:
st.write(f, encoding=encoding)
f.seek(0)
s2 = StringTable.read(f)
assert st == s2
def test_raw_keys():
"""Test that unquoted keys are parsed properly."""
text = '''\
/* Name of the app. */
CFBundleDisplayName = "My Cool App";
NSPhotoLibraryUsageDescription = "Sharing photos is fun!";
'''
with io.BytesIO(text.encode('utf_8')) as f:
st = StringTable.read(f)
assert st['CFBundleDisplayName'] == 'My Cool App'
assert st.lookup('CFBundleDisplayName').comment == 'Name of the app.'
assert st['NSPhotoLibraryUsageDescription'] == 'Sharing photos is fun!'
assert st.lookup('NSPhotoLibraryUsageDescription').comment is None
def test_include_empty_comments():
"""Test writing and not writing empty comments."""
text = '''\
"A" = "A";
'''
text_with_empty_comments = '''\
/* No description */
"A" = "A";
'''
with io.BytesIO(text.encode('utf_8')) as f:
st = StringTable.read(f)
with io.BytesIO() as f:
st.write(f, encoding='utf-8')
f.seek(0)
text2 = f.read().decode('utf-8')
assert text == text2
with io.BytesIO() as f:
st.include_empty_comments = True
st.write(f, encoding='utf-8')
f.seek(0)
text2 = f.read().decode('utf-8')
assert text_with_empty_comments == text2
def test_comments():
"""Test that comments are parsed properly."""
text = '''\
/* This is a C-style comment which goes over
multiple lines */
"A" = "A";
/* This is a C-style comment with a
/* nested start
comment */
"B" = "B";
/* This is a C-style comment with what looks like a key inside
"NotAKey" = "NotAValue";
*/
"C" = "C";
// This is a C++-style comment
"D" = "D";
// This C++-style comment goes over
// multiple lines
"E" = "E";
"ThisHasNoComment" = "NoComment";
'''
with io.BytesIO(text.encode('utf_8')) as f:
st = StringTable.read(f)
assert st['A'] == 'A'
assert st.lookup('A').comment == 'This is a C-style comment which goes over multiple lines'
assert st['B'] == 'B'
assert st.lookup('B').comment == 'This is a C-style comment with a /* nested start comment'
assert st['C'] == 'C'
assert st.lookup('C').comment == 'This is a C-style comment with what looks like a key inside "NotAKey" = "NotAValue";'
assert st['D'] == 'D'
assert st.lookup('D').comment == 'This is a C++-style comment'
assert st['E'] == 'E'
assert st.lookup('E').comment == 'This C++-style comment goes over multiple lines'
assert st['ThisHasNoComment'] == 'NoComment'
assert st.lookup('ThisHasNoComment').comment is None
|
the-stack_106_21879
|
#
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class crvserver_binding(base_resource):
""" Binding class showing the resources that can be bound to crvserver_binding.
"""
def __init__(self) :
self._name = None
self.crvserver_spilloverpolicy_binding = []
self.crvserver_filterpolicy_binding = []
self.crvserver_icapolicy_binding = []
self.crvserver_analyticsprofile_binding = []
self.crvserver_cmppolicy_binding = []
self.crvserver_lbvserver_binding = []
self.crvserver_appflowpolicy_binding = []
self.crvserver_responderpolicy_binding = []
self.crvserver_policymap_binding = []
self.crvserver_feopolicy_binding = []
self.crvserver_cachepolicy_binding = []
self.crvserver_rewritepolicy_binding = []
self.crvserver_cspolicy_binding = []
self.crvserver_appqoepolicy_binding = []
self.crvserver_appfwpolicy_binding = []
self.crvserver_crpolicy_binding = []
@property
def name(self) :
r"""Name of a cache redirection virtual server about which to display detailed information.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name of a cache redirection virtual server about which to display detailed information.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def crvserver_spilloverpolicy_bindings(self) :
r"""spilloverpolicy that can be bound to crvserver.
"""
try :
return self._crvserver_spilloverpolicy_binding
except Exception as e:
raise e
@property
def crvserver_policymap_bindings(self) :
r"""policymap that can be bound to crvserver.
"""
try :
return self._crvserver_policymap_binding
except Exception as e:
raise e
@property
def crvserver_icapolicy_bindings(self) :
r"""icapolicy that can be bound to crvserver.
"""
try :
return self._crvserver_icapolicy_binding
except Exception as e:
raise e
@property
def crvserver_cachepolicy_bindings(self) :
r"""cachepolicy that can be bound to crvserver.
"""
try :
return self._crvserver_cachepolicy_binding
except Exception as e:
raise e
@property
def crvserver_lbvserver_bindings(self) :
r"""lbvserver that can be bound to crvserver.
"""
try :
return self._crvserver_lbvserver_binding
except Exception as e:
raise e
@property
def crvserver_appfwpolicy_bindings(self) :
r"""appfwpolicy that can be bound to crvserver.
"""
try :
return self._crvserver_appfwpolicy_binding
except Exception as e:
raise e
@property
def crvserver_analyticsprofile_bindings(self) :
r"""analyticsprofile that can be bound to crvserver.
"""
try :
return self._crvserver_analyticsprofile_binding
except Exception as e:
raise e
@property
def crvserver_responderpolicy_bindings(self) :
r"""responderpolicy that can be bound to crvserver.
"""
try :
return self._crvserver_responderpolicy_binding
except Exception as e:
raise e
@property
def crvserver_filterpolicy_bindings(self) :
r"""filterpolicy that can be bound to crvserver.
"""
try :
return self._crvserver_filterpolicy_binding
except Exception as e:
raise e
@property
def crvserver_cmppolicy_bindings(self) :
r"""cmppolicy that can be bound to crvserver.
"""
try :
return self._crvserver_cmppolicy_binding
except Exception as e:
raise e
@property
def crvserver_appqoepolicy_bindings(self) :
r"""appqoepolicy that can be bound to crvserver.
"""
try :
return self._crvserver_appqoepolicy_binding
except Exception as e:
raise e
@property
def crvserver_feopolicy_bindings(self) :
r"""feopolicy that can be bound to crvserver.
"""
try :
return self._crvserver_feopolicy_binding
except Exception as e:
raise e
@property
def crvserver_cspolicy_bindings(self) :
r"""cspolicy that can be bound to crvserver.
"""
try :
return self._crvserver_cspolicy_binding
except Exception as e:
raise e
@property
def crvserver_crpolicy_bindings(self) :
r"""crpolicy that can be bound to crvserver.
"""
try :
return self._crvserver_crpolicy_binding
except Exception as e:
raise e
@property
def crvserver_rewritepolicy_bindings(self) :
r"""rewritepolicy that can be bound to crvserver.
"""
try :
return self._crvserver_rewritepolicy_binding
except Exception as e:
raise e
@property
def crvserver_appflowpolicy_bindings(self) :
r"""appflowpolicy that can be bound to crvserver.
"""
try :
return self._crvserver_appflowpolicy_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(crvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.crvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name="", option_="") :
r""" Use this API to fetch crvserver_binding resource.
"""
try :
if not name :
obj = crvserver_binding()
response = obj.get_resources(service, option_)
elif type(name) is not list :
obj = crvserver_binding()
obj.name = name
response = obj.get_resource(service)
else :
if name and len(name) > 0 :
obj = [crvserver_binding() for _ in range(len(name))]
					response = [None] * len(name)
for i in range(len(name)) :
obj[i].name = name[i];
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class crvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.crvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.crvserver_binding = [crvserver_binding() for _ in range(length)]
|
the-stack_106_21880
|
import numpy as np
import math
import time
def fastLms(signalInput, desiredOutput, M, step=0.1, forgetness=0.9):
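    # Block (fast) LMS adaptive filter: the input is processed in blocks of M
    # samples using FFT-based overlap-save convolution. 'step' is the
    # adaptation rate and 'forgetness' the forgetting factor of the running
    # per-bin power estimate P used to normalise the update. Returns the
    # filtered output and the elapsed wall-clock time in seconds.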
blocks = int(math.ceil(1.0 * signalInput.size/M))
coefficients = np.random.rand(2*M)
P = np.ones(2*M)
totalOut = []
startTime = time.time()
for i in range(blocks):
des = desiredOutput[i*M:(i+1)*M]
if (des.size < M):
des = np.append(des, np.random.rand(M-des.size))
inp = []
remaining = signalInput.size - (i-1)*M
if i == 0:
padding = np.zeros(M)
inp = np.append(padding, signalInput[:M])
elif remaining < 2*M:
padding = np.zeros(2*M-remaining)
inp = np.append(signalInput[(i-1)*M:], padding)
else:
inp = signalInput[(i-1)*M:(i+1)*M]
block = flmsBlock(inp, des, coefficients, M, i, P)
output, coefficients, P = flmsChunk(block, step, forgetness)
totalOut.append(output)
totalOut = np.asarray(totalOut).ravel()[:signalInput.size]
elapsed = time.time() - startTime
return totalOut, elapsed
class flmsBlock(object):
inBlk = []
outBlk = []
desiredOutBlk = []
W = []
filterLen = 0
idx = 0
pwr = []
def __init__(self, inputBlk, desiredOutBlk, coefficients, filterLen, idx, pwr):
self.inBlk = inputBlk
self.desiredOutBlk = desiredOutBlk
self.W = coefficients
self.filterLen = filterLen
self.idx = idx
self.pwr = pwr
def flmsChunk(block, step, forgetness):
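    # One FLMS iteration over a 2M-sample block: FFT the input, filter it,
    # form the (zero-padded) error, refresh the per-bin power estimate, apply
    # the gradient constraint and update the filter coefficients.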
U = np.fft.fft(block.inBlk)
UH = conjugateUpdate(U)
signalOutput = outputUpdate(U, block.W, block.filterLen)
error = errorUpdate(block.desiredOutBlk, signalOutput, block.filterLen)
P, D = updatePow(block.pwr, U, forgetness)
constraint = gradientConstraint(error, UH, D, block.filterLen)
coefficients = coeffUpdate(constraint, block.W, step)
return np.real(signalOutput), coefficients, P
def gradientConstraint (error, UH, D, M):
convolution = D * UH * error
invConvolution = np.fft.ifft(convolution)
paddedInv = np.append(invConvolution[:M], np.zeros(M))
return np.fft.fft(paddedInv)
def coeffUpdate (constrain, coefficients, step):
convolution = step * constrain
update = convolution + coefficients
return update
def errorUpdate (desiredBlock, outputBlock, M):
if (desiredBlock.size != outputBlock.size):
print(desiredBlock.size, outputBlock.size)
error = desiredBlock - outputBlock
paddedError = np.append(np.zeros(M), error)
return np.fft.fft(paddedError)
def outputUpdate (matrixU, coefficients, M):
convolution = matrixU * coefficients
invConvolution = np.fft.ifft(convolution)
return (invConvolution[M:2*M])
def conjugateUpdate (U):
return np.conj(U)
def updatePow(P, U, gamma):
P = gamma * P + (1 - gamma) * np.abs(U)**2
D = 1 / P
return P, D
|
the-stack_106_21886
|
from typing import Type, Tuple, Union
import numpy as np
from autoconf import cached_property
from .abstract import AbstractMessage
from .transform import AbstractDensityTransform
class TransformedMessage(AbstractMessage):
_Message: Type[AbstractMessage]
_transform: Union[AbstractDensityTransform, Type[AbstractDensityTransform]]
_depth = 0
def __init__(self, *args, **kwargs):
self.instance = self._Message(*args, **kwargs)
super().__init__(
*self.instance.parameters,
log_norm=self.instance.log_norm,
lower_limit=self.instance.lower_limit,
upper_limit=self.instance.upper_limit,
id_=self.instance.id
)
@property
def natural_parameters(self):
return self.instance.natural_parameters
@property
def log_partition(self) -> np.ndarray:
return self.instance.log_partition
def __getattr__(self, item):
if item == "__setstate__":
raise AttributeError()
return getattr(
self.instance,
item
)
@classmethod
def invert_natural_parameters(
cls,
natural_parameters
):
return cls._Message.invert_natural_parameters(
natural_parameters
)
@classmethod
def invert_sufficient_statistics(
cls,
sufficient_statistics
):
return cls._Message.invert_sufficient_statistics(
sufficient_statistics
)
def value_for(self, unit):
return self._transform.inv_transform(
self.instance.value_for(
unit
)
)
# noinspection PyMethodOverriding
@classmethod
def _reconstruct( # type: ignore
cls,
Message: 'AbstractMessage',
clsname: str,
transform: AbstractDensityTransform,
parameters: Tuple[np.ndarray, ...],
log_norm: float,
id_,
lower_limit,
upper_limit
):
# Reconstructs TransformedMessage during unpickling
Transformed = Message.transformed(transform, clsname)
return Transformed(
*parameters,
log_norm=log_norm,
id_=id_,
lower_limit=lower_limit,
upper_limit=upper_limit
)
def __reduce__(self):
# serialises TransformedMessage during pickling
return (
TransformedMessage._reconstruct,
(
self._Message,
self.__class__.__name__,
self._transform,
self.parameters,
self.log_norm,
self.id,
self.lower_limit,
self.upper_limit
),
)
@classmethod
def calc_log_base_measure(cls, x) -> np.ndarray:
x = cls._transform.transform(x)
log_base = cls._Message.calc_log_base_measure(x)
return log_base
@classmethod
def to_canonical_form(cls, x) -> np.ndarray:
x = cls._transform.transform(x)
return cls._Message.to_canonical_form(x)
@cached_property
def mean(self) -> np.ndarray:
# noinspection PyUnresolvedReferences
return self._transform.inv_transform(
self.instance.mean
)
@cached_property
def variance(self) -> np.ndarray:
# noinspection PyUnresolvedReferences
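        # Propagate the variance of the wrapped message through the transform
        # using the Jacobian evaluated at the mean (a first-order,
        # delta-method-style approximation).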
jac = self._transform.jacobian(self.instance.mean)
return jac.quad(self._Message.variance.func(self))
def _sample(self, n_samples) -> np.ndarray:
x = self.instance._sample(n_samples)
return self._transform.inv_transform(x)
@classmethod
def _factor(
cls,
self,
x: np.ndarray,
) -> np.ndarray:
x, log_det = cls._transform.transform_det(x)
eta = self._broadcast_natural_parameters(x)
t = cls._Message.to_canonical_form(x)
log_base = self.calc_log_base_measure(x) + log_det
return self.natural_logpdf(eta, t, log_base, self.log_partition)
@classmethod
def _factor_gradient(
cls,
self,
x: np.ndarray,
) -> np.ndarray:
x, logd, logd_grad, jac = cls._transform.transform_det_jac(x)
logl, grad = cls._Message._logpdf_gradient(self, x)
return logl + logd, grad * jac + logd_grad
def factor(self, x):
return self._factor(self, x)
def factor_gradient(self, x):
return self._factor_gradient(self, x)
@classmethod
def _logpdf_gradient( # type: ignore
cls,
self,
x: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
x, jac = cls._transform.transform_jac(x)
logl, grad = cls._Message._logpdf_gradient(self, x)
return logl, grad * jac
def sample(self, n_samples=None) -> np.ndarray:
return self._sample(n_samples)
def logpdf_gradient(
self, x: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
return self._logpdf_gradient(self, x)
# @classmethod
# def _logpdf_gradient_hessian( # type: ignore
# cls,
# self,
# x: np.ndarray,
# ) -> Tuple[np.ndarray, np.ndarray]:
# x, jac = cls._transform.transform_jac(x)
# logl, grad, hess = cls._Message._logpdf_gradient_hessian(self, x)
# return logl, grad * jac, jac.quad(hess)
# def logpdf_gradient_hessian(
# self, x: np.ndarray
# ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
# return self._logpdf_gradient_hessian(self, x)
|
the-stack_106_21887
|
#!/usr/bin/env python
#
# Copyright (c) 2019, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
from pycoproc import Pycoproc
__version__ = '1.4.0'
class Pysense(Pycoproc):
def __init__(self, i2c=None, sda='P22', scl='P21'):
Pycoproc.__init__(self, i2c, sda, scl)
|
the-stack_106_21890
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from time import sleep
import pytest
import torch
from tests.backends import ddp_model
from tests.backends.launcher import DDPLauncher
from tests.utilities.distributed import call_training_script
@pytest.mark.parametrize('cli_args', [
pytest.param('--max_epochs 1 --gpus 2 --distributed_backend ddp'),
])
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_model_ddp_fit_only(tmpdir, cli_args):
# call the script
std, err = call_training_script(ddp_model, cli_args, 'fit', tmpdir, timeout=120)
# load the results of the script
result_path = os.path.join(tmpdir, 'ddp.result')
result = torch.load(result_path)
# verify the file wrote the expected outputs
assert result['status'] == 'complete'
@pytest.mark.parametrize('cli_args', [
pytest.param('--max_epochs 1 --gpus 2 --distributed_backend ddp'),
])
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_model_ddp_test_only(tmpdir, cli_args):
# call the script
call_training_script(ddp_model, cli_args, 'test', tmpdir)
# load the results of the script
result_path = os.path.join(tmpdir, 'ddp.result')
result = torch.load(result_path)
# verify the file wrote the expected outputs
assert result['status'] == 'complete'
@pytest.mark.parametrize('cli_args', [
pytest.param('--max_epochs 1 --gpus 2 --distributed_backend ddp'),
])
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_model_ddp_fit_test(tmpdir, cli_args):
# call the script
call_training_script(ddp_model, cli_args, 'fit_test', tmpdir, timeout=20)
# load the results of the script
result_path = os.path.join(tmpdir, 'ddp.result')
result = torch.load(result_path)
# verify the file wrote the expected outputs
assert result['status'] == 'complete'
model_outs = result['result']
for out in model_outs:
assert out['test_acc'] > 0.90
# START: test_cli ddp test
@pytest.mark.skipif(os.getenv("PL_IN_LAUNCHER", '0') == '1', reason="test runs only in DDPLauncher")
def internal_test_cli(tmpdir, args=None):
"""
    This test verifies that we can call a function using the test_cli name.
"""
return 1
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_cli(tmpdir):
DDPLauncher.run_from_cmd_line("--max_epochs 1 --gpus 2 --accelerator ddp", internal_test_cli, tmpdir)
# load the results of the script
result_path = os.path.join(tmpdir, 'ddp.result')
result = torch.load(result_path)
# verify the file wrote the expected outputs
assert result['status'] == 'complete'
assert str(result['result']) == '1'
# END: test_cli ddp test
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@DDPLauncher.run("--max_epochs [max_epochs] --gpus 2 --accelerator [accelerator]",
max_epochs=["1"],
accelerator=["ddp", "ddp_spawn"])
def test_cli_to_pass(tmpdir, args=None):
"""
    This test verifies that we can call a function using the test_cli name.
"""
return '1'
|
the-stack_106_21891
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestAllBlends(vtk.test.Testing.vtkTest):
def testAllBlends(self):
        # This test exercises vtkImageBlend with several foreground/background combinations
renWin = vtk.vtkRenderWindow()
renWin.SetSize(512, 256)
# Image pipeline
image1 = vtk.vtkTIFFReader()
image1.SetFileName(VTK_DATA_ROOT + "/Data/beach.tif")
# "beach.tif" image contains ORIENTATION tag which is
# ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
# reader parses this tag and sets the internal TIFF image
# orientation accordingly. To overwrite this orientation with a vtk
# convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
# SetOrientationType method with parameter value of 4.
image1.SetOrientationType(4)
image2 = vtk.vtkBMPReader()
image2.SetFileName(VTK_DATA_ROOT + "/Data/masonry.bmp")
# shrink the images to a reasonable size
color = vtk.vtkImageShrink3D()
color.SetInputConnection(image1.GetOutputPort())
color.SetShrinkFactors(2, 2, 1)
backgroundColor = vtk.vtkImageShrink3D()
backgroundColor.SetInputConnection(image2.GetOutputPort())
backgroundColor.SetShrinkFactors(2, 2, 1)
# create a greyscale version
luminance = vtk.vtkImageLuminance()
luminance.SetInputConnection(color.GetOutputPort())
backgroundLuminance = vtk.vtkImageLuminance()
backgroundLuminance.SetInputConnection(backgroundColor.GetOutputPort())
# create an alpha mask
table = vtk.vtkLookupTable()
table.SetTableRange(220, 255)
table.SetValueRange(1, 0)
table.SetSaturationRange(0, 0)
table.Build()
alpha = vtk.vtkImageMapToColors()
alpha.SetInputConnection(luminance.GetOutputPort())
alpha.SetLookupTable(table)
alpha.SetOutputFormatToLuminance()
# make luminanceAlpha and colorAlpha versions
luminanceAlpha = vtk.vtkImageAppendComponents()
luminanceAlpha.AddInputConnection(luminance.GetOutputPort())
luminanceAlpha.AddInputConnection(alpha.GetOutputPort())
colorAlpha = vtk.vtkImageAppendComponents()
colorAlpha.AddInputConnection(color.GetOutputPort())
colorAlpha.AddInputConnection(alpha.GetOutputPort())
foregrounds = ["luminance", "luminanceAlpha", "color", "colorAlpha"]
backgrounds = ["backgroundColor", "backgroundLuminance"]
deltaX = 1.0 / 4.0
deltaY = 1.0 / 2.0
blend = dict()
mapper = dict()
actor = dict()
imager = dict()
for row, bg in enumerate(backgrounds):
for column, fg in enumerate(foregrounds):
blend.update({bg:{fg:vtk.vtkImageBlend()}})
blend[bg][fg].AddInputConnection(eval(bg + '.GetOutputPort()'))
if bg == "backgroundColor" or fg == "luminance" or fg == "luminanceAlpha":
blend[bg][fg].AddInputConnection(eval(fg + '.GetOutputPort()'))
blend[bg][fg].SetOpacity(1, 0.8)
mapper.update({bg:{fg:vtk.vtkImageMapper()}})
mapper[bg][fg].SetInputConnection(blend[bg][fg].GetOutputPort())
mapper[bg][fg].SetColorWindow(255)
mapper[bg][fg].SetColorLevel(127.5)
actor.update({bg:{fg:vtk.vtkActor2D()}})
actor[bg][fg].SetMapper(mapper[bg][fg])
imager.update({bg:{fg:vtk.vtkRenderer()}})
imager[bg][fg].AddActor2D(actor[bg][fg])
imager[bg][fg].SetViewport(column * deltaX, row * deltaY, (column + 1) * deltaX, (row + 1) * deltaY)
renWin.AddRenderer(imager[bg][fg])
column += 1
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)
renWin.Render()
img_file = "TestAllBlends.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestAllBlends, 'test')])
|
the-stack_106_21892
|
#!/usr/bin/python3
'''
--- Day 1: Report Repair ---
After saving Christmas five years in a row, you've decided to take a vacation at a nice resort on a tropical island. Surely, Christmas will go on without you.
The tropical island has its own currency and is entirely cash-only. The gold coins used there have a little picture of a starfish; the locals just call them stars. None of the currency exchanges seem to have heard of them, but somehow, you'll need to find fifty of these coins by the time you arrive so you can pay the deposit on your room.
To save your vacation, you need to get all fifty stars by December 25th.
Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
Before you leave, the Elves in accounting just need you to fix your expense report (your puzzle input); apparently, something isn't quite adding up.
Specifically, they need you to find the two entries that sum to 2020 and then multiply those two numbers together.
For example, suppose your expense report contained the following:
1721
979
366
299
675
1456
In this list, the two entries that sum to 2020 are 1721 and 299. Multiplying them together produces 1721 * 299 = 514579, so the correct answer is 514579.
Of course, your expense report is much larger. Find the two entries that sum to 2020; what do you get if you multiply them together?
--- Part Two ---
The Elves in accounting are thankful for your help; one of them even offers you a starfish coin they had left over from a past vacation. They offer you a second one if you can find three numbers in your expense report that meet the same criteria.
Using the above example again, the three entries that sum to 2020 are 979, 366, and 675. Multiplying them together produces the answer, 241861950.
In your expense report, what is the product of the three entries that sum to 2020?
'''
import os
DAY = "DAY 1"
def PrepareList(aList):
numList = [int(item) for item in aList]
return numList
def PrintAnswers(aPart1, aPart2):
print (DAY)
print (" Part 1: {}".format(aPart1))
print (" Part 2: {}".format(aPart2))
def Answer(aList):
    # part 1
    part1 = "?"
    # part 2 (initialized here so it is always defined)
    part2 = "?"
for item in aList:
if ((2020-item) in aList):
part1 = item * (2020-item)
for item2 in aList:
if ((2020-item-item2) in aList):
part2 = item * item2 * (2020-item-item2)
PrintAnswers(part1, part2)
def Main():
inputFileName = __file__.replace(".py", ".input")
if not os.path.isfile(inputFileName):
print ("Input file ({}) does not exist.".format(inputFileName))
return
with open(inputFileName, 'r') as fh:
lines = [line.strip() for line in fh]
# Prepare line list (as necessary)
numList = PrepareList(lines)
# Part 1/2 function call(s)
Answer(numList)
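# An alternative sketch of the same search that uses a set for constant-time membership
# tests (the name SetBasedAnswer is introduced here for illustration only; Main() above
# still calls Answer()).
def SetBasedAnswer(aList):
    numSet = set(aList)
    part1 = "?"
    part2 = "?"
    # part 1: one pass over the list, set lookup for the complement
    for item in aList:
        if (2020 - item) in numSet:
            part1 = item * (2020 - item)
            break
    # part 2: pairwise pass, set lookup for the third entry
    for i, item in enumerate(aList):
        for item2 in aList[i + 1:]:
            third = 2020 - item - item2
            if third in numSet:
                part2 = item * item2 * third
    return part1, part2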
# if run stand-alone
if __name__ == '__main__':
Main()
|
the-stack_106_21893
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 19:55:38 2018
@author: saschajecklin
"""
import sys
sys.path.append("..")
import random
import numpy as np
from collections import deque
from keras.models import Model
from keras.layers import Dense, Flatten, Input, Conv2D
from keras.optimizers import RMSprop
from keras.regularizers import l2
from keras.layers.merge import Add
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Activation
GAMMA = 0.95
MEMORY_SIZE = 1000000
BATCH_SIZE = 512
EXPLORATION_MAX = 1.0
EXPLORATION_MIN = 0.01
EXPLORATION_DECAY = 0.998
class DQNSolver:
def __init__(self, observation_space, action_space):
self.exploration_rate = EXPLORATION_MAX
self.observation_space = observation_space
self.action_space = action_space
self.memory = deque(maxlen=MEMORY_SIZE)
in_x = x = Input((self.observation_space[0], self.observation_space[1], 2)) # stack of own(6x7) and enemy(6x7) field
x = Conv2D(128, 3, padding="same", kernel_regularizer=l2(1e-4),
data_format="channels_last")(x)
x = BatchNormalization(axis=-1)(x)
x = Activation("relu")(x)
for _ in range(2):
x = self._build_residual_block(x)
x = Conv2D(filters=8, kernel_size=1, kernel_regularizer=l2(1e-4),
data_format="channels_last")(x)
x = BatchNormalization(axis=-1)(x)
x = Activation("relu")(x)
x = Flatten()(x)
policy_out = Dense(action_space, kernel_regularizer=l2(1e-4), activation="softmax", name="policy_out")(x)
self.model = Model(in_x, policy_out, name="connect4_model")
self.optimizer = RMSprop(lr=0.00025, rho=0.9, epsilon=1e-6, decay=0.0) #SGD(lr=1e-2, momentum=0.9)
# self.optimizer = SGD(lr=1e-2, momentum=0.9)
self.model.compile(optimizer=self.optimizer, loss='categorical_crossentropy')
def _build_residual_block(self, x):
in_x = x
x = Conv2D(filters=128, kernel_size=3, padding="same",
kernel_regularizer=l2(1e-4), data_format="channels_last")(x)
x = BatchNormalization(axis=-1)(x)
x = Activation("relu")(x)
x = Conv2D(filters=128, kernel_size=3, padding="same",
kernel_regularizer=l2(1e-4), data_format="channels_last")(x)
x = BatchNormalization(axis=-1)(x)
x = Add()([in_x, x])
x = Activation("relu")(x)
return x
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
# mirror state, next_state and action to produce twice as much training data
self.memory.append((np.flip(state, 1), (self.action_space-1)-action, reward, np.flip(next_state, 1), done))
def pop(self):
        for i in range(2):  # pop 2 because remember() appends two entries (original and mirrored)
self.memory.pop()
    def act(self, state, env):  # state doesn't have to be the state in env; it could be inverted
if np.random.rand() < self.exploration_rate:
return env.sample()
state = np.expand_dims(state, axis=0)
q_values = self.model.predict(state)
mask = (np.expand_dims(env.validMoves(),0) == 0)
q_values[mask] = float('-inf') # guard for valid moves
return np.argmax(q_values[0])
def experience_replay(self):
if len(self.memory) < BATCH_SIZE:
return
batch = random.sample(self.memory, BATCH_SIZE)
state_batch = np.zeros((BATCH_SIZE, self.observation_space[0], self.observation_space[1], 2))
q_values_batch = np.zeros((BATCH_SIZE, self.action_space))
idx = 0
for state, action, reward, state_next, terminal in batch:
q_update = reward
if not terminal:
state_next = np.expand_dims(state_next, axis=0)
q_update = (reward + GAMMA * np.amax(self.model.predict(state_next)[0]))
state = np.expand_dims(state, axis=0)
q_values = self.model.predict(state)
q_values[0][action] = q_update
state_batch[idx, ...] = state
q_values_batch[idx, ...] = q_values
idx = idx + 1
self.model.fit(state_batch, q_values_batch, verbose=0)
self.exploration_rate *= EXPLORATION_DECAY
self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)
def save(self, path='weights.h5'):
self.model.save_weights(filepath=path)
def load(self, path='weights.h5'):
self.model.load_weights(filepath=path)
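# A minimal usage sketch (hypothetical): it assumes a Connect-4 environment object with
# reset() -> state, step(action) -> (next_state, reward, done), sample() and validMoves(),
# and states shaped (rows, cols, 2). The name `env` is a placeholder and is not defined
# in this module.
#   solver = DQNSolver(observation_space=(6, 7), action_space=7)
#   for episode in range(1000):
#       state = env.reset()
#       done = False
#       while not done:
#           action = solver.act(state, env)
#           next_state, reward, done = env.step(action)
#           solver.remember(state, action, reward, next_state, done)
#           state = next_state
#           solver.experience_replay()
#   solver.save('weights.h5')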
|
the-stack_106_21895
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from hypothesis import given, assume
import hypothesis.strategies as st
from itertools import izip
from caffe2.python import core, cnn
import caffe2.python.hypothesis_test_util as hu
class TestLeakyRelu(hu.HypothesisTestCase):
def _get_inputs(self, N, C, H, W, order):
input_data = np.random.rand(N, C, H, W).astype(np.float32)
# default step size is 0.05
input_data[np.logical_and(
input_data >= 0, input_data <= 0.051)] = 0.051
input_data[np.logical_and(
input_data <= 0, input_data >= -0.051)] = -0.051
if order == 'NHWC':
input_data = np.transpose(input_data, axes=(0, 2, 3, 1))
return input_data,
def _get_op(self, device_option, alpha, order, inplace=False):
outputs = ['output' if not inplace else "input"]
op = core.CreateOperator(
'LeakyRelu',
['input'],
outputs,
alpha=alpha,
device_option=device_option)
return op
def _feed_inputs(self, input_blobs, device_option):
names = ['input', 'scale', 'bias']
for name, blob in izip(names, input_blobs):
self.ws.create_blob(name).feed(blob, device_option=device_option)
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 3),
C=st.integers(2, 3),
H=st.integers(2, 3),
W=st.integers(2, 3),
alpha=st.floats(0, 1),
order=st.sampled_from(['NCHW', 'NHWC']),
seed=st.integers(0, 1000))
def test_leaky_relu_gradients(self, gc, dc, N, C, H, W, order, alpha, seed):
np.random.seed(seed)
op = self._get_op(
device_option=gc,
alpha=alpha,
order=order)
input_blobs = self._get_inputs(N, C, H, W, order)
self.assertDeviceChecks(dc, op, input_blobs, [0])
self.assertGradientChecks(gc, op, input_blobs, 0, [0])
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000))
def test_leaky_relu_layout(self, gc, dc, N, C, H, W, alpha, seed):
outputs = {}
for order in ('NCHW', 'NHWC'):
np.random.seed(seed)
input_blobs = self._get_inputs(N, C, H, W, order)
self._feed_inputs(input_blobs, device_option=gc)
op = self._get_op(
device_option=gc,
alpha=alpha,
order=order)
self.ws.run(op)
outputs[order] = self.ws.blobs['output'].fetch()
np.testing.assert_allclose(
outputs['NCHW'],
outputs['NHWC'].transpose((0, 3, 1, 2)),
atol=1e-4,
rtol=1e-4)
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
order=st.sampled_from(['NCHW', 'NHWC']),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000),
inplace=st.booleans())
def test_leaky_relu_reference_check(self, gc, dc, N, C, H, W, order, alpha,
seed, inplace):
np.random.seed(seed)
if order != "NCHW":
assume(not inplace)
inputs = self._get_inputs(N, C, H, W, order)
op = self._get_op(
device_option=gc,
alpha=alpha,
order=order,
inplace=inplace)
def ref(input_blob):
result = input_blob.copy()
result[result < 0] *= alpha
return result,
self.assertReferenceChecks(gc, op, inputs, ref)
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
order=st.sampled_from(['NCHW', 'NHWC']),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000))
def test_leaky_relu_device_check(self, gc, dc, N, C, H, W, order, alpha,
seed):
np.random.seed(seed)
inputs = self._get_inputs(N, C, H, W, order)
op = self._get_op(
device_option=gc,
alpha=alpha,
order=order)
self.assertDeviceChecks(dc, op, inputs, [0])
@given(N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
order=st.sampled_from(['NCHW', 'NHWC']),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000))
def test_leaky_relu_cnn_helper(self, N, C, H, W, order, alpha, seed):
np.random.seed(seed)
model = cnn.CNNModelHelper(order=order)
model.LeakyRelu(
'input',
'output',
alpha=alpha)
input_blob = np.random.rand(N, C, H, W).astype(np.float32)
if order == 'NHWC':
input_blob = np.transpose(input_blob, axes=(0, 2, 3, 1))
self.ws.create_blob('input').feed(input_blob)
self.ws.create_net(model.param_init_net).run()
self.ws.create_net(model.net).run()
output_blob = self.ws.blobs['output'].fetch()
if order == 'NHWC':
output_blob = np.transpose(output_blob, axes=(0, 3, 1, 2))
assert output_blob.shape == (N, C, H, W)
if __name__ == '__main__':
import unittest
unittest.main()
|
the-stack_106_21896
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The plugin serving the interactive inference tab."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import json_format
from grpc.framework.interfaces.face.face import AbortionError
from werkzeug import wrappers
import tensorflow as tf
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard.plugins.interactive_inference.utils import common_utils
from tensorboard.plugins.interactive_inference.utils import inference_utils
from tensorboard.plugins.interactive_inference.utils import platform_utils
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
# Max number of examples to scan along the `examples_path` in order to return
# statistics and sampling for features.
NUM_EXAMPLES_TO_SCAN = 50
# Max number of mutants to show per feature (i.e. num of points along x-axis).
NUM_MUTANTS = 10
class InteractiveInferencePlugin(base_plugin.TBPlugin):
"""Plugin for understanding/debugging model inference.
"""
# This string field is used by TensorBoard to generate the paths for routes
# provided by this plugin. It must thus be URL-friendly. This field is also
# used to uniquely identify this plugin throughout TensorBoard. See BasePlugin
# for details.
plugin_name = 'whatif'
examples = []
updated_example_indices = set()
sprite = None
example_class = tf.train.Example
# The standard name for encoded image features in TensorFlow.
image_feature_name = 'image/encoded'
# The width and height of the thumbnail for any images for Facets Dive.
sprite_thumbnail_dim_px = 32
# The vocab of inference class indices to label names for the model.
label_vocab = []
def __init__(self, context):
"""Constructs an interactive inference plugin for TensorBoard.
Args:
context: A base_plugin.TBContext instance.
"""
self._logdir = context.logdir
self._has_auth_group = (context.flags and
'authorized_groups' in context.flags and
                            context.flags.authorized_groups != '')
def get_plugin_apps(self):
"""Obtains a mapping between routes and handlers. Stores the logdir.
Returns:
A mapping between routes and handlers (functions that respond to
requests).
"""
return {
'/infer': self._infer,
'/update_example': self._update_example,
'/examples_from_path': self._examples_from_path_handler,
'/sprite': self._serve_sprite,
'/duplicate_example': self._duplicate_example,
'/delete_example': self._delete_example,
'/infer_mutants': self._infer_mutants_handler,
'/eligible_features': self._eligible_features_from_example_handler,
}
def is_active(self):
"""Determines whether this plugin is active.
Returns:
A boolean. Whether this plugin is active.
"""
# TODO(jameswex): Maybe enable if config flags were specified?
return False
def frontend_metadata(self):
# TODO(#2338): Keep this in sync with the `registerDashboard` call
# on the frontend until that call is removed.
return base_plugin.FrontendMetadata(
element_name='tf-interactive-inference-dashboard',
tab_name='What-If Tool',
)
def generate_sprite(self, example_strings):
# Generate a sprite image for the examples if the examples contain the
# standard encoded image feature.
feature_list = (self.examples[0].features.feature
if self.example_class == tf.train.Example
else self.examples[0].context.feature)
self.sprite = (
inference_utils.create_sprite_image(example_strings)
if (len(self.examples) and self.image_feature_name in feature_list) else
None)
@wrappers.Request.application
def _examples_from_path_handler(self, request):
"""Returns JSON of the specified examples.
Args:
request: A request that should contain 'examples_path' and 'max_examples'.
Returns:
      JSON of up to max_examples of the examples in the path.
"""
examples_count = int(request.args.get('max_examples'))
examples_path = request.args.get('examples_path')
sampling_odds = float(request.args.get('sampling_odds'))
self.example_class = (tf.train.SequenceExample
if request.args.get('sequence_examples') == 'true'
else tf.train.Example)
try:
platform_utils.throw_if_file_access_not_allowed(examples_path,
self._logdir,
self._has_auth_group)
example_strings = platform_utils.example_protos_from_path(
examples_path, examples_count, parse_examples=False,
sampling_odds=sampling_odds, example_class=self.example_class)
self.examples = [
self.example_class.FromString(ex) for ex in example_strings]
self.generate_sprite(example_strings)
json_examples = [
json_format.MessageToJson(example) for example in self.examples
]
self.updated_example_indices = set(range(len(json_examples)))
return http_util.Respond(
request,
{'examples': json_examples,
'sprite': True if self.sprite else False}, 'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, {'error': e.message},
'application/json', code=400)
@wrappers.Request.application
def _serve_sprite(self, request):
return http_util.Respond(request, self.sprite, 'image/png')
@wrappers.Request.application
def _update_example(self, request):
"""Updates the specified example.
Args:
request: A request that should contain 'index' and 'example'.
Returns:
An empty response.
"""
if request.method != 'POST':
return http_util.Respond(request, {'error': 'invalid non-POST request'},
'application/json', code=405)
example_json = request.form['example']
index = int(request.form['index'])
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
new_example = self.example_class()
json_format.Parse(example_json, new_example)
self.examples[index] = new_example
self.updated_example_indices.add(index)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
@wrappers.Request.application
def _duplicate_example(self, request):
"""Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
new_example = self.example_class()
new_example.CopyFrom(self.examples[index])
self.examples.append(new_example)
self.updated_example_indices.add(len(self.examples) - 1)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
@wrappers.Request.application
def _delete_example(self, request):
"""Deletes the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
del self.examples[index]
self.updated_example_indices = set([
i if i < index else i - 1 for i in self.updated_example_indices])
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
def _parse_request_arguments(self, request):
"""Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters
"""
inference_addresses = request.args.get('inference_address').split(',')
model_names = request.args.get('model_name').split(',')
model_versions = request.args.get('model_version').split(',')
model_signatures = request.args.get('model_signature').split(',')
if len(model_names) != len(inference_addresses):
raise common_utils.InvalidUserInputError('Every model should have a ' +
'name and address.')
return inference_addresses, model_names, model_versions, model_signatures
@wrappers.Request.application
def _infer(self, request):
"""Returns JSON for the `vz-line-chart`s for a feature.
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_type, 'model_version', 'model_signature' and 'label_vocab_path'.
Returns:
A list of JSON objects, one for each chart.
"""
label_vocab = inference_utils.get_label_vocab(
request.args.get('label_vocab_path'))
try:
if request.method != 'GET':
logger.error('%s requests are forbidden.', request.method)
return http_util.Respond(request, {'error': 'invalid non-GET request'},
'application/json', code=405)
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
indices_to_infer = sorted(self.updated_example_indices)
examples_to_infer = [self.examples[index] for index in indices_to_infer]
infer_objs = []
for model_num in xrange(len(inference_addresses)):
serving_bundle = inference_utils.ServingBundle(
inference_addresses[model_num],
model_names[model_num],
request.args.get('model_type'),
model_versions[model_num],
model_signatures[model_num],
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'))
(predictions, _) = inference_utils.run_inference_for_inference_results(
examples_to_infer, serving_bundle)
infer_objs.append(predictions)
resp = {'indices': indices_to_infer, 'results': infer_objs}
self.updated_example_indices = set()
return http_util.Respond(request, {'inferences': json.dumps(resp),
'vocab': json.dumps(label_vocab)},
'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, {'error': e.message},
'application/json', code=400)
except AbortionError as e:
return http_util.Respond(request, {'error': e.details},
'application/json', code=400)
@wrappers.Request.application
def _eligible_features_from_example_handler(self, request):
"""Returns a list of JSON objects for each feature in the example.
Args:
request: A request for features.
Returns:
A list with a JSON object for each feature.
Numeric features are represented as {name: observedMin: observedMax:}.
      Categorical features are represented as {name: samples:[]}.
"""
features_list = inference_utils.get_eligible_features(
self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
return http_util.Respond(request, features_list, 'application/json')
@wrappers.Request.application
def _infer_mutants_handler(self, request):
"""Returns JSON for the `vz-line-chart`s for a feature.
Args:
request: A request that should contain 'feature_name', 'example_index',
'inference_address', 'model_name', 'model_type', 'model_version', and
'model_signature'.
Returns:
A list of JSON objects, one for each chart.
"""
try:
if request.method != 'GET':
logger.error('%s requests are forbidden.', request.method)
return http_util.Respond(request, {'error': 'invalid non-GET request'},
'application/json', code=405)
example_index = int(request.args.get('example_index', '0'))
feature_name = request.args.get('feature_name')
examples = (self.examples if example_index == -1
else [self.examples[example_index]])
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
serving_bundles = []
for model_num in xrange(len(inference_addresses)):
serving_bundles.append(inference_utils.ServingBundle(
inference_addresses[model_num],
model_names[model_num],
request.args.get('model_type'),
model_versions[model_num],
model_signatures[model_num],
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor')))
viz_params = inference_utils.VizParams(
request.args.get('x_min'), request.args.get('x_max'),
self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,
request.args.get('feature_index_pattern'))
json_mapping = inference_utils.mutant_charts_for_feature(
examples, feature_name, serving_bundles, viz_params)
return http_util.Respond(request, json_mapping, 'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, {'error': e.message},
'application/json', code=400)
|
the-stack_106_21899
|
import os
from typing import Callable, Optional, Dict
import dill
from easypl.datasets.base import PathBaseDataset
class DirDatasetClassification(PathBaseDataset):
"""
Dataset implementation for images in directory on disk (stored images paths in RAM).
Require root_path/.../image_path structure.
Attributes
----------
root_path: str
path of directory with images
transform: Optional
albumentations transform or None
return_label: bool
if True return (image, label), else return only image
label_parser: Callable
function for parsing label from relative path
"""
def __init__(
self,
root_path: str,
label_parser: Callable,
transform: Optional = None,
return_label: bool = True
):
super().__init__(image_prefix=root_path, transform=transform)
self.return_label = return_label
self.label_parser = label_parser
self.image_paths = []
self.labels = []
self.__load(root_path)
def __get_label(self, path):
if self.label_parser is not None:
return dill.loads(self.label_parser)(path)
    def __load(self, root_path):
        for root, _, files in os.walk(root_path):
            for file_name in files:
                # store the path relative to root_path so images in nested directories resolve correctly
                image_path = os.path.relpath(os.path.join(root, file_name), root_path)
                self.image_paths.append(image_path)
                if self.return_label:
                    self.labels.append(self.__get_label(image_path))
def __len__(
self
) -> int:
"""
Return length of dataset
Returns
-------
int
"""
return len(self.image_paths)
def __getitem__(
self,
idx: int
) -> Dict:
"""
Read object of dataset by index
Attributes
----------
idx: int
index of object in dataset
Returns
-------
Dict
{"image": ...} or {"image": ..., "target": ...}
"""
image_path = self.image_paths[idx]
image = self._read_image(image_path)
if not self.return_label:
return {
'image': image
}
label = self.labels[idx]
return {
'image': image,
'target': label
}
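# A minimal usage sketch (hypothetical paths; assumes the class label is encoded in the
# parent directory name of each image). Note that the parser must be serialized with
# dill.dumps, because __get_label deserializes it via dill.loads.
#   parser = dill.dumps(lambda rel_path: os.path.basename(os.path.dirname(rel_path)))
#   dataset = DirDatasetClassification(
#       root_path='/data/images',
#       label_parser=parser,
#       transform=None,
#       return_label=True,
#   )
#   sample = dataset[0]  # {'image': ..., 'target': ...}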
|
the-stack_106_21900
|
# Calculate weighted average of coins from coinmarketcap.com
from requests import Request, Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
from influxdb import InfluxDBClient
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
parameters = {
'start':'1',
'limit':'10',
'convert':'USD'
}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': 'd9cada5c-a4cd-452f-bcc7-8d96559720d7',
}
session = Session()
session.headers.update(headers)
client = InfluxDBClient('localhost', 8086, 'root', 'root', 'Market Cap Weighted Average')
client.create_database('Market Cap Weighted Average')
try:
response = session.get(url, params=parameters)
coin_data = json.loads(response.text)
json_string = json.dumps( coin_data, indent =2)
#print(json_string)
except (ConnectionError, Timeout, TooManyRedirects) as e:
print(e)
exit()
print()
total_market_cap=0
marketCapWeight = []
for i in range(0,10):
total_market_cap+=int(coin_data["data"][i]["quote"]["USD"]["market_cap"])
for i in range(0,10):
marketCapWeight.append(int(coin_data["data"][i]["quote"]["USD"]["market_cap"]) / total_market_cap)
json_body = [
{
"measurement": coin_data['data'][i]['name'],
"time": coin_data["status"]["timestamp"] ,
"fields": {
"Coin Weight": marketCapWeight[i]
}
}
]
client.write_points(json_body)
query_string = "select * from " + coin_data['data'][0]['name'] + ';'
ans = list(client.query(query_string))
print(ans)
result = []
for i in range(0,10):
    pass  # placeholder body; the per-coin query logic below is left commented out
# cpu_points = list(ans.get_points(measurement = coin_data['data'][i]['name']))
# json_string = json.dumps( cpu_points, indent =2)
# # result.append(json_string)
print(result)
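# A minimal sketch of the market-cap weighted average price itself (the name
# weighted_avg_price is introduced here for illustration; "price" is the USD quote
# field of the listings response):
weighted_avg_price = 0.0
for i in range(0, 10):
    weighted_avg_price += marketCapWeight[i] * coin_data["data"][i]["quote"]["USD"]["price"]
print("Market-cap weighted average price (USD):", weighted_avg_price)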
|
the-stack_106_21901
|
import numpy as np
import pdb
def muscleVolumeCalculator(subjectHeight,subjectMass):
# This function calculates subject's muscle volume based on its height (in meters) and mass (in kg).
# Import OpenSim Libraries
# import org.opensim.modeling.*
# filename = 'muscleData.xlsx';
# [pathstr,~,~] = fileparts(mfilename('fullpath'));
# subjectHeight = 1.706; %input('Enter subject height in [m]: ');
# subjectMass = 76.2; %input('Enter subject mass in [kg]: ');
# [~,~,~,~,~,~] = importMuscleData([pathstr,'\',filename]);
# Raw data from Handsfield (2014)
# DO NOT CHANGE ANYTHING, UNLESS YOU KNOW EXACTLY WHAT YOU ARE DOING!!!
name = ['gluteus maximus','adductor magnus','gluteus medius','psoas','iliacus',
'sartorius','adductor longus','gluteus minimus','adductor brevis','gracilis',
'pectineus','tensor fasciae latae','obturator externus','piriformis',
'quadratus femoris','obturator internus','small ext. rotators','vastus lateralis',
'vastus medialis','vastus intermedius','rectus femoris','semimembranosus',
'biceps femoris: l.h.','semitendinosus','biceps femoris: s.h.','popliteus',
'soleus','med gastrocnemius','lat gastrocnemius','tibialis anterior',
'peroneals (brev/long)','tibialis posterior','extensors (EDL/EHL)',
'flexor hallucis longus','flexor digit. longus']
osimAbbr = ['glut_max','add_mag','glut_med','psoas','iliacus',
'sar','add_long','glut_min','add_brev','grac',
'pect','tfl', '','peri','quad_fem', '', '',
'vas_lat','vas_med','vas_int',
'rect_fem','semimem','bifemlh','semiten','bifemsh', '',
'soleus','med_gas','lat_gas','tib_ant',
'per_','tib_post','ext_','flex_hal','flex_dig']
b1 = np.array([0.123,0.0793,0.0478,0.055,0.0248,0.0256,0.0259,0.0129,0.0137,0.0138,0.0107,0.0136,0.00349,
0.00372,0.00475,0.00252,0.00172,0.125,0.0631,0.0273,0.0371,0.0319,0.0256,0.0285,0.016,0.00298,
0.0507,0.0348,0.0199,0.0161,0.0194,0.0104,0.0132,0.0137,0.00259])
b2 = np.array([-25.4,-4.7,-16.9,-117,0.383,-18.2,-21.9,12.6,6.2,6.07,-9.94,-31.5,28.3,16.2,-1.45,8.68,
3.85,-55.7,-16.3,76.5,4.97,18.2,24.3,-16.8,-13.8,2.11,78.2,9.42,8.21,20.3,-7.43,30.8,
8.7,-18.9,11.6])
# Currently not used. PCSA and pennation angle are read from the OpenSim model
# PCSA = [46.8;45.5;45.6;16.5;12.4;3.4;15.4;8.2;9.7;4.7;5.1;4;5.4;4.7;4.4;3.3;2;59.3;59.1;39;...
# 34.8;37.8;25.9;9.3;7.8;2.5;124.1;50.1;23;15.8;19.3;28.4;10.2;16.9;7.5];
#
# penAngle = [21.9;15.5;20.5;10.6;14.3;1.3;7.1;0;6.1;8.2;0;0;0;0;0;0;0;18.4;29.6;4.5;0;15.1;11.6;...
# 12.9;12.3;0;28.3;9.9;12;9.6;0;13.7;10;16.9;13.6];
# Calculate muscle volumes.
V_m_tot = 47 * subjectHeight * subjectMass + 1285
muscleVolume = b1 * V_m_tot + b2
return osimAbbr, muscleVolume
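# A minimal usage sketch (hypothetical height/mass values):
if __name__ == '__main__':
    abbr, volumes = muscleVolumeCalculator(subjectHeight=1.75, subjectMass=70.0)
    for muscle_abbr, volume in zip(abbr, volumes):
        print('{}: {:.1f}'.format(muscle_abbr, volume))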
|
the-stack_106_21902
|
import os
import pprint
import argparse
import wandb
import torch
import numpy as np
from dataset import ShapeNet15k
from models.networks import SetTransformer, Generator
from models.pct import PCT
from trainers.losses import DSMLoss, AnnealedDSMLoss
from trainers.samplers import LangevinSampler, AnnealedLangevinSampler
from trainers.base import Trainer
def parse_args():
root_dir = os.path.abspath(os.path.dirname(__file__))
parser = argparse.ArgumentParser()
# Environment settings
parser.add_argument(
"--data_dir",
type=str,
default=os.path.join(root_dir, "data"),
help="Path to dataset directory.",
)
parser.add_argument(
"--ckpt_dir",
type=str,
default=os.path.join(root_dir, "checkpoints"),
help=(
"Path to checkpoint directory. "
"A new one will be created if the directory does not exist."
),
)
parser.add_argument(
"--name",
type=str,
required=True,
help=(
"Name of the current experiment. "
"Checkpoints will be stored in '{ckpt_dir}/{name}/'. "
"A new one will be created if the directory does not exist."
),
)
# Training settings
parser.add_argument(
"--seed", type=int, default=0, help="Manual seed for reproducibility."
)
parser.add_argument(
"--cate", type=str, default="airplane", help="ShapeNet15k category."
)
parser.add_argument(
"--resume",
default=False,
action="store_true",
help="Resumes training using the last checkpoint in ckpt_dir.",
)
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Minibatch size used during training and testing.",
)
parser.add_argument(
"--tr_sample_size",
type=int,
default=2048,
help="Number of points sampled from each training sample.",
)
parser.add_argument(
"--te_sample_size",
type=int,
default=1024,
help="Number of points sampled from each testing sample.",
)
parser.add_argument(
"--max_epoch", type=int, default=2000, help="Total training epoch."
)
parser.add_argument(
"--log_every_n_step",
type=int,
default=20,
help="Trigger logger at every N step.",
)
parser.add_argument(
"--val_every_n_epoch",
type=int,
default=50,
help="Validate model at every N epoch.",
)
parser.add_argument(
"--ckpt_every_n_epoch",
type=int,
default=200,
help="Checkpoint trainer at every N epoch.",
)
parser.add_argument(
"--device",
type=str,
default=("cuda:0" if torch.cuda.is_available() else "cpu"),
help="Accelerator to use.",
)
return parser.parse_args()
def main(args):
"""
Training entry point.
"""
# Print args
pprint.pprint(vars(args))
# Fix seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# Setup checkpoint directory
if not os.path.exists(args.ckpt_dir):
os.mkdir(args.ckpt_dir)
ckpt_subdir = os.path.join(args.ckpt_dir, args.name)
if not os.path.exists(ckpt_subdir):
os.mkdir(ckpt_subdir)
# Setup logging
wandb.init(project="setdiffusion")
# Setup dataloaders
train_loader = torch.utils.data.DataLoader(
dataset=ShapeNet15k(
root=args.data_dir,
cate=args.cate,
split="train",
random_sample=True,
sample_size=args.tr_sample_size,
),
batch_size=args.batch_size,
shuffle=True,
num_workers=2,
pin_memory=True,
drop_last=True,
)
val_loader = torch.utils.data.DataLoader(
dataset=ShapeNet15k(
root=args.data_dir,
cate=args.cate,
split="val",
random_sample=False,
sample_size=args.te_sample_size,
),
batch_size=args.batch_size,
shuffle=False,
num_workers=2,
pin_memory=True,
drop_last=False,
)
# Setup model, loss, optimizer and scheduler
#net = SetTransformer(dim_input=4)
net = Generator()
loss = AnnealedDSMLoss()
sample = AnnealedLangevinSampler(loss.sigmas)
opt = torch.optim.Adam(net.parameters(), lr=1e-4, betas=(0.9, 0.999))
sch = torch.optim.lr_scheduler.LambdaLR(
opt, lr_lambda=lambda e: 1.0 - max(0, (e / args.max_epoch) - 0.5)
)
# Setup trainer
trainer = Trainer(
net=net,
loss=loss,
sample=sample,
opt=opt,
sch=sch,
device=args.device,
batch_size=args.batch_size,
max_epoch=args.max_epoch,
log_every_n_step=args.log_every_n_step,
val_every_n_epoch=args.val_every_n_epoch,
ckpt_every_n_epoch=args.ckpt_every_n_epoch,
ckpt_dir=ckpt_subdir,
)
# Load checkpoint
if args.resume:
trainer.load_checkpoint()
# Start training
trainer.train(train_loader, val_loader)
if __name__ == "__main__":
main(parse_args())
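# A minimal usage sketch (assumptions: this script is saved as train.py and the ShapeNet15k
# data has been placed under ./data). Example invocations:
#   python train.py --name airplane_run --cate airplane --batch_size 32
#   python train.py --name airplane_run --resume   # continue from the last checkpoint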
|
the-stack_106_21905
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Bart model."""
import random
from typing import Dict, Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPastAndCrossAttentions,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
# Public API
from ...modeling_tf_utils import (
DUMMY_INPUTS,
TFCausalLanguageModelingLoss,
TFPreTrainedModel,
TFSharedEmbeddings,
TFWrappedEmbeddings,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_bart import BartConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "facebook/bart-large"
_CONFIG_FOR_DOC = "BartConfig"
_TOKENIZER_FOR_DOC = "BartTokenizer"
LARGE_NEGATIVE = -1e8
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids = tf.where(
shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
)
if tf.executing_eagerly():
# "Verify that `labels` has only positive values and -100"
assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
# Make sure the assertion op is called by wrapping the result in an identity no-op
with tf.control_dependencies([assert_gte0]):
shifted_input_ids = tf.identity(shifted_input_ids)
return shifted_input_ids
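# A small worked illustration (hypothetical values): with pad_token_id=1 and
# decoder_start_token_id=2, an input of [[5, 6, 7, 2]] becomes [[2, 5, 6, 7]] -- the start
# token is prepended and the last token is dropped; any -100 label values carried into the
# shifted result are replaced by pad_token_id.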
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
mask_cond = tf.range(shape_list(mask)[-1])
mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
if past_key_values_length > 0:
mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
src_len = shape_list(mask)[1]
tgt_len = tgt_len if tgt_len is not None else src_len
one_cst = tf.constant(1.0)
mask = tf.cast(mask, dtype=one_cst.dtype)
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
return (one_cst - expanded_mask) * LARGE_NEGATIVE
class TFBartLearnedPositionalEmbedding(TFSharedEmbeddings):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)
def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_shape[:2]
positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
return super().call(positions + self.offset)
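    # A small illustration of the offset (hypothetical shapes): for input_shape
    # (bsz=2, seq_len=3) and past_key_values_length=0, `positions` is [0, 1, 2] and the
    # lookup above uses embedding indices [2, 3, 4] because self.offset == 2.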
class TFBartAttention(tf.keras.layers.Layer):
"""Multi-headed attention from "Attention Is All You Need"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = tf.keras.layers.Dropout(dropout)
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training=False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_weights),
[bsz * self.num_heads, tgt_len, src_len],
message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}",
)
if attention_mask is not None:
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attention_mask),
[bsz, 1, tgt_len, src_len],
message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}",
)
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_weights = tf.nn.softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(layer_head_mask),
[self.num_heads],
message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}",
)
attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
attn_weights, (bsz, self.num_heads, tgt_len, src_len)
)
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_output),
[bsz * self.num_heads, tgt_len, self.head_dim],
message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}",
)
attn_output = tf.transpose(
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
return attn_output, attn_weights, past_key_value
class TFBartEncoderLayer(tf.keras.layers.Layer):
def __init__(self, config: BartConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFBartAttention(
self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False):
"""
Args:
hidden_states (`tf.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
attention_mask (`tf.Tensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
*(encoder_attention_heads,)*
"""
residual = hidden_states
hidden_states, self_attn_weights, _ = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
)
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(hidden_states),
shape_list(residual),
message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return hidden_states, self_attn_weights
class TFBartDecoderLayer(tf.keras.layers.Layer):
def __init__(self, config: BartConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFBartAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
name="self_attn",
is_decoder=True,
)
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.encoder_attn = TFBartAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
name="encoder_attn",
is_decoder=True,
)
self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(
self,
hidden_states,
attention_mask: Optional[tf.Tensor] = None,
encoder_hidden_states: Optional[tf.Tensor] = None,
encoder_attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
cross_attn_layer_head_mask: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[tf.Tensor]] = None,
training=False,
) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
"""
Args:
hidden_states (`tf.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
attention_mask (`tf.Tensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
encoder_hidden_states (`tf.Tensor`):
cross attention input to the layer of shape *(seq_len, batch, embed_dim)*
encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
*(decoder_attention_heads,)*
cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
*(decoder_attention_heads,)*
past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return (
hidden_states,
self_attn_weights,
cross_attn_weights,
present_key_value,
)
class TFBartPretrainedModel(TFPreTrainedModel):
config_class = BartConfig
base_model_prefix = "model"
@property
def dummy_inputs(self):
pad_token = 1
input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
decoder_input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
dummy_inputs = {
"decoder_input_ids": decoder_input_ids,
"attention_mask": tf.math.not_equal(input_ids, pad_token),
"input_ids": input_ids,
}
return dummy_inputs
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
"decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
"decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
BART_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
first positional argument :
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`BartConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
BART_GENERATION_EXAMPLE = r"""
    Summarization example::
        >>> from transformers import BartTokenizer, TFBartForConditionalGeneration, BartConfig
        >>> model = TFBartForConditionalGeneration.from_pretrained('facebook/bart-large')
        >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
        >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='tf')
        >>> # Generate Summary
        >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
        >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
    Mask filling example::
        >>> from transformers import BartTokenizer, TFBartForConditionalGeneration
        >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
        >>> TXT = "My friends are <mask> but they eat too many carbs."
        >>> model = TFBartForConditionalGeneration.from_pretrained('facebook/bart-large')
        >>> input_ids = tokenizer([TXT], return_tensors='tf')['input_ids']
        >>> logits = model(input_ids).logits
        >>> probs = tf.nn.softmax(logits[0])
        >>> # probs[5] is associated with the mask token
"""
BART_INPUTS_DOCSTRING = r"""
Args:
input_ids (`tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
If not provided, a default mask that ignores pad tokens will be created. It is not recommended to set this manually for most use cases.
head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tf.FloatTensor`, *optional*):
A sequence of hidden states of shape `(batch_size, sequence_length, hidden_size)` at the output of the last
layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*, defaults to `True`):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`). Set to `False` during training, `True` during generation.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@keras_serializable
class TFBartEncoder(tf.keras.layers.Layer):
config_class = BartConfig
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`TFBartEncoderLayer`].
Args:
config: BartConfig
"""
def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.layerdrop = config.encoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.embed_tokens = embed_tokens
self.embed_positions = TFBartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.layers = [TFBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
head_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
"""
Args:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs["inputs_embeds"] + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = self.dropout(hidden_states, training=inputs["training"])
# check attention mask and invert
if inputs["attention_mask"] is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(inputs["attention_mask"])
else:
attention_mask = None
encoder_states = () if inputs["output_hidden_states"] else None
all_attentions = () if inputs["output_attentions"] else None
# check if head_mask has a correct number of layers specified if desired
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if inputs["head_mask"] is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(inputs["head_mask"])[0],
len(self.layers),
message=f"The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs['head_mask'])[0]}.",
)
# encoder layers
for idx, encoder_layer in enumerate(self.layers):
if inputs["output_hidden_states"]:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if inputs["training"] and (dropout_probability < self.layerdrop): # skip the layer
continue
hidden_states, attn = encoder_layer(
hidden_states,
attention_mask,
inputs["head_mask"][idx] if inputs["head_mask"] is not None else None,
)
if inputs["output_attentions"]:
all_attentions += (attn,)
if inputs["output_hidden_states"]:
encoder_states = encoder_states + (hidden_states,)
if not inputs["return_dict"]:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
@keras_serializable
class TFBartDecoder(tf.keras.layers.Layer):
config_class = BartConfig
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBartDecoderLayer`]
Args:
config: BartConfig
embed_tokens: output embedding
"""
def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.padding_idx = config.pad_token_id
self.embed_tokens = embed_tokens
self.layerdrop = config.decoder_layerdrop
self.embed_positions = TFBartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.layers = [TFBartDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
self.dropout = tf.keras.layers.Dropout(config.dropout)
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Args:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
past_key_values_length = (
shape_list(inputs["past_key_values"][0][0])[2] if inputs["past_key_values"] is not None else 0
)
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale
hidden_states = inputs["inputs_embeds"]
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
else:
combined_attention_mask = _expand_mask(
tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
)
if inputs["attention_mask"] is not None:
combined_attention_mask = combined_attention_mask + _expand_mask(
inputs["attention_mask"], tgt_len=input_shape[-1]
)
if inputs["encoder_hidden_states"] is not None and inputs["encoder_attention_mask"] is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
inputs["encoder_attention_mask"] = _expand_mask(inputs["encoder_attention_mask"], tgt_len=input_shape[-1])
hidden_states = self.layernorm_embedding(hidden_states + positions)
hidden_states = self.dropout(hidden_states, training=inputs["training"])
# decoder layers
all_hidden_states = () if inputs["output_hidden_states"] else None
all_self_attns = () if inputs["output_attentions"] else None
all_cross_attns = () if (inputs["output_attentions"] and inputs["encoder_hidden_states"] is not None) else None
present_key_values = () if inputs["use_cache"] else None
# check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
for attn_mask in ["head_mask", "cross_attn_head_mask"]:
if inputs[attn_mask] is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(inputs[attn_mask])[0],
len(self.layers),
message=f"The {attn_mask} should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs[attn_mask])[0]}.",
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if inputs["output_hidden_states"]:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if inputs["training"] and (dropout_probability < self.layerdrop):
continue
past_key_value = inputs["past_key_values"][idx] if inputs["past_key_values"] is not None else None
hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
hidden_states,
attention_mask=combined_attention_mask,
encoder_hidden_states=inputs["encoder_hidden_states"],
encoder_attention_mask=inputs["encoder_attention_mask"],
layer_head_mask=inputs["head_mask"][idx] if inputs["head_mask"] is not None else None,
cross_attn_layer_head_mask=inputs["cross_attn_head_mask"][idx]
if inputs["cross_attn_head_mask"] is not None
else None,
past_key_value=past_key_value,
)
if inputs["use_cache"]:
present_key_values += (present_key_value,)
if inputs["output_attentions"]:
all_self_attns += (layer_self_attn,)
if inputs["encoder_hidden_states"] is not None:
all_cross_attns += (layer_cross_attn,)
if inputs["output_hidden_states"]:
all_hidden_states += (hidden_states,)
if inputs["output_attentions"]:
all_self_attns = list(all_self_attns)
if inputs["encoder_hidden_states"] is not None:
all_cross_attns = list(all_cross_attns)
if inputs["use_cache"]:
present_key_values = (inputs["encoder_hidden_states"], present_key_values)
if not inputs["return_dict"]:
return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
else:
return TFBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attns,
)
@keras_serializable
class TFBartMainLayer(tf.keras.layers.Layer):
config_class = BartConfig
def __init__(self, config: BartConfig, load_weight_prefix=None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name="model.shared")
# set tf scope correctly
if load_weight_prefix is None:
load_weight_prefix = "model.shared"
with tf.compat.v1.variable_scope(load_weight_prefix) as shared_abs_scope_name:
pass
# Wraps the layer to avoid problems with weight restoring and to ensure we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
embed_tokens.vocab_size = self.shared.vocab_size
embed_tokens.hidden_size = self.shared.hidden_size
self.encoder = TFBartEncoder(config, embed_tokens, name="encoder")
self.decoder = TFBartDecoder(config, embed_tokens, name="decoder")
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared.weight = new_embeddings
self.shared.vocab_size = self.shared.weight.shape[0]
# retrieve correct absolute scope for embed token wrapper
with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name:
pass
# Wraps the layer to avoid problems with weight restoring and to ensure we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
self.encoder.set_embed_tokens(embed_tokens)
self.decoder.set_embed_tokens(embed_tokens)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["decoder_input_ids"] is None and inputs["decoder_inputs_embeds"] is None:
inputs["use_cache"] = False
inputs["output_hidden_states"] = (
inputs["output_hidden_states"]
if inputs["output_hidden_states"] is not None
else self.config.output_hidden_states
)
if inputs["decoder_input_ids"] is None and inputs["input_ids"] is not None:
inputs["decoder_input_ids"] = shift_tokens_right(
inputs["input_ids"], self.config.pad_token_id, self.config.decoder_start_token_id
)
if inputs["encoder_outputs"] is None:
inputs["encoder_outputs"] = self.encoder(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
# If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
elif inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], TFBaseModelOutput):
inputs["encoder_outputs"] = TFBaseModelOutput(
last_hidden_state=inputs["encoder_outputs"][0],
hidden_states=inputs["encoder_outputs"][1] if len(inputs["encoder_outputs"]) > 1 else None,
attentions=inputs["encoder_outputs"][2] if len(inputs["encoder_outputs"]) > 2 else None,
)
# If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
elif not inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], tuple):
inputs["encoder_outputs"] = inputs["encoder_outputs"].to_tuple()
decoder_outputs = self.decoder(
inputs["decoder_input_ids"],
attention_mask=inputs["decoder_attention_mask"],
encoder_hidden_states=inputs["encoder_outputs"][0],
encoder_attention_mask=inputs["attention_mask"],
head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
if not inputs["return_dict"]:
return decoder_outputs + inputs["encoder_outputs"]
return TFSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=inputs["encoder_outputs"].last_hidden_state,
encoder_hidden_states=inputs["encoder_outputs"].hidden_states,
encoder_attentions=inputs["encoder_outputs"].attentions,
)
@add_start_docstrings(
"The bare BART Model outputting raw hidden-states without any specific head on top.",
BART_START_DOCSTRING,
)
class TFBartModel(TFBartPretrainedModel):
_requires_load_weight_prefix = True
def __init__(self, config: BartConfig, load_weight_prefix=None, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model")
def get_encoder(self):
return self.model.encoder
def get_decoder(self):
return self.model.decoder
@add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSeq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.model(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
decoder_input_ids=inputs["decoder_input_ids"],
decoder_attention_mask=inputs["decoder_attention_mask"],
head_mask=inputs["head_mask"],
decoder_head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
encoder_outputs=inputs["encoder_outputs"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["inputs_embeds"],
decoder_inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqModelOutput(
last_hidden_state=output.last_hidden_state,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
@add_start_docstrings(
"The BART Model with a language modeling head. Can be used for summarization.",
BART_START_DOCSTRING,
)
class TFBartForConditionalGeneration(TFBartPretrainedModel, TFCausalLanguageModelingLoss):
_keys_to_ignore_on_load_unexpected = [
r"model.encoder.embed_tokens.weight",
r"model.decoder.embed_tokens.weight",
]
_requires_load_weight_prefix = True
def __init__(self, config, load_weight_prefix=None, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model")
self.use_cache = config.use_cache
# final_logits_bias is registered as a buffer in PyTorch, so it is not trainable here for the sake of consistency.
self.final_logits_bias = self.add_weight(
name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
)
def get_decoder(self):
return self.model.decoder
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return self.get_input_embeddings()
def set_output_embeddings(self, value):
self.set_input_embeddings(value)
def get_bias(self):
return {"final_logits_bias": self.final_logits_bias}
def set_bias(self, value):
self.final_logits_bias = value["final_logits_bias"]
@add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(BART_GENERATION_EXAMPLE)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[TFBaseModelOutput] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["labels"] is not None:
inputs["labels"] = tf.where(
inputs["labels"] == self.config.pad_token_id,
tf.fill(shape_list(inputs["labels"]), -100),
inputs["labels"],
)
inputs["use_cache"] = False
if inputs["decoder_input_ids"] is None:
inputs["decoder_input_ids"] = shift_tokens_right(
inputs["labels"], self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
decoder_input_ids=inputs["decoder_input_ids"],
encoder_outputs=inputs["encoder_outputs"],
decoder_attention_mask=inputs["decoder_attention_mask"],
head_mask=inputs["head_mask"],
decoder_head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["inputs_embeds"],
decoder_inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
lm_logits = self.model.shared(outputs[0], mode="linear")
lm_logits = lm_logits + self.final_logits_bias
masked_lm_loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], lm_logits)
if not inputs["return_dict"]:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return TFSeq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values, # index 1 of d outputs
decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs
decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs
cross_attentions=outputs.cross_attentions, # index 4 of d outputs
encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out
encoder_attentions=outputs.encoder_attentions, # 2 of e out
)
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqLMOutput(
logits=output.logits,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past,
attention_mask,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
**kwargs,
) -> Dict:
assert past is not None and len(past) in {1, 2}, f"past has to be an iterable of length 1 or 2, got {past}"
if len(past) == 1:
assert isinstance(past[0], tf.Tensor), f"`past[0]` has to be of type `tf.Tensor`, but is {type(past[0])}"
encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0])
past_key_values = None
else:
assert (
len(past) == 2
), "`past` has to be of length 2 with the encoder_outputs at the first position and past_key_values at the second position."
encoder_outputs, past_key_values = past
if isinstance(encoder_outputs, tuple):
assert isinstance(
encoder_outputs[0], tf.Tensor
), f"`encoder_outputs[0]` has to be of type `tf.Tensor`, but is {type(encoder_outputs[0])}"
encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0])
elif isinstance(encoder_outputs, tf.Tensor):
encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs)
assert (
past_key_values
), f"decoder cached states must be truthy. got {past_key_values} from the 2nd element of past"
decoder_input_ids = decoder_input_ids[:, -1:]
assert isinstance(
encoder_outputs, TFBaseModelOutput
), f"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}."
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past_key_values,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@staticmethod
def _reorder_cache(past, beam_idx):
if len(past) == 1:
return past
past_key_values = past[1]
reordered_past = ()
for layer_past_key_values in past_key_values:
reordered_past += (
tuple(tf.gather(layer_past_key_value, beam_idx) for layer_past_key_value in layer_past_key_values[:2])
+ layer_past_key_values[2:],
)
return (past[0], reordered_past)
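# Layout sketch for the `past` argument handled above (illustrative, inferred
# from the asserts in prepare_inputs_for_generation, not a definitive spec):
# during generation `past` is either
#   (encoder_last_hidden_state,)            at the first decoding step, or
#   (encoder_outputs, past_key_values)      at subsequent steps,
# where `past_key_values` holds one tuple of cached self- and cross-attention
# key/value tensors per decoder layer, and `_reorder_cache` gathers only the
# self-attention entries along the beam axis.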
|
the-stack_106_21908
|
import numpy
import six
from chainer import cuda
def get_conv_outsize(size, k, s, p, cover_all=False):
if cover_all:
return (size + p * 2 - k + s - 1) // s + 1
else:
return (size + p * 2 - k) // s + 1
def get_deconv_outsize(size, k, s, p, cover_all=False):
if cover_all:
return s * (size - 1) + k - s + 1 - 2 * p
else:
return s * (size - 1) + k - 2 * p
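# A quick illustration of the two size formulas above (assumed example values):
# a 32-pixel axis with a 3x3 kernel, stride 1 and padding 1 keeps its size,
# since (32 + 2*1 - 3) // 1 + 1 == 32, and the matching deconvolution maps it
# back: 1 * (32 - 1) + 3 - 2*1 == 32.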
def im2col_cpu(img, kh, kw, sy, sx, ph, pw, pval=0, cover_all=False):
n, c, h, w = img.shape
out_h = get_conv_outsize(h, kh, sy, ph, cover_all)
out_w = get_conv_outsize(w, kw, sx, pw, cover_all)
img = numpy.pad(img,
((0, 0), (0, 0), (ph, ph + sy - 1), (pw, pw + sx - 1)),
mode='constant', constant_values=(pval,))
col = numpy.ndarray((n, c, kh, kw, out_h, out_w), dtype=img.dtype)
for i in six.moves.range(kh):
i_lim = i + sy * out_h
for j in six.moves.range(kw):
j_lim = j + sx * out_w
col[:, :, i, j, :, :] = img[:, :, i:i_lim:sy, j:j_lim:sx]
return col
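# Shape sketch for im2col_cpu (illustrative values): an input of shape
# (n, c, h, w) = (1, 1, 4, 4) with kh = kw = 3, sy = sx = 1 and ph = pw = 0
# gives out_h = out_w = 2, so the returned column tensor has shape
# (1, 1, 3, 3, 2, 2); col2im_cpu below performs the reverse accumulation.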
def im2col_gpu(img, kh, kw, sy, sx, ph, pw, cover_all=False):
n, c, h, w = img.shape
out_h = get_conv_outsize(h, kh, sy, ph, cover_all)
out_w = get_conv_outsize(w, kw, sx, pw, cover_all)
col = cuda.cupy.empty((n, c, kh, kw, out_h, out_w), dtype=img.dtype)
cuda.elementwise(
'raw T img, int32 h, int32 w, int32 out_h, int32 out_w,'
'int32 kh, int32 kw, int32 sy, int32 sx, int32 ph, int32 pw',
'T col',
'''
int c0 = i / (kh * kw * out_h * out_w);
int ky = i / (kw * out_h * out_w) % kh;
int kx = i / (out_h * out_w) % kw;
int out_y = i / out_w % out_h;
int out_x = i % out_w;
int in_y = ky + out_y * sy - ph;
int in_x = kx + out_x * sx - pw;
if (in_y >= 0 && in_y < h && in_x >= 0 && in_x < w) {
col = img[in_x + w * (in_y + h * c0)];
} else {
col = 0;
}
''',
'im2col')(img.reduced_view(),
h, w, out_h, out_w, kh, kw, sy, sx, ph, pw, col)
return col
def col2im_cpu(col, sy, sx, ph, pw, h, w):
n, c, kh, kw, out_h, out_w = col.shape
img = numpy.zeros((n, c, h + 2 * ph + sy - 1, w + 2 * pw + sx - 1),
dtype=col.dtype)
for i in six.moves.range(kh):
i_lim = i + sy * out_h
for j in six.moves.range(kw):
j_lim = j + sx * out_w
img[:, :, i:i_lim:sy, j:j_lim:sx] += col[:, :, i, j, :, :]
return img[:, :, ph:h + ph, pw:w + pw]
def col2im_gpu(col, sy, sx, ph, pw, h, w):
n, c, kh, kw, out_h, out_w = col.shape
img = cuda.cupy.empty((n, c, h, w), dtype=col.dtype)
cuda.elementwise(
'raw T col, int32 h, int32 w, int32 out_h, int32 out_w,'
'int32 kh, int32 kw, int32 sy, int32 sx, int32 ph, int32 pw',
'T img',
'''
int c0 = i / (h * w);
int y = i / w % h + ph;
int x = i % w + pw;
int out_y_0 = max(0, (y - kh + sy) / sy);
int out_y_1 = min(out_h, (y + sy) / sy);
int out_x_0 = max(0, (x - kw + sx) / sx);
int out_x_1 = min(out_w, (x + sx) / sx);
T val = 0;
for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {
int ky = y - out_y * sy;
for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {
int kx = x - out_x * sx;
int k = out_y + out_h * (kx + kw * (ky + kh * c0));
val = val + col[out_x + out_w * k];
}
}
img = val;
''',
'col2im')(col.reduced_view(),
h, w, out_h, out_w, kh, kw, sy, sx, ph, pw, img)
return img
|
the-stack_106_21910
|
# Copyright (c) 2018, Arm Limited and affiliates.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import platform
from os import access, F_OK
from sys import stdout
from time import sleep
from subprocess import call
from ... import detect
from ..host_tests_logger import HtrunLogger
class HostTestPluginBase:
"""! Base class for all plugins used with host tests
"""
###########################################################################
# Interface:
###########################################################################
###########################################################################
# Interface attributes defining plugin name, type etc.
###########################################################################
name = "HostTestPluginBase" # Plugin name, can be plugin class name
type = "BasePlugin" # Plugin type: ResetMethod, CopyMethod etc.
capabilities = [] # Capabilities names: what plugin can achieve
# (e.g. reset using some external command line tool)
required_parameters = [] # Parameters required for 'kwargs' in plugin APIs: e.g. self.execute()
stable = False # Determine if plugin is stable and can be used
def __init__(self):
""" ctor
"""
# Setting Host Test Logger instance
ht_loggers = {
'BasePlugin' : HtrunLogger('PLGN'),
'CopyMethod' : HtrunLogger('COPY'),
'ResetMethod' : HtrunLogger('REST'),
}
self.plugin_logger = ht_loggers.get(self.type, ht_loggers['BasePlugin'])
###########################################################################
# Interface methods
###########################################################################
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return False
def execute(self, capability, *args, **kwargs):
"""! Executes capability by name
@param capability Capability name
@param args Additional arguments
@param kwargs Additional arguments
@details Each capability may e.g. directly call some command line program or execute a built-in Python function
@return Capability call return value
"""
return False
def is_os_supported(self, os_name=None):
"""!
@return Returns True if the plugin works (is supported) under a certain OS
@param os_name String describing the OS.
See self.mbed_os_support() and self.mbed_os_info()
@details In some cases a plugin will not work under a particular OS,
mainly because the command / software used to implement the plugin
functionality is not available, e.g. on MacOS or Linux.
"""
return True
###########################################################################
# Interface helper methods - overload only if you need to have custom behaviour
###########################################################################
def print_plugin_error(self, text):
"""! Function prints error in console and exits always with False
@param text Text to print
"""
self.plugin_logger.prn_err(text)
return False
def print_plugin_info(self, text, NL=True):
"""! Function prints notification in console and exits always with True
@param text Text to print
@param NL Deprecated! Newline will be added behind text if this flag is True
"""
self.plugin_logger.prn_inf(text)
return True
def print_plugin_char(self, char):
""" Function prints char on stdout
"""
stdout.write(char)
stdout.flush()
return True
def check_mount_point_ready(self, destination_disk, init_delay=0.2, loop_delay=0.25, target_id=None, timeout=60):
"""! Waits until destination_disk is ready and can be accessed by e.g. copy commands
@return True if mount point was ready in given time, False otherwise
@param destination_disk Mount point (disk) which will be checked for readiness
@param init_delay - Initial delay time before first access check
@param loop_delay - polling delay for access check
@param timeout Mount point polling timeout in seconds
"""
if target_id:
# Wait for mount point to appear with mbed-ls
# and if it does check if mount point for target_id changed
# If mount point changed, use new mount point and check if its ready (os.access)
new_destination_disk = destination_disk
# Sometimes OSes take a long time to mount devices (up to one minute).
# Current polling time: 120x 500ms = 1 minute
self.print_plugin_info("Waiting up to %d sec for '%s' mount point (current is '%s')..."% (timeout, target_id, destination_disk))
timeout_step = 0.5
timeout = int(timeout / timeout_step)
for i in range(timeout):
# mbed_os_tools.detect.create() should be done inside the loop.
# Otherwise it will loop on same data.
mbeds = detect.create()
mbed_list = mbeds.list_mbeds() #list of mbeds present
# get first item in list with a matching target_id, if present
mbed_target = next((x for x in mbed_list if x['target_id']==target_id), None)
if mbed_target is not None:
# Only assign if mount point is present and known (not None)
if 'mount_point' in mbed_target and mbed_target['mount_point'] is not None:
new_destination_disk = mbed_target['mount_point']
break
sleep(timeout_step)
if new_destination_disk != destination_disk:
# Mount point changed, update to new mount point from mbed-ls
self.print_plugin_info("Mount point for '%s' changed from '%s' to '%s'..."% (target_id, destination_disk, new_destination_disk))
destination_disk = new_destination_disk
result = True
# Check if mount point we've promoted to be valid one (by optional target_id check above)
# Let's wait for 30 * loop_delay + init_delay max
if not access(destination_disk, F_OK):
self.print_plugin_info("Waiting for mount point '%s' to be ready..."% destination_disk, NL=False)
sleep(init_delay)
for i in range(30):
if access(destination_disk, F_OK):
result = True
break
sleep(loop_delay)
self.print_plugin_char('.')
else:
self.print_plugin_error("mount {} is not accessible ...".format(destination_disk))
result = False
return (result, destination_disk)
def check_serial_port_ready(self, serial_port, target_id=None, timeout=60):
"""! Function checks (using mbed-ls) and updates serial port name information for DUT with specified target_id.
If no target_id is specified function returns old serial port name.
@param serial_port Current serial port name
@param target_id Target ID of a device under test which serial port will be checked and updated if needed
@param timeout Serial port polling timeout in seconds
@return Serial port read from mbed-ls (the original serial_port if no target_id is given, None if the device was not detected in time)
"""
# If serial port changed (check using mbed-ls), use new serial port
new_serial_port = None
if target_id:
# Sometimes OSes take a long time to mount devices (up to one minute).
# Current polling time: 120x 500ms = 1 minute
self.print_plugin_info("Waiting up to %d sec for '%s' serial port (current is '%s')..."% (timeout, target_id, serial_port))
timeout_step = 0.5
timeout = int(timeout / timeout_step)
for i in range(timeout):
# mbed_os_tools.detect.create() should be done inside the loop. Otherwise it will loop on same data.
mbeds = detect.create()
mbed_list = mbeds.list_mbeds() #list of mbeds present
# get first item in list with a matching target_id, if present
mbed_target = next((x for x in mbed_list if x['target_id']==target_id), None)
if mbed_target is not None:
# Only assign if serial port is present and known (not None)
if 'serial_port' in mbed_target and mbed_target['serial_port'] is not None:
new_serial_port = mbed_target['serial_port']
if new_serial_port != serial_port:
# Serial port changed, update to new serial port from mbed-ls
self.print_plugin_info("Serial port for tid='%s' changed from '%s' to '%s'..." % (target_id, serial_port, new_serial_port))
break
sleep(timeout_step)
else:
new_serial_port = serial_port
return new_serial_port
def check_parameters(self, capability, *args, **kwargs):
"""! This function should be ran each time we call execute() to check if none of the required parameters is missing
@param capability Capability name
@param args Additional parameters
@param kwargs Additional parameters
@return Returns True if all parameters are passed to plugin, else return False
"""
missing_parameters = []
for parameter in self.required_parameters:
if parameter not in kwargs:
missing_parameters.append(parameter)
if len(missing_parameters):
self.print_plugin_error("execute parameter(s) '%s' missing!"% (', '.join(missing_parameters)))
return False
return True
def run_command(self, cmd, shell=True):
"""! Runs command from command line.
@param cmd Command to execute
@param shell True if shell command should be executed (eg. ls, ps)
@details Function prints 'cmd' return code if execution failed
@return True if command successfully executed
"""
result = True
try:
ret = call(cmd, shell=shell)
if ret:
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
return False
except Exception as e:
result = False
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
self.print_plugin_error(str(e))
return result
def mbed_os_info(self):
"""! Returns information about host OS
@return Returns tuple with information about OS and host platform
"""
result = (os.name,
platform.system(),
platform.release(),
platform.version(),
sys.platform)
return result
def mbed_os_support(self):
"""! Function used to determine host OS
@return Returns None if host OS is unknown, else string with name
@details This function should be ported for new OS support
"""
result = None
os_info = self.mbed_os_info()
if (os_info[0] == 'nt' and os_info[1] == 'Windows'):
result = 'Windows7'
elif (os_info[0] == 'posix' and os_info[1] == 'Linux' and ('Ubuntu' in os_info[3])):
result = 'Ubuntu'
elif (os_info[0] == 'posix' and os_info[1] == 'Linux'):
result = 'LinuxGeneric'
elif (os_info[0] == 'posix' and os_info[1] == 'Darwin'):
result = 'Darwin'
return result
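# Minimal illustrative plugin (a sketch with assumed capability and parameter
# names, not part of the original module): shows how a concrete plugin is
# expected to wire the interface together using the helpers above.
class HostTestPluginExample(HostTestPluginBase):
    name = "HostTestPluginExample"
    type = "ResetMethod"
    capabilities = ["example_reset"]
    required_parameters = ["serial"]
    stable = False

    def setup(self, *args, **kwargs):
        # Nothing to configure for this sketch
        return True

    def execute(self, capability, *args, **kwargs):
        if not self.check_parameters(capability, *args, **kwargs):
            return False
        if capability == "example_reset":
            # Delegate to an external command through the shared helper
            return self.run_command("echo reset %s" % kwargs["serial"])
        return False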
|
the-stack_106_21914
|
##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from calendarserver.push.ipush import PushPriority
from calendarserver.push.util import PushScheduler
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.protocol import Factory, ServerFactory
from twisted.protocols import amp
import time
import uuid
log = Logger()
# Control socket message-routing constants
PUSH_ROUTE = "push"
# AMP Commands sent to server
class SubscribeToID(amp.Command):
arguments = [('token', amp.String()), ('id', amp.String())]
response = [('status', amp.String())]
class UnsubscribeFromID(amp.Command):
arguments = [('id', amp.String())]
response = [('status', amp.String())]
# AMP Commands sent to client (and forwarded to Master)
class NotificationForID(amp.Command):
arguments = [('id', amp.String()),
('dataChangedTimestamp', amp.Integer(optional=True)),
('priority', amp.Integer(optional=True))]
response = [('status', amp.String())]
# Server classes
class AMPPushForwardingFactory(Factory):
log = Logger()
def __init__(self, forwarder):
self.forwarder = forwarder
def buildProtocol(self, addr):
protocol = amp.AMP()
self.forwarder.protocols.append(protocol)
return protocol
class AMPPushForwarder(object):
"""
Runs in the slaves, forwards notifications to the master via AMP
"""
log = Logger()
def __init__(self, controlSocket):
self.protocols = []
controlSocket.addFactory(PUSH_ROUTE, AMPPushForwardingFactory(self))
@inlineCallbacks
def enqueue(
self, transaction, id, dataChangedTimestamp=None,
priority=PushPriority.high
):
if dataChangedTimestamp is None:
dataChangedTimestamp = int(time.time())
for protocol in self.protocols:
yield protocol.callRemote(
NotificationForID, id=id,
dataChangedTimestamp=dataChangedTimestamp,
priority=priority.value)
class AMPPushMasterListeningProtocol(amp.AMP):
"""
Listens for notifications coming in over AMP from the slaves
"""
log = Logger()
def __init__(self, master):
super(AMPPushMasterListeningProtocol, self).__init__()
self.master = master
@NotificationForID.responder
def enqueueFromWorker(
self, id, dataChangedTimestamp=None,
priority=PushPriority.high.value
):
if dataChangedTimestamp is None:
dataChangedTimestamp = int(time.time())
self.master.enqueue(
None, id, dataChangedTimestamp=dataChangedTimestamp,
priority=PushPriority.lookupByValue(priority))
return {"status": "OK"}
class AMPPushMasterListenerFactory(Factory):
log = Logger()
def __init__(self, master):
self.master = master
def buildProtocol(self, addr):
protocol = AMPPushMasterListeningProtocol(self.master)
return protocol
class AMPPushMaster(object):
"""
AMPPushNotifierService allows clients to use AMP to subscribe to,
and receive, change notifications.
"""
log = Logger()
def __init__(
self, controlSocket, parentService, port, enableStaggering,
staggerSeconds, reactor=None
):
if reactor is None:
from twisted.internet import reactor
from twisted.application.strports import service as strPortsService
if port:
# Service which listens for client subscriptions and sends
# notifications to them
strPortsService(
str(port), AMPPushNotifierFactory(self),
reactor=reactor).setServiceParent(parentService)
if controlSocket is not None:
# Set up the listener which gets notifications from the slaves
controlSocket.addFactory(
PUSH_ROUTE, AMPPushMasterListenerFactory(self)
)
self.subscribers = []
if enableStaggering:
self.scheduler = PushScheduler(
reactor, self.sendNotification,
staggerSeconds=staggerSeconds)
else:
self.scheduler = None
def addSubscriber(self, p):
self.log.debug("Added subscriber")
self.subscribers.append(p)
def removeSubscriber(self, p):
self.log.debug("Removed subscriber")
self.subscribers.remove(p)
def enqueue(
self, transaction, pushKey, dataChangedTimestamp=None,
priority=PushPriority.high
):
"""
Sends an AMP push notification to any clients subscribing to this pushKey.
@param pushKey: The identifier of the resource that was updated, including
a prefix indicating whether this is CalDAV or CardDAV related.
"/CalDAV/abc/def/"
@type pushKey: C{str}
@param dataChangedTimestamp: Timestamp (epoch seconds) for the data change
which triggered this notification (Only used for unit tests)
@type dataChangedTimestamp: C{int}
"""
# Unit tests can pass this value in; otherwise it defaults to now
if dataChangedTimestamp is None:
dataChangedTimestamp = int(time.time())
tokens = []
for subscriber in self.subscribers:
token = subscriber.subscribedToID(pushKey)
if token is not None:
tokens.append(token)
if tokens:
return self.scheduleNotifications(
tokens, pushKey,
dataChangedTimestamp, priority)
@inlineCallbacks
def sendNotification(self, token, id, dataChangedTimestamp, priority):
for subscriber in self.subscribers:
if subscriber.subscribedToID(id):
yield subscriber.notify(
token, id, dataChangedTimestamp,
priority)
@inlineCallbacks
def scheduleNotifications(self, tokens, id, dataChangedTimestamp, priority):
if self.scheduler is not None:
self.scheduler.schedule(tokens, id, dataChangedTimestamp, priority)
else:
for token in tokens:
yield self.sendNotification(
token, id, dataChangedTimestamp,
priority)
class AMPPushNotifierProtocol(amp.AMP):
log = Logger()
def __init__(self, service):
super(AMPPushNotifierProtocol, self).__init__()
self.service = service
self.subscriptions = {}
self.any = None
def subscribe(self, token, id):
if id == "any":
self.any = token
else:
self.subscriptions[id] = token
return {"status": "OK"}
SubscribeToID.responder(subscribe)
def unsubscribe(self, id):
try:
del self.subscriptions[id]
except KeyError:
pass
return {"status": "OK"}
UnsubscribeFromID.responder(unsubscribe)
def notify(self, token, id, dataChangedTimestamp, priority):
if self.subscribedToID(id) == token:
self.log.debug("Sending notification for {id} to {token}", id=id, token=token)
return self.callRemote(
NotificationForID, id=id,
dataChangedTimestamp=dataChangedTimestamp,
priority=priority.value)
def subscribedToID(self, id):
if self.any is not None:
return self.any
return self.subscriptions.get(id, None)
def connectionLost(self, reason=None):
self.service.removeSubscriber(self)
class AMPPushNotifierFactory(ServerFactory):
log = Logger()
protocol = AMPPushNotifierProtocol
def __init__(self, service):
self.service = service
def buildProtocol(self, addr):
p = self.protocol(self.service)
self.service.addSubscriber(p)
p.service = self.service
return p
# Client classes
class AMPPushClientProtocol(amp.AMP):
"""
Implements the client side of the AMP push protocol. Whenever
the NotificationForID Command arrives, the registered callback
will be called with the id.
"""
def __init__(self, callback):
super(AMPPushClientProtocol, self).__init__()
self.callback = callback
@inlineCallbacks
def notificationForID(self, id, dataChangedTimestamp, priority):
yield self.callback(id, dataChangedTimestamp, PushPriority.lookupByValue(priority))
returnValue({"status": "OK"})
NotificationForID.responder(notificationForID)
class AMPPushClientFactory(Factory):
log = Logger()
protocol = AMPPushClientProtocol
def __init__(self, callback):
self.callback = callback
def buildProtocol(self, addr):
p = self.protocol(self.callback)
return p
# Client helper methods
@inlineCallbacks
def subscribeToIDs(host, port, ids, callback, reactor=None):
"""
Clients can call this helper method to register a callback which
will get called whenever a push notification is fired for any
id in the ids list.
@param host: AMP host name to connect to
@type host: string
@param port: AMP port to connect to
@type port: integer
@param ids: The push IDs to subscribe to
@type ids: list of strings
@param callback: The method to call whenever a notification is
received.
    @type callback: callable which is passed the id (string), the data-changed
        timestamp (integer) and the priority (PushPriority)
"""
if reactor is None:
from twisted.internet import reactor
token = str(uuid.uuid4())
endpoint = TCP4ClientEndpoint(reactor, host, port)
factory = AMPPushClientFactory(callback)
protocol = yield endpoint.connect(factory)
for id in ids:
yield protocol.callRemote(SubscribeToID, token=token, id=id)
returnValue(factory)
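# A hedged usage sketch (not part of the original module): how a client might
# register for push notifications with subscribeToIDs(). The host, port and
# push keys below are illustrative only; the callback signature follows
# AMPPushClientProtocol.notificationForID (id, dataChangedTimestamp, priority).
#
#     from twisted.internet import reactor
#
#     def gotPush(id, dataChangedTimestamp, priority):
#         print("Push for %s at %d (%s)" % (id, dataChangedTimestamp, priority))
#
#     def start():
#         return subscribeToIDs(
#             "localhost", 62311,
#             ["/CalDAV/example.com/user01/", "any"],
#             gotPush, reactor=reactor)
#
#     reactor.callWhenRunning(start)
#     reactor.run()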
|
the-stack_106_21915
|
import numpy as np
# Sigmoid activation function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Identity function (returns its input unchanged)
def identity_function(x):
return x
# Initialize the network weights and biases
def init_network():
network = {}
network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
network['b1'] = np.array([0.1, 0.2, 0.3])
network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
network['b2'] = np.array([0.1, 0.2])
network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])
network['b3'] = np.array([0.1, 0.2])
return network
# Forward propagation from the input layer to the output layer
def forward(network, x):
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y = identity_function(a3)
return y
network = init_network()
x = np.array([1.0, 0.5])
y = forward(network, x)
print('y:', y)
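# For reference, with the fixed weights above the script should print roughly
# y: [0.31682708 0.69627909]. Working through the layers by hand:
# a1 = x.W1 + b1 = [0.3, 0.7, 1.1], z1 = sigmoid(a1) ~ [0.574, 0.668, 0.750]
# a2 = z1.W2 + b2 ~ [0.516, 1.214], z2 = sigmoid(a2) ~ [0.626, 0.771]
# y  = z2.W3 + b3 ~ [0.317, 0.696]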
|
the-stack_106_21916
|
'''
The static grains, these are the core, or built in grains.
When grains are loaded they are not loaded in the same way that modules are
loaded, grain functions are detected and executed, the functions MUST
return a dict which will be applied to the main grains dict. This module
will always be executed first, so that any grains loaded here in the core
module can be overwritten just by returning dict keys with the same name
as those returned here
'''
# This needs some refactoring; it was made "as fast as I could" and could be a
# lot clearer, so far it is spaghetti code
# Import python modules
import os
import socket
import sys
import re
import platform
import logging
import locale
# Extend the default list of supported distros. This will be used for the
# /etc/DISTRO-release checking that is part of platform.linux_distribution()
from platform import _supported_dists
_supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
'slamd64', 'enterprise', 'ovs', 'system')
import salt.log
import salt.utils
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
__salt__ = {
'cmd.run': salt.modules.cmdmod._run_quiet,
'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}
log = logging.getLogger(__name__)
has_wmi = False
if sys.platform.startswith('win'):
# attempt to import the python wmi module
# the Windows minion uses WMI for some of its grains
try:
import wmi
has_wmi = True
except ImportError:
log.exception("Unable to import Python wmi module, some core grains "
"will be missing")
def _windows_cpudata():
'''
Return some cpu information on Windows minions
'''
# Provides:
# num_cpus
# cpu_model
grains = {}
if 'NUMBER_OF_PROCESSORS' in os.environ:
# Cast to int so that the logic isn't broken when used as a
# conditional in templating. Also follows _linux_cpudata()
try:
grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
except ValueError:
grains['num_cpus'] = 1
grains['cpu_model'] = platform.processor()
return grains
def _linux_cpudata():
'''
Return some cpu information for Linux minions
'''
# Provides:
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cpuinfo = '/proc/cpuinfo'
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
with open(cpuinfo, 'r') as _fp:
for line in _fp:
comps = line.split(':')
if not len(comps) > 1:
continue
key = comps[0].strip()
val = comps[1].strip()
if key == 'processor':
grains['num_cpus'] = int(val) + 1
elif key == 'model name':
grains['cpu_model'] = val
elif key == 'flags':
grains['cpu_flags'] = val.split()
if 'num_cpus' not in grains:
grains['num_cpus'] = 0
if 'cpu_model' not in grains:
grains['cpu_model'] = 'Unknown'
if 'cpu_flags' not in grains:
grains['cpu_flags'] = []
return grains
def _bsd_cpudata(osdata):
'''
Return cpu information for BSD-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
sysctl = salt.utils.which('sysctl')
arch = salt.utils.which('arch')
cmds = {}
if sysctl:
cmds.update({
'num_cpus': '{0} -n hw.ncpu'.format(sysctl),
'cpuarch': '{0} -n hw.machine'.format(sysctl),
'cpu_model': '{0} -n hw.model'.format(sysctl),
})
if arch and osdata['kernel'] == 'OpenBSD':
cmds['cpuarch'] = '{0} -s'.format(arch)
grains = dict([(k, __salt__['cmd.run'](v)) for k, v in cmds.items()])
grains['cpu_flags'] = []
try:
grains['num_cpus'] = int(grains['num_cpus'])
except ValueError:
grains['num_cpus'] = 1
return grains
def _sunos_cpudata(osdata):
'''
Return the cpu information for Solaris-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
grains = {'num_cpus': 0}
grains['cpuarch'] = __salt__['cmd.run']('uname -p')
psrinfo = '/usr/sbin/psrinfo 2>/dev/null'
for line in __salt__['cmd.run'](psrinfo).splitlines():
grains['num_cpus'] += 1
kstat_info = 'kstat -p cpu_info:*:*:implementation'
grains['cpu_model'] = __salt__['cmd.run'](kstat_info).split()[1].strip()
return grains
def _memdata(osdata):
'''
Gather information about the system memory
'''
# Provides:
# mem_total
grains = {'mem_total': 0}
if osdata['kernel'] == 'Linux':
meminfo = '/proc/meminfo'
if os.path.isfile(meminfo):
for line in open(meminfo, 'r').readlines():
comps = line.split(':')
if not len(comps) > 1:
continue
if comps[0].strip() == 'MemTotal':
grains['mem_total'] = int(comps[1].split()[0]) / 1024
elif osdata['kernel'] in ('FreeBSD', 'OpenBSD'):
sysctl = salt.utils.which('sysctl')
if sysctl:
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
grains['mem_total'] = str(int(mem) / 1024 / 1024)
elif osdata['kernel'] == 'SunOS':
prtconf = '/usr/sbin/prtconf 2>/dev/null'
for line in __salt__['cmd.run'](prtconf).splitlines():
comps = line.split(' ')
if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
grains['mem_total'] = int(comps[2].strip())
elif osdata['kernel'] == 'Windows' and has_wmi:
wmi_c = wmi.WMI()
# this is a list of each stick of ram in a system
# WMI returns it as the string value of the number of bytes
tot_bytes = sum(map(lambda x: int(x.Capacity),
wmi_c.Win32_PhysicalMemory()), 0)
        # return memory info in megabytes
grains['mem_total'] = int(tot_bytes / (1024 ** 2))
return grains
def _virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
'''
    # This is going to be a monster; if you are running a VM, please test this
    # grain and submit patches!
# Provides:
# virtual
# virtual_subtype
grains = {'virtual': 'physical'}
for command in ('dmidecode', 'lspci'):
cmd = salt.utils.which(command)
if not cmd:
continue
ret = __salt__['cmd.run_all'](cmd)
if ret['retcode'] > 0:
if salt.log.is_logging_configured():
log.warn(
'Although \'{0}\' was found in path, the current user '
'cannot execute it. Grains output might not be '
'accurate.'.format(command)
)
continue
output = ret['stdout']
if command == 'dmidecode':
# Product Name: VirtualBox
if 'Vendor: QEMU' in output:
# FIXME: Make this detect between kvm or qemu
grains['virtual'] = 'kvm'
if 'Vendor: Bochs' in output:
grains['virtual'] = 'kvm'
elif 'VirtualBox' in output:
grains['virtual'] = 'VirtualBox'
# Product Name: VMware Virtual Platform
elif 'VMware' in output:
grains['virtual'] = 'VMware'
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif 'Manufacturer: Microsoft' in output and 'Virtual Machine' in output:
grains['virtual'] = 'VirtualPC'
# Manufacturer: Parallels Software International Inc.
elif 'Parallels Software' in output:
grains['virtual'] = 'Parallels'
# Break out of the loop, lspci parsing is not necessary
break
elif command == 'lspci':
# dmidecode not available or the user does not have the necessary
# permissions
model = output.lower()
if 'vmware' in model:
grains['virtual'] = 'VMware'
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH VirtualBox Guest Service
elif 'virtualbox' in model:
grains['virtual'] = 'VirtualBox'
elif 'qemu' in model:
grains['virtual'] = 'kvm'
elif 'virtio' in model:
grains['virtual'] = 'kvm'
# Break out of the loop so the next log message is not issued
break
else:
log.warn(
'Both \'dmidecode\' and \'lspci\' failed to execute, either '
            'because they do not exist on the system or the user running '
'this instance does not have the necessary permissions to '
'execute them. Grains output might not be accurate.'
)
choices = ('Linux', 'OpenBSD', 'HP-UX')
isdir = os.path.isdir
if osdata['kernel'] in choices:
if isdir('/proc/vz'):
if os.path.isfile('/proc/vz/version'):
grains['virtual'] = 'openvzhn'
else:
grains['virtual'] = 'openvzve'
elif isdir('/proc/sys/xen') or isdir('/sys/bus/xen') or isdir('/proc/xen'):
if os.path.isfile('/proc/xen/xsd_kva'):
# Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
grains['virtual_subtype'] = 'Xen Dom0'
else:
if grains.get('productname', '') == 'HVM domU':
# Requires dmidecode!
grains['virtual_subtype'] = 'Xen HVM DomU'
elif os.path.isfile('/proc/xen/capabilities') and os.access('/proc/xen/capabilities', os.R_OK):
caps = open('/proc/xen/capabilities')
if 'control_d' not in caps.read():
# Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
grains['virtual_subtype'] = 'Xen PV DomU'
else:
# Shouldn't get to this, but just in case
grains['virtual_subtype'] = 'Xen Dom0'
caps.close()
# Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
# Tested on Fedora 15 / 2.6.41.4-1 without running xen
elif isdir('/sys/bus/xen'):
if 'xen' in __salt__['cmd.run']('dmesg').lower():
grains['virtual_subtype'] = 'Xen PV DomU'
elif os.listdir('/sys/bus/xen/drivers'):
# An actual DomU will have several drivers
# whereas a paravirt ops kernel will not.
grains['virtual_subtype'] = 'Xen PV DomU'
# If a Dom0 or DomU was detected, obviously this is xen
if 'dom' in grains.get('virtual_subtype', '').lower():
grains['virtual'] = 'xen'
if os.path.isfile('/proc/cpuinfo'):
if 'QEMU Virtual CPU' in open('/proc/cpuinfo', 'r').read():
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'FreeBSD':
sysctl = salt.utils.which('sysctl')
kenv = salt.utils.which('kenv')
if kenv:
product = __salt__['cmd.run']('{0} smbios.system.product'.format(kenv))
if product.startswith('VMware'):
grains['virtual'] = 'VMware'
if sysctl:
model = __salt__['cmd.run']('{0} hw.model'.format(sysctl))
jail = __salt__['cmd.run']('{0} -n security.jail.jailed'.format(sysctl))
if jail:
grains['virtual_subtype'] = 'jail'
if 'QEMU Virtual CPU' in model:
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'SunOS':
# Check if it's a "regular" zone. (i.e. Solaris 10/11 zone)
zonename = salt.utils.which('zonename')
if zonename:
zone = __salt__['cmd.run']('{0}'.format(zonename))
if zone != "global":
grains['virtual'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if isdir('/.SUNWnative'):
grains['virtual'] = 'zone'
return grains
def _ps(osdata):
'''
Return the ps grain
'''
grains = {}
bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS')
if osdata['os'] in bsd_choices:
grains['ps'] = 'ps auxwww'
elif osdata['os_family'] == 'Solaris':
grains['ps'] = '/usr/ucb/ps auxwww'
elif osdata['os'] == 'Windows':
grains['ps'] = 'tasklist.exe'
elif osdata.get('virtual', '') == 'openvzhn':
grains['ps'] = 'vzps -E 0 -efH|cut -b 6-'
else:
grains['ps'] = 'ps -efH'
return grains
def _windows_platform_data(osdata):
'''
Use the platform module for as much as we can.
'''
# Provides:
# osmanufacturer
# manufacturer
# productname
# biosversion
# osfullname
# timezone
# windowsdomain
if not has_wmi:
return {}
wmi_c = wmi.WMI()
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394102%28v=vs.85%29.aspx
systeminfo = wmi_c.Win32_ComputerSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394239%28v=vs.85%29.aspx
osinfo = wmi_c.Win32_OperatingSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394077(v=vs.85).aspx
biosinfo = wmi_c.Win32_BIOS()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394498(v=vs.85).aspx
timeinfo = wmi_c.Win32_TimeZone()[0]
# the name of the OS comes with a bunch of other data about the install
# location. For example:
# 'Microsoft Windows Server 2008 R2 Standard |C:\\Windows|\\Device\\Harddisk0\\Partition2'
(osfullname, _) = osinfo.Name.split('|', 1)
osfullname = osfullname.strip()
grains = {
'osmanufacturer': osinfo.Manufacturer,
'manufacturer': systeminfo.Manufacturer,
'productname': systeminfo.Model,
# bios name had a bunch of whitespace appended to it in my testing
# 'PhoenixBIOS 4.0 Release 6.0 '
'biosversion': biosinfo.Name.strip(),
'osfullname': osfullname,
'timezone': timeinfo.Description,
'windowsdomain': systeminfo.Domain,
}
return grains
def id_():
'''
Return the id
'''
return {'id': __opts__['id']}
# This maps (at most) the first ten characters (no spaces, lowercased) of
# 'osfullname' to the 'os' grain that Salt traditionally uses.
_OS_NAME_MAP = {
'redhatente': 'RedHat',
'debian': 'Debian',
'arch': 'Arch',
'amazonlinu': 'Amazon',
'centoslinu': 'CentOS',
}
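# Worked example (illustrative): an 'osfullname' of 'Red Hat Enterprise Linux
# Server' becomes 'redhatenterpriselinuxserver' once spaces are stripped and it
# is lowercased, then 'redhatente' after truncating to ten characters, which
# maps to the traditional 'RedHat' value. Names without an entry here fall back
# to the full 'osfullname' in os_data().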
# Map the 'os' grain to the 'os_family' grain
_OS_FAMILY_MAP = {
'Ubuntu': 'Debian',
'Fedora': 'RedHat',
'CentOS': 'RedHat',
'GoOSe': 'RedHat',
'Scientific': 'RedHat',
'Amazon': 'RedHat',
'CloudLinux': 'RedHat',
'OVS': 'RedHat',
'OEL': 'RedHat',
'Mandrake': 'Mandriva',
'ESXi': 'VMWare',
'VMWareESX': 'VMWare',
'Bluewhite64': 'Bluewhite',
'Slamd64': 'Slackware',
'SLES': 'Suse',
'SLED': 'Suse',
'openSUSE': 'Suse',
'SUSE': 'Suse',
'Solaris': 'Solaris',
'SmartOS': 'Solaris',
}
def os_data():
'''
Return grains pertaining to the operating system
'''
grains = {}
try:
(grains['defaultlanguage'],
grains['defaultencoding']) = locale.getdefaultlocale()
except Exception:
        # locale.getdefaultlocale can raise ValueError!! Catch anything else it
# might do, per #2205
grains['defaultlanguage'] = 'unknown'
grains['defaultencoding'] = 'unknown'
# Windows Server 2008 64-bit
# ('Windows', 'MINIONNAME', '2008ServerR2', '6.1.7601', 'AMD64', 'Intel64 Fam ily 6 Model 23 Stepping 6, GenuineIntel')
# Ubuntu 10.04
# ('Linux', 'MINIONNAME', '2.6.32-38-server', '#83-Ubuntu SMP Wed Jan 4 11:26:59 UTC 2012', 'x86_64', '')
(grains['kernel'], grains['nodename'],
grains['kernelrelease'], version, grains['cpuarch'], _) = platform.uname()
if grains['kernel'] == 'Windows':
grains['osrelease'] = grains['kernelrelease']
grains['osversion'] = grains['kernelrelease'] = version
grains['os'] = 'Windows'
grains['os_family'] = 'Windows'
grains.update(_memdata(grains))
grains.update(_windows_platform_data(grains))
grains.update(_windows_cpudata())
grains.update(_ps(grains))
return grains
elif grains['kernel'] == 'Linux':
# Add lsb grains on any distro with lsb-release
try:
import lsb_release
release = lsb_release.get_distro_information()
for key, value in release.iteritems():
grains['lsb_{0}'.format(key.lower())] = value # override /etc/lsb-release
except ImportError:
# if the python library isn't available, default to regex
if os.path.isfile('/etc/lsb-release'):
for line in open('/etc/lsb-release').readlines():
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
                    regex = re.compile('^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?([\w\s\.\-_]+)(?:\'|")?')
match = regex.match(line)
if match:
# Adds: lsb_distrib_{id,release,codename,description}
grains['lsb_{0}'.format(match.groups()[0].lower())] = match.groups()[1].rstrip()
# Use the already intelligent platform module to get distro info
(osname, osrelease, oscodename) = platform.linux_distribution(
supported_dists=_supported_dists)
# Try to assign these three names based on the lsb info, they tend to
# be more accurate than what python gets from /etc/DISTRO-release.
# It's worth noting that Ubuntu has patched their Python distribution
# so that platform.linux_distribution() does the /etc/lsb-release
        # parsing, but we do it anyway here for the sake of full portability.
grains['osfullname'] = grains.get('lsb_distrib_id', osname)
grains['osrelease'] = grains.get('lsb_distrib_release', osrelease)
grains['oscodename'] = grains.get('lsb_distrib_codename', oscodename)
# return the first ten characters with no spaces, lowercased
shortname = grains['osfullname'].replace(' ', '').lower()[:10]
# this maps the long names from the /etc/DISTRO-release files to the
# traditional short names that Salt has used.
grains['os'] = _OS_NAME_MAP.get(shortname, grains['osfullname'])
grains.update(_linux_cpudata())
elif grains['kernel'] == 'SunOS':
grains['os'] = 'Solaris'
if os.path.isfile('/etc/release'):
with open('/etc/release', 'r') as fp_:
rel_data = fp_.read()
if 'SmartOS' in rel_data:
grains['os'] = 'SmartOS'
grains.update(_sunos_cpudata(grains))
elif grains['kernel'] == 'VMkernel':
grains['os'] = 'ESXi'
elif grains['kernel'] == 'Darwin':
grains['os'] = 'MacOS'
grains.update(_bsd_cpudata(grains))
else:
grains['os'] = grains['kernel']
if grains['kernel'] in ('FreeBSD', 'OpenBSD'):
grains.update(_bsd_cpudata(grains))
if not grains['os']:
grains['os'] = 'Unknown {0}'.format(grains['kernel'])
grains['os_family'] = 'Unknown'
else:
# this assigns family names based on the os name
# family defaults to the os name if not found
grains['os_family'] = _OS_FAMILY_MAP.get(grains['os'],
grains['os'])
grains.update(_memdata(grains))
# Get the hardware and bios data
grains.update(_hw_data(grains))
# Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_ps(grains))
return grains
def hostname():
'''
Return fqdn, hostname, domainname
'''
# This is going to need some work
# Provides:
# fqdn
# host
# localhost
# domain
grains = {}
grains['localhost'] = socket.gethostname()
grains['fqdn'] = socket.getfqdn()
(grains['host'], grains['domain']) = grains['fqdn'].partition('.')[::2]
return grains
def path():
'''
Return the path
'''
# Provides:
# path
return {'path': os.environ['PATH'].strip()}
def pythonversion():
'''
Return the Python version
'''
# Provides:
# pythonversion
return {'pythonversion': list(sys.version_info)}
def pythonpath():
'''
Return the Python path
'''
# Provides:
# pythonpath
return {'pythonpath': sys.path}
def saltpath():
'''
Return the path of the salt module
'''
# Provides:
# saltpath
path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {'saltpath': os.path.dirname(path)}
def saltversion():
'''
Return the version of salt
'''
# Provides:
# saltversion
from salt import __version__
return {'saltversion': __version__}
# Relatively complex mini-algorithm to iterate over the various
# sections of dmidecode output and return matches for specific
# lines containing data we want, but only in the right section.
def _dmidecode_data(regex_dict):
'''
Parse the output of dmidecode in a generic fashion that can
be used for the multiple system types which have dmidecode.
'''
# NOTE: This function might gain support for smbios instead
# of dmidecode when salt gets working Solaris support
ret = {}
# No use running if dmidecode isn't in the path
if not salt.utils.which('dmidecode'):
return ret
out = __salt__['cmd.run']('dmidecode')
for section in regex_dict:
section_found = False
# Look at every line for the right section
for line in out.splitlines():
if not line:
continue
# We've found it, woohoo!
if re.match(section, line):
section_found = True
continue
if not section_found:
continue
# Now that a section has been found, find the data
for item in regex_dict[section]:
# Examples:
# Product Name: 64639SU
# Version: 7LETC1WW (2.21 )
regex = re.compile('\s+{0}\s+(.*)$'.format(item))
grain = regex_dict[section][item]
# Skip to the next iteration if this grain
# has been found in the dmidecode output.
if grain in ret:
continue
match = regex.match(line)
# Finally, add the matched data to the grains returned
if match:
ret[grain] = match.group(1).strip()
return ret
def _hw_data(osdata):
'''
Get system specific hardware data from dmidecode
Provides
biosversion
productname
manufacturer
serialnumber
biosreleasedate
.. versionadded:: 0.9.5
'''
grains = {}
# TODO: *BSD dmidecode output
if osdata['kernel'] == 'Linux':
linux_dmi_regex = {
'BIOS [Ii]nformation': {
'[Vv]ersion:': 'biosversion',
'[Rr]elease [Dd]ate:': 'biosreleasedate',
},
'[Ss]ystem [Ii]nformation': {
'Manufacturer:': 'manufacturer',
'Product(?: Name)?:': 'productname',
'Serial Number:': 'serialnumber',
},
}
grains.update(_dmidecode_data(linux_dmi_regex))
# On FreeBSD /bin/kenv (already in base system) can be used instead of dmidecode
elif osdata['kernel'] == 'FreeBSD':
kenv = salt.utils.which('kenv')
if kenv:
# In theory, it will be easier to add new fields to this later
fbsd_hwdata = {
'biosversion': 'smbios.bios.version',
'manufacturer': 'smbios.system.maker',
'serialnumber': 'smbios.system.serial',
'productname': 'smbios.system.product',
'biosreleasedate': 'smbios.bios.reldate',
}
for key, val in fbsd_hwdata.items():
grains[key] = __salt__['cmd.run']('{0} {1}'.format(kenv, val))
elif osdata['kernel'] == 'OpenBSD':
sysctl = salt.utils.which('sysctl')
hwdata = {'biosversion': 'hw.version',
'manufacturer': 'hw.vendor',
'productname': 'hw.product',
'serialnumber': 'hw.serialno'}
for key, oid in hwdata.items():
value = __salt__['cmd.run']('{0} -n {1}'.format(sysctl, oid))
if not value.endswith(' value is not available'):
grains[key] = value
return grains
def get_server_id():
'''
Provides an integer based on the FQDN of a machine.
Useful as server-id in MySQL replication or anywhere else you'll need an ID like this.
'''
# Provides:
# server_id
return {'server_id': abs(hash(__opts__['id']) % (2 ** 31))}
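# A hedged sketch (not part of upstream Salt) of what a minimal custom grain
# function looks like, following the contract in the module docstring: any
# top-level function that returns a dict has that dict merged into the grains,
# and its keys may be overwritten by grain modules loaded later. The function
# name, marker file and values below are illustrative only.
#
#     def deployment_tier():
#         '''
#         Return which deployment tier this minion belongs to
#         '''
#         # Provides:
#         #   deployment_tier
#         tier = 'production' if os.path.isfile('/etc/production') else 'staging'
#         return {'deployment_tier': tier}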
|
the-stack_106_21919
|
import time
import threading
import config
from config import *
class IBlock:
def __init__(self):
self.cells = 4 # Number of cells occupied by the block
config.block_count += 1
config.item_id["blocks"][f"{config.block_count}"] = {} # Add a new key to dictionary to add block IDs
for n in range(self.cells):
# Loop draws the complete block on the top of the board
# Generate an ID for each cell occupied by the block
config.item_id["blocks"][f"{config.block_count}"][f"{n}"] = dpg.generate_uuid()
# Make a list of the initial cells occupied by the blocks
config.cells_occupied.append([3 + n, 19])
# Draw the cell
dpg.draw_image(texture_tag=item_id["block_texture"]["I_block"], pmin=[3 + n, 20], pmax=[4 + n, 19],
parent=item_id["windows"]["tetris_board"],
id=config.item_id["blocks"][f"{config.block_count}"][f"{n}"])
# Update statistics
# Take the value shown, add 1 and set value
dpg.configure_item(item=item_id["displays"]["I_block_stat"],
text=int(
dpg.get_item_configuration(item=item_id["displays"]["I_block_stat"])["text"]) + 1)
dpg.set_value(item=item_id["displays"]["Total_block_stat"],
value=int(dpg.get_value(item=item_id["displays"]["Total_block_stat"])) + 1)
def move_blockDispatcher(self):
# Function creates a new thread that controls the continuous movement of the new blocks
move_block_thread = threading.Thread(name="move block", target=self.move_block, args=(), daemon=True)
move_block_thread.start()
def move_block(self):
# Function controls the continuous downward movement of the blocks
config.block_moving_flag = 1 # Set to 1=IBlock. Block is moving
while True:
for n in range(self.cells):
config.cells_occupied[-1 - n][1] -= 1 # Shift the Y Coordinate down by 1 unit
if any(item in config.cells_occupied[-self.cells:] for item in config.cell_boundary) or \
any(item in config.cells_occupied[-self.cells:] for item in config.cells_occupied[:-self.cells]):
# Check if any cells have touched the wall or other blocks. If so, stop the movement
for n in range(self.cells):
config.cells_occupied[-1 - n][1] += 1 # Reset the Y coordinate
config.block_moving_flag = 0 # Block has stopped moving
return
for n in range(self.cells):
# Draw after all cells are updated
dpg.configure_item(item=config.item_id["blocks"][f"{config.block_count}"][f"{n}"],
pmin=[config.cells_occupied[-1 - n][0], config.cells_occupied[-1 - n][1] + 1],
pmax=[config.cells_occupied[-1 - n][0] + 1, config.cells_occupied[-1 - n][1]])
time.sleep(config.speed) # Wait at each cell
def draw_next_IBlock():
for n in range(4):
# Loop draws the complete block on the "next" board
# Draw the cell
dpg.draw_image(texture_tag=item_id["block_texture"]["I_block"], pmin=[2 + n, 4], pmax=[3 + n, 3],
parent=item_id["windows"]["next_block_board"])
def draw_statistics_IBlock():
for n in range(4):
        # Loop draws the complete block on the statistics window
# Draw the cell
dpg.draw_image(texture_tag=item_id["block_texture"]["I_block"], pmin=[2 + n, 16], pmax=[3 + n, 15],
parent=item_id["windows"]["statistics_window"])
dpg.draw_line(p1=[6.5, 15.5], p2=[7.5, 15.5], thickness=0.1, color=[168, 168, 168],
parent=item_id["windows"]["statistics_window"])
dpg.draw_text(pos=[8.5, 15.8], text="0", size=0.5, color=[168, 168, 168],
id=item_id["displays"]["I_block_stat"])
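# A hedged usage sketch (the actual game loop lives elsewhere in this project):
# constructing a piece draws it at the top of the board and bumps the
# statistics, and the dispatcher starts a daemon thread that moves it down
# until config.block_moving_flag returns to 0.
#
#     block = IBlock()
#     block.move_blockDispatcher()
#     draw_next_IBlock()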
|
the-stack_106_21920
|
import numpy as np
from scipy.signal import find_peaks
__all__ = ['bls_peakfinder']
def bls_peakfinder(results):
"""
Find peaks in a Box Least Squares spectrum.
Parameters
----------
results : `~astropy.timeseries.BoxLeastSquaresResults`
BLS results
Returns
-------
inds : `~numpy.ndarray`
Indices with the top powers, sorted in order of peak height
significance : float
Ratio of the height of the tallest peak to the height of the
second tallest peak
"""
maxima = find_peaks(results.power, distance=100)[0]
top_power_inds = maxima[np.argsort(results.power[maxima])[::-1]]
highest_peak = results.power[top_power_inds[0]]
next_highest_peak = results.power[top_power_inds[1]]
significance = highest_peak / next_highest_peak
return top_power_inds, significance
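# A minimal usage sketch (assumes astropy is installed; the injected transit
# and the period/duration grids are illustrative only):
#
#     import numpy as np
#     from astropy.timeseries import BoxLeastSquares
#
#     rng = np.random.default_rng(42)
#     t = np.linspace(0, 20, 2000)
#     y = 1.0 + 1e-3 * rng.standard_normal(t.size)
#     y[(t % 3.0) < 0.1] -= 0.01  # crude box transit with a 3 day period
#     results = BoxLeastSquares(t, y).power(np.linspace(1, 10, 5000), 0.1)
#     inds, significance = bls_peakfinder(results)
#     best_period = results.period[inds[0]]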
|
the-stack_106_21921
|
import cv2
import keras
import math
import matplotlib.pyplot as plt
import numpy as np
import random
import warnings
from generators.utils import get_affine_transform, affine_transform
from generators.utils import gaussian_radius, draw_gaussian, gaussian_radius_2, draw_gaussian_2
class Generator(keras.utils.Sequence):
"""
Abstract generator class.
"""
def __init__(
self,
multi_scale=False,
multi_image_sizes=(320, 352, 384, 416, 448, 480, 512, 544, 576, 608),
misc_effect=None,
visual_effect=None,
batch_size=1,
group_method='ratio', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
input_size=512,
max_objects=100
):
"""
Initialize Generator object.
Args:
batch_size: The size of the batches to generate.
group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).
shuffle_groups: If True, shuffles the groups each epoch.
            input_size: The side length in pixels of the square network input (the output size is input_size // 4).
            max_objects: The maximum number of ground-truth boxes per image used when building training targets.
"""
self.misc_effect = misc_effect
self.visual_effect = visual_effect
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.input_size = input_size
self.output_size = self.input_size // 4
self.max_objects = max_objects
self.groups = None
self.multi_scale = multi_scale
self.multi_image_sizes = multi_image_sizes
self.current_index = 0
# Define groups
self.group_images()
# Shuffle when initializing
if self.shuffle_groups:
random.shuffle(self.groups)
    # At the end of each epoch, reset current_index to 0 and reshuffle the groups
def on_epoch_end(self):
if self.shuffle_groups:
random.shuffle(self.groups)
self.current_index = 0
def size(self):
"""
Size of the dataset.
"""
raise NotImplementedError('size method not implemented')
def num_classes(self):
"""
Number of classes in the dataset.
"""
raise NotImplementedError('num_classes method not implemented')
def has_label(self, label):
"""
Returns True if label is a known label.
"""
raise NotImplementedError('has_label method not implemented')
def has_name(self, name):
"""
Returns True if name is a known class.
"""
raise NotImplementedError('has_name method not implemented')
def name_to_label(self, name):
"""
Map name to label.
"""
raise NotImplementedError('name_to_label method not implemented')
def label_to_name(self, label):
"""
Map label to name.
"""
raise NotImplementedError('label_to_name method not implemented')
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
raise NotImplementedError('image_aspect_ratio method not implemented')
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
raise NotImplementedError('load_image method not implemented')
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
raise NotImplementedError('load_annotations method not implemented')
    # Takes a group of image indices; load_annotations itself is implemented by the dataset-specific subclass
def load_annotations_group(self, group):
"""
Load annotations for all images in group.
"""
        # load_annotations returns {'labels': np.array, 'bboxes': np.array}
annotations_group = [self.load_annotations(image_index) for image_index in group]
for annotations in annotations_group:
assert (isinstance(annotations,
dict)), '\'load_annotations\' should return a list of dictionaries, received: {}'.format(
type(annotations))
assert (
'labels' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
assert (
'bboxes' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
return annotations_group
    # Drop invalid boxes (outside the image, or with non-positive width/height)
def filter_annotations(self, image_group, annotations_group, group):
"""
Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
            # indices of boxes to delete
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
invalid_indices = np.where(
(annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |
(annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |
(annotations['bboxes'][:, 0] < 0) |
(annotations['bboxes'][:, 1] < 0) |
(annotations['bboxes'][:, 2] <= 0) |
(annotations['bboxes'][:, 3] <= 0) |
(annotations['bboxes'][:, 2] > image.shape[1]) |
(annotations['bboxes'][:, 3] > image.shape[0])
)[0]
# delete invalid indices
if len(invalid_indices):
warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
group[index],
image.shape,
annotations['bboxes'][invalid_indices, :]
))
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)
if annotations['bboxes'].shape[0] == 0:
warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(
group[index],
image.shape,
))
return image_group, annotations_group
def clip_transformed_annotations(self, image_group, annotations_group, group):
"""
        Clip boxes to the image bounds and remove those whose clipped width or height is smaller than 10 pixels.
"""
# test all annotations
filtered_image_group = []
filtered_annotations_group = []
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
image_height = image.shape[0]
image_width = image.shape[1]
# x1
            annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, image_width - 2) # clip x1 to [0, image_width - 2]
# y1
annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, image_height - 2)
# x2
annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, image_width - 1)
# y2
annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, image_height - 1)
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
            # after clipping, drop boxes that became too small
small_indices = np.where(
(annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0] < 10) |
(annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1] < 10)
)[0]
# delete invalid indices
if len(small_indices):
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)
# import cv2
# for invalid_index in small_indices:
# x1, y1, x2, y2 = annotations['bboxes'][invalid_index]
# label = annotations['labels'][invalid_index]
# class_name = self.labels[label]
# print('width: {}'.format(x2 - x1))
# print('height: {}'.format(y2 - y1))
# cv2.rectangle(image, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 255, 0), 2)
# cv2.putText(image, class_name, (int(round(x1)), int(round(y1))), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 1)
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey(0)
if annotations_group[index]['bboxes'].shape[0] != 0:
filtered_image_group.append(image)
filtered_annotations_group.append(annotations_group[index])
else:
warnings.warn('Image with id {} (shape {}) contains no valid boxes after transform'.format(
group[index],
image.shape,
))
return filtered_image_group, filtered_annotations_group
def load_image_group(self, group):
"""
Load images for all images in a group.
"""
return [self.load_image(image_index) for image_index in group]
def random_visual_effect_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
# apply visual effect
image = self.visual_effect(image)
return image, annotations
def random_visual_effect_group(self, image_group, annotations_group):
"""
Randomly apply visual effect on each image.
"""
assert (len(image_group) == len(annotations_group))
if self.visual_effect is None:
# do nothing
return image_group, annotations_group
for index in range(len(image_group)):
# apply effect on a single group entry
image_group[index], annotations_group[index] = self.random_visual_effect_group_entry(
image_group[index], annotations_group[index]
)
return image_group, annotations_group
def random_transform_group_entry(self, image, annotations, transform=None):
"""
Randomly transforms image and annotation.
"""
# randomly transform both image and annotations
if transform is not None or self.transform_generator:
if transform is None:
transform = adjust_transform_for_image(next(self.transform_generator), image,
self.transform_parameters.relative_translation)
# apply transformation to image
image = apply_transform(transform, image, self.transform_parameters)
# Transform the bounding boxes in the annotations.
annotations['bboxes'] = annotations['bboxes'].copy()
for index in range(annotations['bboxes'].shape[0]):
annotations['bboxes'][index, :] = transform_aabb(transform, annotations['bboxes'][index, :])
return image, annotations
def random_transform_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_transform_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def random_misc_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
assert annotations['bboxes'].shape[0] != 0
# randomly transform both image and annotations
image, boxes = self.misc_effect(image, annotations['bboxes'])
# Transform the bounding boxes in the annotations.
annotations['bboxes'] = boxes
return image, annotations
def random_misc_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
if self.misc_effect is None:
return image_group, annotations_group
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_misc_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def preprocess_group_entry(self, image, annotations):
"""
Preprocess image and its annotations.
"""
# preprocess the image
image, scale, offset_h, offset_w = self.preprocess_image(image)
# apply resizing to annotations too
annotations['bboxes'] *= scale
annotations['bboxes'][:, [0, 2]] += offset_w
annotations['bboxes'][:, [1, 3]] += offset_h
# print(annotations['bboxes'][:, [2, 3]] - annotations['bboxes'][:, [0, 1]])
return image, annotations
def preprocess_group(self, image_group, annotations_group):
"""
Preprocess each image and its annotations in its group.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# preprocess a single group entry
image_group[index], annotations_group[index] = self.preprocess_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def group_images(self):
"""
        Order the images according to self.group_method and make groups of self.batch_size.
"""
# determine the order of the images
order = list(range(self.size()))
if self.group_method == 'random':
random.shuffle(order)
elif self.group_method == 'ratio':
order.sort(key=lambda x: self.image_aspect_ratio(x))
# divide into groups, one group = one batch
self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in
range(0, len(order), self.batch_size)]
    # Build the network inputs for a batch:
    # batch images, circular heatmaps, box widths/heights, center offsets (hm, wh, reg), offset masks, flat indices
def compute_inputs(self, image_group, annotations_group):
"""
Compute inputs for the network using an image_group.
"""
# construct an image batch object
batch_images = np.zeros((len(image_group), self.input_size, self.input_size, 3), dtype=np.float32)
batch_hms = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),
dtype=np.float32)
batch_hms_2 = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),
dtype=np.float32)
batch_whs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_regs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_reg_masks = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
batch_indices = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
# copy all images to the upper left part of the image batch object
for b, (image, annotations) in enumerate(zip(image_group, annotations_group)):
            c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32) # image center coordinates
            s = max(image.shape[0], image.shape[1]) * 1.0 # longest image side
            trans_input = get_affine_transform(c, s, self.input_size) # affine transform to input_size
            # inputs
            image = self.preprocess_image(image, c, s, tgt_w=self.input_size, tgt_h=self.input_size) # warp the image to the network input size
batch_images[b] = image
# outputs
bboxes = annotations['bboxes']
assert bboxes.shape[0] != 0
class_ids = annotations['labels']
assert class_ids.shape[0] != 0
            trans_output = get_affine_transform(c, s, self.output_size) # affine transform to the output (heatmap) size
            for i in range(bboxes.shape[0]): # for each box, splat a Gaussian at its center on the heatmap
bbox = bboxes[i].copy()
cls_id = class_ids[i]
# (x1, y1)
bbox[:2] = affine_transform(bbox[:2], trans_output)
# (x2, y2)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.output_size - 1)
                bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.output_size - 1) # clip the box to the output size
                h, w = bbox[3] - bbox[1], bbox[2] - bbox[0] # box height and width
                if h > 0 and w > 0:
                    radius_h, radius_w = gaussian_radius((math.ceil(h), math.ceil(w))) # Gaussian radii from the box size (smallest of the three candidate radii)
                    radius_h = max(0, int(radius_h))
                    radius_w = max(0, int(radius_w))
                    radius = gaussian_radius_2((math.ceil(h), math.ceil(w))) # circular Gaussian radius
                    radius = max(0, int(radius))
                    ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32) # center of the clipped box
                    ct_int = ct.astype(np.int32)
                    draw_gaussian(batch_hms[b, :, :, cls_id], ct_int, radius_h, radius_w) # elliptical Gaussian on the heatmap for class cls_id
                    draw_gaussian_2(batch_hms_2[b, :, :, cls_id], ct_int, radius) # circular variant
                    batch_whs[b, i] = 1. * w, 1. * h # width and height of box i in image b
                    batch_indices[b, i] = ct_int[1] * self.output_size + ct_int[0] # flat index = y * output_size + x
                    batch_regs[b, i] = ct - ct_int # sub-pixel offset (float center minus int center)
                    batch_reg_masks[b, i] = 1 # mark this object as valid for the offset loss
# hm = batch_hms[b, :, :, cls_id]
# hm = np.round(hm * 255).astype(np.uint8)
# hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)
# hm_2 = batch_hms_2[b, :, :, cls_id]
# hm_2 = np.round(hm_2 * 255).astype(np.uint8)
# hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)
# cv2.rectangle(hm, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)
# cv2.rectangle(hm_2, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)
# cv2.namedWindow('hm', cv2.WINDOW_NORMAL)
# cv2.imshow('hm', np.hstack([hm, hm_2]))
# cv2.waitKey()
# print(np.sum(batch_reg_masks[b]))
# for i in range(self.num_classes()):
# plt.subplot(4, 5, i + 1)
# hm = batch_hms[b, :, :, i]
# plt.imshow(hm, cmap='gray')
# plt.axis('off')
# plt.show()
# hm = np.sum(batch_hms[0], axis=-1)
# hm = np.round(hm * 255).astype(np.uint8)
# hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)
# hm_2 = np.sum(batch_hms_2[0], axis=-1)
# hm_2 = np.round(hm_2 * 255).astype(np.uint8)
# hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)
# for i in range(bboxes.shape[0]):
# x1, y1 = np.round(affine_transform(bboxes[i, :2], trans_input)).astype(np.int32)
# x2, y2 = np.round(affine_transform(bboxes[i, 2:], trans_input)).astype(np.int32)
# x1_, y1_ = np.round(affine_transform(bboxes[i, :2], trans_output)).astype(np.int32)
# x2_, y2_ = np.round(affine_transform(bboxes[i, 2:], trans_output)).astype(np.int32)
# class_id = class_ids[i]
# cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1)
# cv2.putText(image, str(class_id), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 0), 3)
# cv2.rectangle(hm, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)
# cv2.rectangle(hm_2, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)
# cv2.namedWindow('hm', cv2.WINDOW_NORMAL)
# cv2.imshow('hm', np.hstack([hm, hm_2]))
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey()
return [batch_images, batch_hms_2, batch_whs, batch_regs, batch_reg_masks, batch_indices]
def compute_targets(self, image_group, annotations_group):
"""
        Compute target outputs for the network using images and their annotations.
"""
return np.zeros((len(image_group),))
    # Given a group of image indices, returns inputs = [batch images, circular heatmaps, widths/heights, offsets, offset masks, indices] and targets = np.zeros((len(image_group),))
def compute_inputs_targets(self, group):
"""
Compute inputs and target outputs for the network.
"""
# load images and annotations
# list
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly apply visual effect
image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)
#
# # randomly transform data
# image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
# randomly apply misc effect
image_group, annotations_group = self.random_misc_group(image_group, annotations_group)
#
# # perform preprocessing steps
# image_group, annotations_group = self.preprocess_group(image_group, annotations_group)
#
# # check validity of annotations
# image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)
if len(image_group) == 0:
return None, None
# compute network inputs
inputs = self.compute_inputs(image_group, annotations_group)
# compute network targets
targets = self.compute_targets(image_group, annotations_group)
return inputs, targets
def __len__(self):
"""
Number of batches for generator.
"""
return len(self.groups)
    # Pick the image size when multi-scale training is enabled, then compute inputs and targets
def __getitem__(self, index):
"""
Keras sequence method for generating batches.
"""
        group = self.groups[self.current_index] # image indices of the current training group
        if self.multi_scale: # multi-scale training
            if self.current_index % 10 == 0: # switch the scale every 10 batches
                random_size_index = np.random.randint(0, len(self.multi_image_sizes)) # randomly pick a scale index
                self.image_size = self.multi_image_sizes[random_size_index] # the chosen scale
inputs, targets = self.compute_inputs_targets(group)
while inputs is None:
current_index = self.current_index + 1
if current_index >= len(self.groups):
                current_index = current_index % (len(self.groups)) # wrap around and start over from the first group
self.current_index = current_index
group = self.groups[self.current_index]
inputs, targets = self.compute_inputs_targets(group)
current_index = self.current_index + 1
if current_index >= len(self.groups):
current_index = current_index % (len(self.groups))
self.current_index = current_index
return inputs, targets
    def preprocess_image(self, image, c, s, tgt_w, tgt_h): # warp the image to the target size
        trans_input = get_affine_transform(c, s, (tgt_w, tgt_h)) # build the affine transform matrix
image = cv2.warpAffine(image, trans_input, (tgt_w, tgt_h), flags=cv2.INTER_LINEAR)
image = image.astype(np.float32)
        # subtract the per-channel ImageNet (BGR) means
        image[..., 0] -= 103.939
        image[..., 1] -= 116.779
        image[..., 2] -= 123.68
return image
def get_transformed_group(self, group):
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly transform data
image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
return image_group, annotations_group
def get_cropped_and_rotated_group(self, group):
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly transform data
image_group, annotations_group = self.random_crop_group(image_group, annotations_group)
image_group, annotations_group = self.random_rotate_group(image_group, annotations_group)
return image_group, annotations_group
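# A hedged sketch (class name and annotation layout are illustrative) of the
# contract a concrete dataset generator has to satisfy: implement size(),
# num_classes(), image_aspect_ratio(), load_image() and load_annotations(),
# with load_annotations() returning {'labels': class ids, 'bboxes': rows of
# [x1, y1, x2, y2]}. Attributes used by those methods must be set before
# calling the base __init__, because it groups the images immediately.
#
#     class ListGenerator(Generator):
#         def __init__(self, image_paths, annotations, **kwargs):
#             self.image_paths = image_paths
#             self.annotations = annotations  # list of (boxes, labels) tuples
#             super(ListGenerator, self).__init__(**kwargs)
#
#         def size(self):
#             return len(self.image_paths)
#
#         def num_classes(self):
#             return 20
#
#         def image_aspect_ratio(self, image_index):
#             h, w = cv2.imread(self.image_paths[image_index]).shape[:2]
#             return float(w) / float(h)
#
#         def load_image(self, image_index):
#             return cv2.imread(self.image_paths[image_index])
#
#         def load_annotations(self, image_index):
#             boxes, labels = self.annotations[image_index]
#             return {'bboxes': np.array(boxes, dtype=np.float64),
#                     'labels': np.array(labels, dtype=np.int32)}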
|
the-stack_106_21922
|
from pathlib import Path
def mkdir(out_dir):
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir(parents=True, exist_ok=True)
def load_current_env():
    '''Resolve the project root directory and make it the Python working directory'''
    import os
    # determine the root directory
    try: # Colab environment
        from google.colab import drive
        root = '/content/drive' # training on Colab
        drive.mount(root) # mount Google Drive
        root = f'{root}/MyDrive'
    except:
        root = '.' # local directory
    # change into the root so that this project's custom .py files
    # can be imported on Colab
os.chdir(root)
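# A short usage sketch (the directory name is illustrative):
#
#     load_current_env()           # mounts Drive on Colab, stays local otherwise
#     mkdir('output/checkpoints')  # creates the directory tree if it is missing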
|
the-stack_106_21924
|
#!/usr/bin/env python3
import argparse
import jinja2
import os
import yaml
import pnrg.filters
from distutils import dir_util
import logging, sys
class OutputFormat(object):
def __init__(self, arg_name, template_extension, output_suffix):
self.arg_name = arg_name
self.template_extension = template_extension
self.output_suffix = output_suffix if output_suffix else ""
# maps output format to template file extension
_OUTPUT_FORMATS = {
'latex': OutputFormat('latex', 'tex', None),
'formatted_text': OutputFormat('formatted_text', 'txt', '_formatted'),
'plain_text': OutputFormat('plain_text', 'txt', None)
}
def load_templates(_format, template_dir=os.path.join(os.getcwd(), 'template')):
loader = jinja2.FileSystemLoader(template_dir)
environment = jinja2.environment.Environment(loader=loader, trim_blocks=True, lstrip_blocks=True, extensions=['jinja2.ext.with_'])
# Use a different template syntax for tex files, because tex is heavily dependent '{', '}', and '%'.
# kang'd from (with permission!) http://flask.pocoo.org/snippets/55/
if _format == "latex":
#environment = jinja2.environment.Environment('((*', '*))', '(((', ')))', '((=', '=))',
environment.block_start_string = '((*'
environment.block_end_string = '*))'
environment.variable_start_string = '((('
environment.variable_end_string = ')))'
environment.comment_start_string = '((='
environment.comment_end_string = '=))'
environment.filters['escape_tex'] = pnrg.filters.escape_tex
_register_filters(environment)
return environment
def _register_filters(environment):
environment.filters['right'] = pnrg.filters.do_right
environment.filters['strftime'] = pnrg.filters.strftime
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generates multiple resume outputs from a singular YAML-formatted source")
parser.add_argument('--formats', '-f', help="output formats to generate. There must be a template of the same name in template/", nargs='+', choices=_OUTPUT_FORMATS.keys(), default=_OUTPUT_FORMATS.keys())
parser.add_argument('--destination', '-d', help="directory used to write generated documents", default="output")
parser.add_argument('--output-name', '-o', dest='output_name', help="base name used for generated files in 'destination'", default="document")
parser.add_argument('source_file', help="yaml-formatted containing the desired resume sections")
parser.add_argument('--verbose', '-v', help="Enable verbose logging", action='store_true')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
with open(args.source_file, 'r') as source_file:
# copy static-content into destination directory
# use distutils' dir_util instead of shutil.copytree, because copytree requires the destination to not exist before calling.
# removing the entire output directory every time just to use the convenient copytree() is confusing for users.
dir_util.copy_tree('./static-content', args.destination, update=True)
raw = yaml.safe_load(source_file)
# generate all requested formats
for doc_format in args.formats:
environment = load_templates(_format=doc_format)
logging.debug("found templates {}".format(environment.list_templates()))
template_ext = _OUTPUT_FORMATS[doc_format].template_extension
output_ext = template_ext # all existing templates generate files with the same file extension
suffix = _OUTPUT_FORMATS[doc_format].output_suffix
output_file = os.path.join(args.destination, args.output_name + suffix + os.path.extsep + output_ext)
with open(output_file, 'w') as output:
try:
template_name = os.path.join(doc_format, 'base' + os.path.extsep + template_ext)
logging.debug("template name = {}".format(template_name))
logging.debug("template name in template list: {}".format(template_name in environment.list_templates()))
template = environment.get_template(template_name)
output.write(template.render(root=raw))
except jinja2.TemplateNotFound as tnf:
print("Unable to find base template {}:\n{}".format(template_name, tnf))
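# Example invocation (a sketch; the script and YAML file names are illustrative,
# and templates are expected under template/<format>/):
#
#     python generate.py --formats latex plain_text \
#         --destination output --output-name resume resume.yaml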
|
the-stack_106_21925
|
from datetime import (
date,
datetime,
)
import subprocess
import sys
import numpy as np
import pytest
import pandas._config.config as cf
import pandas.util._test_decorators as td
from pandas import (
Index,
Period,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.plotting import (
deregister_matplotlib_converters,
register_matplotlib_converters,
)
from pandas.tseries.offsets import (
Day,
Micro,
Milli,
Second,
)
try:
from pandas.plotting._matplotlib import converter
except ImportError:
# try / except, rather than skip, to avoid internal refactoring
# causing an improper skip
pass
pytest.importorskip("matplotlib.pyplot")
dates = pytest.importorskip("matplotlib.dates")
def test_registry_mpl_resets():
# Check that Matplotlib converters are properly reset (see issue #27481)
code = (
"import matplotlib.units as units; "
"import matplotlib.dates as mdates; "
"n_conv = len(units.registry); "
"import pandas as pd; "
"pd.plotting.register_matplotlib_converters(); "
"pd.plotting.deregister_matplotlib_converters(); "
"assert len(units.registry) == n_conv"
)
call = [sys.executable, "-c", code]
subprocess.check_output(call)
def test_timtetonum_accepts_unicode():
assert converter.time2num("00:01") == converter.time2num("00:01")
class TestRegistration:
def test_dont_register_by_default(self):
# Run in subprocess to ensure a clean state
code = (
"import matplotlib.units; "
"import pandas as pd; "
"units = dict(matplotlib.units.registry); "
"assert pd.Timestamp not in units"
)
call = [sys.executable, "-c", code]
assert subprocess.check_call(call) == 0
@td.skip_if_no("matplotlib", min_version="3.1.3")
def test_registering_no_warning(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
_, ax = plt.subplots()
# Set to the "warn" state, in case this isn't the first test run
register_matplotlib_converters()
ax.plot(s.index, s.values)
plt.close()
def test_pandas_plots_register(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
# Set to the "warn" state, in case this isn't the first test run
with tm.assert_produces_warning(None) as w:
s.plot()
try:
assert len(w) == 0
finally:
plt.close()
def test_matplotlib_formatters(self):
units = pytest.importorskip("matplotlib.units")
# Can't make any assertion about the start state.
        # We check that toggling converters off removes it, and toggling it
# on restores it.
with cf.option_context("plotting.matplotlib.register_converters", True):
with cf.option_context("plotting.matplotlib.register_converters", False):
assert Timestamp not in units.registry
assert Timestamp in units.registry
@td.skip_if_no("matplotlib", min_version="3.1.3")
def test_option_no_warning(self):
pytest.importorskip("matplotlib.pyplot")
ctx = cf.option_context("plotting.matplotlib.register_converters", False)
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range("2017", periods=12))
_, ax = plt.subplots()
# Test without registering first, no warning
with ctx:
ax.plot(s.index, s.values)
# Now test with registering
register_matplotlib_converters()
with ctx:
ax.plot(s.index, s.values)
plt.close()
def test_registry_resets(self):
units = pytest.importorskip("matplotlib.units")
dates = pytest.importorskip("matplotlib.dates")
# make a copy, to reset to
original = dict(units.registry)
try:
# get to a known state
units.registry.clear()
date_converter = dates.DateConverter()
units.registry[datetime] = date_converter
units.registry[date] = date_converter
register_matplotlib_converters()
assert units.registry[date] is not date_converter
deregister_matplotlib_converters()
assert units.registry[date] is date_converter
finally:
            # restore original state
units.registry.clear()
for k, v in original.items():
units.registry[k] = v
class TestDateTimeConverter:
def setup_method(self):
self.dtc = converter.DatetimeConverter()
self.tc = converter.TimeFormatter(None)
def test_convert_accepts_unicode(self):
r1 = self.dtc.convert("12:22", None, None)
r2 = self.dtc.convert("12:22", None, None)
assert r1 == r2, "DatetimeConverter.convert should accept unicode"
def test_conversion(self):
rs = self.dtc.convert(["2012-1-1"], None, None)[0]
xp = dates.date2num(datetime(2012, 1, 1))
assert rs == xp
rs = self.dtc.convert("2012-1-1", None, None)
assert rs == xp
rs = self.dtc.convert(date(2012, 1, 1), None, None)
assert rs == xp
rs = self.dtc.convert("2012-1-1", None, None)
assert rs == xp
rs = self.dtc.convert(Timestamp("2012-1-1"), None, None)
assert rs == xp
# also testing datetime64 dtype (GH8614)
rs = self.dtc.convert("2012-01-01", None, None)
assert rs == xp
rs = self.dtc.convert("2012-01-01 00:00:00+0000", None, None)
assert rs == xp
rs = self.dtc.convert(
np.array(["2012-01-01 00:00:00+0000", "2012-01-02 00:00:00+0000"]),
None,
None,
)
assert rs[0] == xp
# we have a tz-aware date (constructed to that when we turn to utc it
# is the same as our sample)
ts = Timestamp("2012-01-01").tz_localize("UTC").tz_convert("US/Eastern")
rs = self.dtc.convert(ts, None, None)
assert rs == xp
rs = self.dtc.convert(ts.to_pydatetime(), None, None)
assert rs == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]), None, None)
assert rs[1] == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(), None, None)
assert rs[1] == xp
def test_conversion_float(self):
rtol = 0.5 * 10**-9
rs = self.dtc.convert(Timestamp("2012-1-1 01:02:03", tz="UTC"), None, None)
xp = converter.dates.date2num(Timestamp("2012-1-1 01:02:03", tz="UTC"))
tm.assert_almost_equal(rs, xp, rtol=rtol)
rs = self.dtc.convert(
Timestamp("2012-1-1 09:02:03", tz="Asia/Hong_Kong"), None, None
)
tm.assert_almost_equal(rs, xp, rtol=rtol)
rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
tm.assert_almost_equal(rs, xp, rtol=rtol)
def test_conversion_outofbounds_datetime(self):
# 2579
values = [date(1677, 1, 1), date(1677, 1, 2)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
@pytest.mark.parametrize(
"time,format_expected",
[
(0, "00:00"), # time2num(datetime.time.min)
(86399.999999, "23:59:59.999999"), # time2num(datetime.time.max)
(90000, "01:00"),
(3723, "01:02:03"),
(39723.2, "11:02:03.200"),
],
)
def test_time_formatter(self, time, format_expected):
# issue 18478
result = self.tc(time)
assert result == format_expected
@pytest.mark.parametrize("freq", ("B", "L", "S"))
def test_dateindex_conversion(self, freq):
rtol = 10**-9
dateindex = tm.makeDateIndex(k=10, freq=freq)
rs = self.dtc.convert(dateindex, None, None)
xp = converter.dates.date2num(dateindex._mpl_repr())
tm.assert_almost_equal(rs, xp, rtol=rtol)
@pytest.mark.parametrize("offset", [Second(), Milli(), Micro(50)])
def test_resolution(self, offset):
# Matplotlib's time representation using floats cannot distinguish
# intervals smaller than ~10 microsecond in the common range of years.
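        # (Added context, rough arithmetic: date2num stores times as float64
        # days; with matplotlib's historical year-1 epoch a modern date is
        # roughly 7e5 days, and 7e5 * 2.2e-16 days is about 1.4e-5 s, i.e. on
        # the order of 10 microseconds of spacing, which is why the smallest
        # offset exercised above is Micro(50).)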
ts1 = Timestamp("2012-1-1")
ts2 = ts1 + offset
val1 = self.dtc.convert(ts1, None, None)
val2 = self.dtc.convert(ts2, None, None)
if not val1 < val2:
raise AssertionError(f"{val1} is not less than {val2}.")
def test_convert_nested(self):
inner = [Timestamp("2017-01-01"), Timestamp("2017-01-02")]
data = [inner, inner]
result = self.dtc.convert(data, None, None)
expected = [self.dtc.convert(x, None, None) for x in data]
assert (np.array(result) == expected).all()
class TestPeriodConverter:
def setup_method(self):
self.pc = converter.PeriodConverter()
class Axis:
pass
self.axis = Axis()
self.axis.freq = "D"
def test_convert_accepts_unicode(self):
r1 = self.pc.convert("2012-1-1", None, self.axis)
r2 = self.pc.convert("2012-1-1", None, self.axis)
assert r1 == r2
def test_conversion(self):
rs = self.pc.convert(["2012-1-1"], None, self.axis)[0]
xp = Period("2012-1-1").ordinal
assert rs == xp
rs = self.pc.convert("2012-1-1", None, self.axis)
assert rs == xp
rs = self.pc.convert([date(2012, 1, 1)], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(date(2012, 1, 1), None, self.axis)
assert rs == xp
rs = self.pc.convert([Timestamp("2012-1-1")], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(Timestamp("2012-1-1"), None, self.axis)
assert rs == xp
rs = self.pc.convert("2012-01-01", None, self.axis)
assert rs == xp
rs = self.pc.convert("2012-01-01 00:00:00+0000", None, self.axis)
assert rs == xp
rs = self.pc.convert(
np.array(
["2012-01-01 00:00:00+0000", "2012-01-02 00:00:00+0000"],
dtype="datetime64[ns]",
),
None,
self.axis,
)
assert rs[0] == xp
def test_integer_passthrough(self):
# GH9012
rs = self.pc.convert([0, 1], None, self.axis)
xp = [0, 1]
assert rs == xp
def test_convert_nested(self):
data = ["2012-1-1", "2012-1-2"]
r1 = self.pc.convert([data, data], None, self.axis)
r2 = [self.pc.convert(data, None, self.axis) for _ in range(2)]
assert r1 == r2
class TestTimeDeltaConverter:
"""Test timedelta converter"""
@pytest.mark.parametrize(
"x, decimal, format_expected",
[
(0.0, 0, "00:00:00"),
(3972320000000, 1, "01:06:12.3"),
(713233432000000, 2, "8 days 06:07:13.43"),
(32423432000000, 4, "09:00:23.4320"),
],
)
def test_format_timedelta_ticks(self, x, decimal, format_expected):
tdc = converter.TimeSeries_TimedeltaFormatter
result = tdc.format_timedelta_ticks(x, pos=None, n_decimals=decimal)
assert result == format_expected
@pytest.mark.parametrize("view_interval", [(1, 2), (2, 1)])
def test_call_w_different_view_intervals(self, view_interval, monkeypatch):
        # previously broke on reversed xlims; see GH37454
class mock_axis:
def get_view_interval(self):
return view_interval
tdc = converter.TimeSeries_TimedeltaFormatter()
monkeypatch.setattr(tdc, "axis", mock_axis())
tdc(0.0, 0)
|
the-stack_106_21926
|
unconfirmed_users = ['alice', 'brain', 'candace']
confirmed_users = []
while unconfirmed_users:
current_user = unconfirmed_users.pop()
print("Verifying user: "+current_user.title())
confirmed_users.append(current_user)
print("\nThe following users have been confirmed:")
for confirmed_user in confirmed_users:
print(confirmed_user.title())
|
the-stack_106_21929
|
import pytest
from cactusbot.handlers import SpamHandler
from cactusbot.packets import MessagePacket
async def get_user_id(_):
return 0
class MockAPI:
async def get_trust(self, _):
class Response:
status = 404
return Response()
spam_handler = SpamHandler(MockAPI())
@pytest.mark.asyncio
async def test_on_message():
assert (await spam_handler.on_message(
MessagePacket("THIS CONTAINS EXCESSIVE CAPITAL LETTERS.")
))[0].text == "Please do not spam capital letters."
assert (await spam_handler.on_message(MessagePacket(
"This is what one hundred emoji looks like!",
*(("emoji", "😮"),) * 100
)))[0].text == "Please do not spam emoji."
assert (await spam_handler.on_message(MessagePacket(
"Check out my amazing Twitter!",
("url", "twitter.com/CactusDevTeam",
"https://twitter.com/CactusDevTeam")
)))[0].text == "Please do not post URLs."
assert await spam_handler.on_message(
MessagePacket("PLEASE STOP SPAMMING CAPITAL LETTERS.", role=50)
) is None
def test_check_caps():
assert not spam_handler.check_caps("")
assert not spam_handler.check_caps("X")
assert not spam_handler.check_caps("3.14159265358979")
assert not spam_handler.check_caps(
"This is a reasonable message!")
assert not spam_handler.check_caps("WOW, that was incredible!")
assert spam_handler.check_caps(
"THIS IS DEFINITELY CAPITALIZED SPAM.")
assert spam_handler.check_caps(
"THAT was SO COOL! OMG WOW FANTASTIC!")
def test_check_emoji():
assert not spam_handler.check_emoji(MessagePacket(
"This message contains no emoji."
))
assert not spam_handler.check_emoji(MessagePacket(
"Wow, that was great!", ("emoji", "😄")))
assert not spam_handler.check_emoji(MessagePacket(
*(("emoji", "🌵"),) * 6
))
assert not spam_handler.check_emoji(MessagePacket(
("emoji", "😃"),
("emoji", "😛"),
("emoji", "🌵"),
("emoji", "🐹"),
("emoji", "🥔"),
("emoji", "💚")
))
assert spam_handler.check_emoji(MessagePacket(
*(("emoji", "🌵"),) * 7
))
assert spam_handler.check_emoji(MessagePacket(
("emoji", "😃"),
("emoji", "😛"),
("emoji", "🌵"),
("emoji", "🐹"),
("emoji", "🥔"),
("emoji", "💚"),
("emoji", "😎")
))
assert spam_handler.check_emoji(MessagePacket(
*(("emoji", "😄"),) * 100
))
def test_check_urls():
assert not spam_handler.contains_urls(MessagePacket(
"This message contains no URLs."
))
assert not spam_handler.contains_urls(MessagePacket(
"google.com was not parsed as a URL, and is therefore 'fine'."
))
assert spam_handler.contains_urls(MessagePacket(
"You should go check out ",
("url", "cactusbot.rtfd.org", "https://cactusbot.rtfd.org")
))
|
the-stack_106_21931
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial, reduce
import itertools as it
import operator as op
from typing import (Tuple, List, Sequence, Set, Dict, Any, Callable, Union,
Optional)
from jax import core
from jax._src import source_info_util
from jax.core import Var, Literal, Atom, Tracer
from jax._src.util import (safe_zip, safe_map, curry, unzip2, split_list,
tuple_delete)
import jax._src.pretty_printer as pp
map = safe_map
zip = safe_zip
def identity(x): return x
DType = Any
NDArray = Any
# Dynamic shape jaxprs
## Element types
class EltTy: pass
class BaseType(EltTy):
def __init__(self, dtype: DType):
self._dtype = np.dtype(dtype)
def __repr__(self):
return f'BaseType({self._dtype.name})'
def __hash__(self):
return hash(self._dtype)
def __eq__(self, other):
return isinstance(other, BaseType) and self._dtype == other._dtype
class BoundedIntTy(EltTy):
def __init__(self, bound: int):
assert isinstance(bound, int)
self._bound = bound
def __repr__(self):
return f'BIntTy{{≤{self._bound}}}'
def __eq__(self, other):
return isinstance(other, BoundedIntTy) and self._bound == other._bound
## Array types
class AbsArray(core.AbstractValue):
def __init__(self, shape, eltTy):
assert isinstance(shape, tuple)
assert isinstance(eltTy, EltTy)
self.shape = shape
self._eltTy = eltTy
def str_short(self, short_dtypes=False):
del short_dtypes # ignored
shape = f'[{",".join(str(d) for d in self.shape)}]' if self.shape else ''
if isinstance(self._eltTy, BoundedIntTy):
return f'BInt{{≤{self._eltTy._bound}}}{shape}'
elif isinstance(self._eltTy, BaseType):
dtype = self._eltTy._dtype.name
return f'{dtype}{shape}'
else:
return repr(self)
def __eq__(self, other):
if (isinstance(other, AbsArray) and self._eltTy == other._eltTy and
len(self.shape) == len(other.shape)):
for a, b in zip(self.shape, other.shape):
if type(a) is type(b) is int:
if a != b: return False
elif type(a) is type(b) is BoundedInt:
if a is not b: return False
elif type(a) is type(b) is Var:
if a is not b: return False
elif type(a) is type(b) is AbsArray:
if a != b: return False
elif type(a) is type(b) is DimIndexingExpr:
if a.name is not b.name or a.indices != b.indices: return False
else:
return False
else:
return True
return False
# this duck-typing is needed by eg ad.py using dtypes.py
@property
def dtype(self):
if isinstance(self._eltTy, BaseType):
return self._eltTy._dtype
else:
raise Exception
def at_least_vspace(self):
return AbsArray(self.shape, self._eltTy)
def join(self, other):
if self == other:
return self
raise NotImplementedError # TODO
class DimIndexingExpr:
def __init__(self, name, indices):
assert isinstance(name, (Var, Tracer))
assert (isinstance(indices, tuple) and
all(isinstance(i, int) for i in indices))
self.name = name
self.indices = indices
def __repr__(self):
indices = '.'.join(map(str, self.indices))
return f'{self.name}.{indices}'
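# Illustrative note (added, not part of the original source): DimIndexingExpr is
# how a ragged axis appears inside a shape. For example, an abstract value whose
# shape prints as [3,n.0] has a second axis whose length varies with the first
# index: `n` names a length-3 array of bounded ints, and `indices=(0,)` says that
# axis 0 of the enclosing shape indexes into it.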
## DJaxprs
class DJaxprTy:
in_dim_binders: List[Var]
in_types: List[core.AbstractValue]
out_dim_binders: List[Var]
out_types: List[core.AbstractValue]
def __init__(self, in_dim_binders, in_types, out_dim_binders, out_types):
self.in_dim_binders = in_dim_binders
self.in_types = in_types
self.out_dim_binders = out_dim_binders
self.out_types = out_types
def __repr__(self):
in_dim_binders = pp_vars(self.in_dim_binders)
in_types = ', '.join(aval.str_short() for aval in self.in_types)
out_dim_binders = pp_vars(self.out_dim_binders)
out_types = ', '.join(aval.str_short() for aval in self.out_types)
return f'[{in_dim_binders}] [{in_types}] -> [{out_dim_binders}] [{out_types}]'
class DJaxpr:
in_dim_binders: List[Var]
in_binders: List[Var]
out_dims: List[Atom]
outs: List[Atom]
eqns: List[core.JaxprEqn] # reusing existing eqns, helps reuse some tracing
def __init__(self, in_dim_binders, in_binders, out_dims, outs, eqns):
assert all(isinstance(v, Var) and isinstance(v.aval, AbsArray) and
isinstance(v.aval._eltTy, BoundedIntTy) for v in in_dim_binders)
assert all(isinstance(v, Var) for v in in_binders)
assert all(isinstance(x, (Var, Literal)) and isinstance(x.aval, AbsArray) and
isinstance(x.aval._eltTy, BoundedIntTy) for x in out_dims)
assert all(isinstance(x, (Var, Literal)) for x in outs)
assert all(isinstance(e, core.JaxprEqn) for e in eqns)
self.in_dim_binders = in_dim_binders
self.in_binders = in_binders
self.out_dims = out_dims
self.outs = outs
self.eqns = eqns
def __repr__(self):
return str(pp_djaxpr(self))
def pp_djaxpr(jaxpr: DJaxpr) -> pp.Doc:
eqns = map(pp_eqn, jaxpr.eqns)
in_dim_binders = pp_vars(jaxpr.in_dim_binders)
in_binders = pp_vars(jaxpr.in_binders)
out_dims = ', '.join(map(str, jaxpr.out_dims))
outs = ', '.join(map(str, jaxpr.outs))
out_dim_types = pp_vars(jaxpr.out_dims)
outs_type = ', '.join(v.aval.str_short() for v in jaxpr.outs)
return (pp.text(f'{{ lambda {in_dim_binders} ; {in_binders} .')
+ (pp.text('let ') + pp.nest(2, pp.brk() + pp.join(pp.brk(), eqns)) +
pp.text(f'in ( {out_dims} ; {outs} ) '
f': ( {out_dim_types} ; {outs_type} ) }}')))
def pp_vars(vs: Sequence[Atom]) -> str:
return ', '.join(f'{v}:{v.aval.str_short()}' for v in vs)
def pp_eqn(eqn: core.JaxprEqn) -> pp.Doc:
lhs = pp_vars(eqn.outvars)
pp_lhs = pp.text(f'{lhs} =')
pp_rhs = (pp.text(eqn.primitive.name) +
core.pp_kv_pairs(sorted(eqn.params.items()), core.JaxprPpContext())
+ pp.text(' ') + pp.text(' '.join(map(str, eqn.invars))))
return pp_lhs + pp.text(' ') + pp_rhs
# Typechecking DJaxprs
def typecheck_jaxpr(jaxpr: DJaxpr):
env: Set[Var] = set() # bound variables
for v in jaxpr.in_dim_binders:
if not (isinstance(v.aval, AbsArray) and
isinstance(v.aval._eltTy, BoundedIntTy)): raise TypeError
typecheck_type(env, v.aval)
env.add(v)
for v in jaxpr.in_binders:
typecheck_type(env, v.aval)
for v in jaxpr.in_binders:
env.add(v)
for eqn in jaxpr.eqns:
for x in eqn.invars:
typecheck_atom(env, x)
rule = typecheck_rules[eqn.primitive]
out_types = rule(*eqn.invars, **eqn.params)
subst: Dict[Var, Var] = {}
for v, t in zip(eqn.outvars, out_types):
if isinstance(t, Var):
aval = substitute(subst, t.aval)
if v.aval != aval: raise TypeError(f'{v.aval} != {aval}')
subst[t] = v
elif isinstance(t, core.AbstractValue):
aval = substitute(subst, t)
if v.aval.strip_weak_type() != aval:
raise TypeError(f'{v.aval} != {aval}')
else:
assert False # typecheck rule produced unexpected type
typecheck_type(env, v.aval)
env.add(v)
in_types = [v.aval for v in jaxpr.in_binders]
out_types = []
for x in jaxpr.outs:
aval = typecheck_atom(env, x)
out_types.append(aval)
return DJaxprTy(jaxpr.in_dim_binders, in_types, jaxpr.out_dims, out_types)
def typecheck_type(env, aval):
if isinstance(aval, (core.AbstractUnit, core.ShapedArray)):
return aval # all syntactic forms are valid
elif isinstance(aval, AbsArray):
for i, d in enumerate(aval.shape):
if isinstance(d, int):
continue
elif isinstance(d, Var):
if d not in env: raise TypeError('unbound dim size')
if not (isinstance(d.aval, AbsArray) and not d.aval.shape and
isinstance(d.aval._eltTy, BoundedIntTy)):
raise TypeError(f'dim var of unexpected type: {d.aval}')
elif isinstance(d, DimIndexingExpr):
if d.name not in env: raise TypeError('unbound dim size')
if not (isinstance(d.name.aval, AbsArray) and
isinstance(d.name.aval._eltTy, BoundedIntTy)):
raise TypeError(f'dim var of unexpected type: {d.name.aval}')
d_indices_set = set(d.indices)
if i in d_indices_set:
raise TypeError(f"circular dim indexing expression: {d}")
for j in d.indices:
d_j = aval.shape[j]
if (isinstance(d_j, DimIndexingExpr) and
not d_indices_set.issuperset(d_j.indices)):
raise TypeError(f"dim indexing not transitively closed: {d}")
expected_idx_array_shape = tuple(aval.shape[j] for j in d.indices)
if d.name.aval.shape != expected_idx_array_shape:
raise TypeError(f'incompatible shapes in dim indexing: {aval}')
else:
raise TypeError(f'unexpected type in shape: {type(d)}')
return aval
else:
raise TypeError(f'unknown type: {aval}')
def typecheck_atom(env, x):
if isinstance(x, Literal):
return core.raise_to_shaped(core.get_aval(x.val))
elif isinstance(x, Var):
return typecheck_type(env, x.aval)
else:
raise TypeError(f'atom of unexpected type {x}')
def substitute(subst, aval):
if isinstance(aval, AbsArray):
new_shape = []
for d in aval.shape:
if isinstance(d, Var):
new_d = subst.get(d, d)
elif isinstance(d, DimIndexingExpr):
new_d = DimIndexingExpr(subst.get(d.name, d.name), d.indices)
else:
new_d = d
new_shape.append(new_d)
return AbsArray(tuple(new_shape), aval._eltTy)
else:
return aval
typecheck_rules: Dict[core.Primitive, Callable] = {}
# Interpreting DJaxprs
def eval_jaxpr(jaxpr, dim_args, args):
env: Dict[Var, Any] = {}
def read(v):
if type(v) is core.Literal:
return v.val
else:
return env[v]
def write(v, val):
env[v] = val
write(core.unitvar, core.unit)
map(write, jaxpr.in_dim_binders, dim_args)
map(write, jaxpr.in_binders, args)
for eqn in jaxpr.eqns:
in_vals = map(read, eqn.invars)
ans = eqn.primitive.bind(*in_vals, **eqn.params)
if eqn.primitive.multiple_results:
map(write, eqn.outvars, ans)
elif len(eqn.outvars) > 1:
# TODO a jaxpr unpacks dependent tuples, while Python packages them up
map(write, eqn.outvars, eqn.primitive.unpack_result(ans))
else:
write(eqn.outvars[0], ans)
return map(read, jaxpr.out_dims), map(read, jaxpr.outs)
@curry
def jaxpr_as_fun(jaxpr, *args):
shapevars_to_vals: Dict[Var, Any] = dict(
(d, t) for v, x in zip(jaxpr.in_binders, args) if isinstance(v.aval, AbsArray)
for d, t in zip(v.aval.shape, x.shape) if isinstance(d, Var)
and x is not core.unit) # TODO partial eval assumes we can plug in units?
dim_args = [shapevars_to_vals[v] for v in jaxpr.in_dim_binders]
_, out = eval_jaxpr(jaxpr, dim_args, args)
return out
# Data representations
class BoundedInt:
val: Union[int, Tracer]
bound: int
def __init__(self, val: Union[int, Tracer], bound: int):
self._val = val
self._bound = bound
def __repr__(self):
return f'{self._val}{{≤{self._bound}}}'
def __eq__(self, other):
if isinstance(other, BoundedInt) and self._bound == other._bound:
return self._val is other._val or self._val == other._val
elif isinstance(other, int):
return self._val == other
else:
raise Exception
class DimIndexer:
data: NDArray
indices: Tuple[int, ...]
def __init__(self, data, indices):
self._data = data
self._indices = indices
def __repr__(self):
indices = '.'.join(map(str, self._indices))
data = f'{self._data._data}'
return f'{data}.{indices}'
# We want these to duck-type ndarrays when the element type is BaseType.
class Array:
def __init__(self,
shape: Tuple[Union[int, BoundedInt, DimIndexer], ...],
eltTy: EltTy,
data: NDArray):
self.shape = shape
self._eltTy = eltTy
self._data = data
@property
def dtype(self):
if isinstance(self._eltTy, BaseType):
return self._eltTy._dtype
else:
raise Exception
def __repr__(self):
dtypestr = (self._eltTy._dtype.name if isinstance(self._eltTy, BaseType)
else f'BInt{{≤{self._eltTy._bound}}}') # type: ignore
shapestr = ','.join(map(str, self.shape))
if any(isinstance(d, DimIndexer) for d in self.shape):
# find the last DimIndexer, as we'll treat chunks below that as
# rectangular
last = next(i for i, d in reversed(list(enumerate(self.shape)))
if isinstance(d, DimIndexer))
shape_prefix = tuple(d._val if type(d) is BoundedInt else d
for d in self.shape[:last])
outs = []
for idx in it.product(*map(range, shape_prefix)):
slices = [slice(d._data._data[tuple(idx[i] for i in d._indices)])
if isinstance(d, DimIndexer) else
slice(d._val) if isinstance(d, BoundedInt) else
slice(None) for d in self.shape[last:]]
full_index = (*idx, *slices)
data = self._data[full_index]
outs.append(f'{idx}:\n{data}')
return f'{dtypestr}[{shapestr}] with values:\n' + '\n\n'.join(outs)
else:
slices = tuple(slice(d._val) if type(d) is BoundedInt else slice(None)
for d in self.shape)
data = self._data[slices]
return f'{dtypestr}[{shapestr}] with value:\n{data}'
def __array__(self):
if any(isinstance(d, DimIndexer) for d in self.shape):
raise NotImplementedError # ragged ndarray
else:
slices = tuple(slice(d._val) if type(d) is BoundedInt else slice(None)
for d in self.shape)
return np.array(self._data[slices])
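# Illustrative sketch (added, an assumption about intended usage rather than
# code from the original file): a ragged batch such as [[1, 2, 3], [4]] can be
# carried as
#   Array(shape=(BoundedInt(2, 4), DimIndexer(<i32 lengths [3, 1]>, (0,))),
#         eltTy=BaseType(np.dtype('int32')), data=<bound-sized padded buffer>)
# i.e. _data is always a rectangular ndarray padded out to the bounds, and the
# shape entries record how much of it is logically valid along each axis.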
# Tracing to embed DJaxprs in Python
from jax import linear_util as lu
from jax.interpreters import partial_eval as pe
from jax._src.api_util import flatten_fun
from jax.tree_util import tree_flatten, tree_unflatten
def make_djaxpr(fun, *args, **kwargs):
args, in_tree = tree_flatten((args, kwargs))
f, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)
in_avals = [core.raise_to_shaped(core.get_aval(x)) for x in args]
return trace_to_jaxpr_dynamic(f, in_avals)
def trace_to_jaxpr_dynamic(fun: lu.WrappedFun, in_avals: Sequence[core.AbstractValue]):
with core.new_main(DJaxprTrace, dynamic=True) as main:
main.jaxpr_stack = () # type: ignore
outs = trace_to_subjaxpr_dynamic(fun, main, in_avals)
del main
return outs
def trace_to_subjaxpr_dynamic(fun: lu.WrappedFun, main: core.MainTrace,
in_avals: Sequence[core.AbstractValue]):
frame = DJaxprStackFrame()
with pe.extend_jaxpr_stack(main, frame):
trace = DJaxprTrace(main, core.cur_sublevel())
in_dim_tracers, in_avals = _place_in_dim_tracers_in_shapes(trace, in_avals)
in_tracers = map(trace.new_arg, in_avals)
ans = fun.call_wrapped(*in_tracers)
out_tracers = map(trace.full_raise, ans)
out_dim_tracers = _extract_out_dim_tracers_from_shapes(main, in_dim_tracers, out_tracers)
return frame.to_jaxpr(in_dim_tracers, in_tracers, out_dim_tracers, out_tracers)
def _place_in_dim_tracers_in_shapes(trace, in_avals):
dim_tracers = {}
new_in_avals = []
for aval in in_avals:
if not isinstance(aval, AbsArray):
new_in_avals.append(aval)
else:
new_shape = []
for d in aval.shape:
if isinstance(d, AbsArray):
assert d.shape == () and isinstance(d._eltTy, BoundedIntTy)
dim_tracer = dim_tracers.get(id(d))
if dim_tracer is None:
dim_tracer = dim_tracers[id(d)] = trace.new_arg(d)
new_shape.append(dim_tracer)
elif isinstance(d, (int, BoundedInt)):
new_shape.append(d)
else:
raise NotImplementedError(d) # TODO
new_aval = AbsArray(tuple(new_shape), aval._eltTy)
new_in_avals.append(new_aval)
return list(dim_tracers.values()), new_in_avals
def _extract_out_dim_tracers_from_shapes(main, in_dim_tracers, out_tracers):
seen = {id(d) for d in in_dim_tracers}
def take(d):
if isinstance(d, Tracer):
return d._trace.main is main and id(d) not in seen and not seen.add(id(d))
elif isinstance(d, DimIndexingExpr):
return take(d.name)
else:
return False
return [d.name if isinstance(d, DimIndexingExpr) else d
for t in out_tracers if isinstance(t.aval, AbsArray)
for d in t.aval.shape if take(d)]
class DJaxprTrace(pe.DynamicJaxprTrace):
def process_primitive(self, primitive, tracers, params):
rule = custom_staging_rules.get(primitive)
if rule:
return rule(self, tracers, params)
else:
# If there's no special staging rule, by default do regular Jaxpr staging
return super().process_primitive(primitive, tracers, params)
def get_const(self, tracer):
assert isinstance(tracer, Tracer)
return self.frame.constvar_to_val.get(self.frame.tracer_to_var.get(id(tracer)))
def new_const(self, val):
if isinstance(val, BoundedInt):
raise NotImplementedError # TODO
elif isinstance(val, Array) and val.shape:
raise NotImplementedError # TODO
else:
return super().new_const(val)
custom_staging_rules: Dict[core.Primitive, Callable] = {}
class DJaxprStackFrame(pe.JaxprStackFrame):
def to_jaxpr(self, in_dim_tracers, in_tracers, out_dim_tracers, out_tracers):
t2v = lambda t: self.tracer_to_var[id(t)]
in_dim_binders, in_binders = map(t2v, in_dim_tracers), map(t2v, in_tracers)
out_dims, outs = map(t2v, out_dim_tracers), map(t2v, out_tracers)
# only include constants that are used
used_vars = ({a for eqn in self.eqns for a in eqn.invars if isinstance(a, Var)} |
{a for grp in [out_dims, outs] for a in grp if isinstance(a, Var)})
constvars, constvals = unzip2(
(v, c) for v, c in self.constvar_to_val.items() if v in used_vars)
in_binders = [*constvars, *in_binders]
# promote some lambda binders to pi binders
used_shape_vars = ({d for eqn in self.eqns for v in eqn.outvars
if isinstance(v.aval, AbsArray)
for d in v.aval.shape if isinstance(d, Var)} |
{d.name for eqn in self.eqns for v in eqn.outvars
if isinstance(v.aval, AbsArray)
for d in v.aval.shape if isinstance(d, DimIndexingExpr)})
lambda_binders = [v not in used_shape_vars for v in in_binders]
converted_binders, in_binders = partition_list(lambda_binders, in_binders)
in_dim_binders = in_dim_binders + converted_binders
out_dims = [v for v in out_dims if v not in in_dim_binders] # TODO
jaxpr = DJaxpr(in_dim_binders, in_binders, out_dims, outs, self.eqns)
typecheck_jaxpr(jaxpr)
return jaxpr, constvals, lambda_binders
def newvar(self, aval):
if isinstance(aval, AbsArray) and aval.shape:
# replace any tracers in the shape with their corresponding variables
shape = []
for d in aval.shape:
if isinstance(d, Tracer):
shape.append(self.tracer_to_var[id(d)])
elif isinstance(d, DimIndexingExpr):
assert isinstance(d.name, Tracer)
shape.append(DimIndexingExpr(self.tracer_to_var[id(d.name)], d.indices))
else:
shape.append(d)
aval = AbsArray(tuple(shape), aval._eltTy)
return self.gensym(aval)
def partition_list(bs, lst):
lists = lst1, lst2 = [], []
for b, x in zip(bs, lst):
lists[b].append(x)
return lst1, lst2
def _raise_absarray_to_type_level(aval: AbsArray, weak_type: bool):
assert isinstance(aval, AbsArray)
unique_avals: Dict[int, AbsArray] = {}
shape = []
for d in aval.shape:
if isinstance(d, BoundedInt):
shape.append(unique_avals.setdefault(id(d), AbsArray((), BoundedIntTy(d._bound))))
elif isinstance(d, DimIndexer):
raise NotImplementedError # TODO
else:
shape.append(d)
return AbsArray(tuple(shape), aval._eltTy)
core.raise_to_shaped_mappings[AbsArray] = _raise_absarray_to_type_level
def _abstractify_array_for_ad(x: Array): # TODO misleading name, used in djit
return AbsArray(x.shape, x._eltTy)
core.pytype_aval_mappings[Array] = _abstractify_array_for_ad
def _abstractify_bdint(x: BoundedInt):
return AbsArray((), BoundedIntTy(x._bound))
core.pytype_aval_mappings[BoundedInt] = _abstractify_bdint
# XLA lowering
from jax.interpreters import xla
from jax._src.lib import xla_bridge as xb
from jax._src.lib import xla_client as xc
xe = xc._xla
xops = xc._xla.ops
def _abstractify_array_to_type_level(x: Array):
return core.raise_to_shaped(core.get_aval(x))
xla.pytype_aval_mappings[Array] = _abstractify_array_to_type_level
def _array_xla_shape(aval: AbsArray):
if isinstance(aval._eltTy, BaseType):
dtype = aval._eltTy._dtype
shape = [d._eltTy._bound if isinstance(d, AbsArray) and not d.shape
else d for d in aval.shape]
return (xla.xc.Shape.array_shape(dtype, shape),)
elif isinstance(aval._eltTy, BoundedIntTy):
shape = [d._bound if isinstance(d, BoundedInt) else d for d in aval.shape]
return (xla.xc.Shape.array_shape(np.dtype('int32'), shape),)
else:
raise NotImplementedError
xla.xla_shape_handlers[AbsArray] = _array_xla_shape
xla.canonicalize_dtype_handlers[Array] = identity
def _array_device_put(x, device):
return xla._device_put_array(x._data, device)
xla.device_put_handlers[Array] = _array_device_put
def _bdint_device_put(x, device):
return xla._device_put_scalar(x._val, device)
xla.device_put_handlers[BoundedInt] = _bdint_device_put
def _bdint_canoncalize_dtype(x):
return BoundedInt(xla.canonicalize_dtype(x._val), x._bound)
xla.canonicalize_dtype_handlers[BoundedInt] = _bdint_canoncalize_dtype
def _make_params(c, dim_in_avals, in_avals):
n = it.count()
make = lambda a: [xb.parameter(c, next(n), s) for s in xla.aval_to_xla_shapes(a)]
return map(make, dim_in_avals), map(make, in_avals)
def _xla_consts(c, consts):
unique_consts = {id(const): const for const in consts}
xla_consts = {
id_: [xla.pyval_to_ir_constant(c, const)]
for id_, const in unique_consts.items()}
return [xla_consts[id(const)] for const in consts]
def djaxpr_subcomp(c, jaxpr, dim_args, args):
env: Dict[Var, Sequence[xe.XlaOp]] = {}
def aval(v):
return xla.abstractify(v.val) if type(v) is core.Literal else v.aval
def read(v):
if type(v) is core.Literal:
return [xla.pyval_to_ir_constant(c, xla.canonicalize_dtype(v.val))]
else:
return env[v]
def write(v, nodes):
env[v] = nodes
write(core.unitvar, xla._make_unit_constant(c))
map(write, jaxpr.in_dim_binders, dim_args)
map(write, jaxpr.in_binders, args)
for eqn in jaxpr.eqns:
in_vals, in_avals = map(read, eqn.invars), map(aval, eqn.invars)
in_dims = {v:read(v) for a in in_avals if isinstance(a, AbsArray)
for v in a.shape if isinstance(v, Var)}
rule = translations[eqn.primitive]
out_vals = rule(c, in_dims, in_avals, in_vals, **eqn.params)
map(write, eqn.outvars, out_vals)
return map(read, jaxpr.out_dims), map(read, jaxpr.outs)
def execute_compiled(compiled, partitioner, handlers, dim_vals, args):
input_bufs = list(it.chain(
(buf for x in dim_vals for buf in xla.device_put(x, None)),
(buf for x in args for buf in xla.device_put(x, None))))
out_bufs = compiled.execute(input_bufs)
dims_dict, grouped_bufs = partitioner(out_bufs)
return [handler(dims_dict, bs) for handler, bs in zip(handlers, grouped_bufs)]
def result_partitioner(in_dim_binders, in_dim_vals, out_dims, out_bufcounts):
out_dimvars = [v for v in out_dims if isinstance(v, Var)]
split_sizes = [len(out_dimvars)] + out_bufcounts[:-1]
def dim_handler(v, buf):
if not v.aval.shape:
return BoundedInt(int(buf.to_py()), v.aval._eltTy._bound)
else:
return Array(v.aval.shape, v.aval._eltTy, buf.to_py())
def partitioner(bufs):
dim_bufs, *grouped_bufs = split_list(bufs, split_sizes)
dims_dict = dict(it.chain(
zip(in_dim_binders, in_dim_vals),
zip(out_dimvars, map(dim_handler, out_dimvars, dim_bufs))))
return dims_dict, grouped_bufs
return partitioner
def result_handler(aval):
if isinstance(aval, AbsArray):
return array_result_handler(aval)
else:
handler = xla.aval_to_result_handler(None, aval)
return lambda _, bufs: handler(*bufs)
def array_result_handler(aval):
if not isinstance(aval._eltTy, BaseType): raise NotImplementedError
padded_shape = []
for d in aval.shape:
if isinstance(d, int):
padded_shape.append(d)
elif isinstance(d, Var):
padded_shape.append(d.aval._eltTy._bound)
elif isinstance(d, DimIndexingExpr):
padded_shape.append(d.name.aval._eltTy._bound)
else:
raise NotImplementedError # TODO
padded_aval = core.ShapedArray(tuple(padded_shape), aval._eltTy._dtype)
array_handler = xla.array_result_handler(None, padded_aval)
def handler(dims_dict, bufs):
shape = tuple(dims_dict[d] if isinstance(d, Var) else
DimIndexer(dims_dict[d.name], d.indices) if isinstance(d, DimIndexingExpr) else
d for d in aval.shape)
return Array(shape, aval._eltTy, array_handler(*bufs))
return handler
def aval_to_num_buffers(aval):
if isinstance(aval, AbsArray):
return 1
else:
return len(xla.aval_to_xla_shapes(aval))
translations: Dict[core.Primitive, Callable] = {}
dynamic_xla_call_p = core.Primitive('dxla_call')
dynamic_xla_call_p.multiple_results = True
@dynamic_xla_call_p.def_impl
def _dynamic_xla_call_impl(*args, jaxpr, num_consts):
in_dim_vals, consts, args = split_list(args, [len(jaxpr.in_dim_binders), num_consts])
dim_in_avals = [v.aval for v in jaxpr.in_dim_binders]
c = xc.XlaBuilder("dxla_call")
dim_params, params = _make_params(c, dim_in_avals, map(xla.abstractify, args))
const_params = _xla_consts(c, consts)
dim_outs, outs = djaxpr_subcomp(c, jaxpr, dim_params, const_params + params)
out = xops.Tuple(c, [o for ops in dim_outs + outs for o in ops])
compiled = xb.get_backend(None).compile(c.build(out))
result_handlers = map(result_handler, [v.aval for v in jaxpr.outs])
out_bufcounts = [aval_to_num_buffers(v.aval) for v in jaxpr.outs]
partitioner = result_partitioner(jaxpr.in_dim_binders, in_dim_vals,
jaxpr.out_dims, out_bufcounts)
return execute_compiled(compiled, partitioner, result_handlers,
in_dim_vals, args)
def djit(fun):
def f_jitted(*args, **kwargs):
args, in_tree = tree_flatten((args, kwargs))
f, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)
# TODO we shouldn't dedup avals one array at a time; need to do it for the
# full argument list!
# unique_avals: Dict[int, core.AbstractValue] = {}
in_avals = [core.raise_to_shaped(core.get_aval(x)) for x in args]
jaxpr, consts, unconverted_binders = trace_to_jaxpr_dynamic(f, in_avals)
num_consts = len(consts)
args = [*consts, *args]
dim_vals, args = _extract_dim_vals(jaxpr.in_dim_binders, jaxpr.in_binders,
unconverted_binders, args)
out_flat = dynamic_xla_call_p.bind(*dim_vals, *args, jaxpr=jaxpr,
num_consts=num_consts)
return tree_unflatten(out_tree(), out_flat)
return f_jitted
def _extract_dim_vals(in_dim_binders, in_binders, unconverted_binders, args):
converted_in_dim_vals, args = partition_list(unconverted_binders, args)
sizes = {var: size for binder, arg in zip(in_binders, args)
for var, size in zip(binder.aval.shape, np.shape(arg))
if isinstance(var, Var)}
num_binders = len(in_dim_binders) - len(converted_in_dim_vals)
in_dim_vals = [sizes[v] for v in in_dim_binders[:num_binders]] + converted_in_dim_vals
return in_dim_vals, args
def traceable_to_padded_translation(traceable):
def translation(c, dims, avals, operands, **params):
dim_avals = [core.ShapedArray((), np.int32) for _ in dims]
padded_avals = map(_replace_vars_with_bounds, avals)
@lu.wrap_init
def fun(*args):
dim_sizes, args = split_list(args, [len(dims)])
logical_sizes = dict(zip(dims, dim_sizes))
logical_shapes = [tuple([logical_sizes.get(d, d) for d in aval.shape])
for aval in avals] # TODO more cases
return traceable(logical_shapes, *args, **params)
in_avals = [*dim_avals, *padded_avals]
jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(fun, in_avals)
operands_ = it.chain.from_iterable([*dims.values(), *operands])
platform = "cpu" # TODO: don't hardwire in the CPU translation.
ctx = xla.TranslationContext(c, platform, xla.AxisEnv(1, (), ()), '')
outs = xla.jaxpr_subcomp(ctx, jaxpr, xla._xla_consts(c, consts), *operands_)
return xla._partition_outputs(
[aval_to_num_buffers(aval) for aval in out_avals], outs)
return translation
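# Note (added): traceable_to_padded_translation implements the padded-lowering
# strategy used by several rules below: a dynamic-shape primitive is lowered by
# tracing its rule on bound-sized (padded) operands plus the logical sizes as
# explicit scalar int32 inputs, so the rule (e.g. the reduce-sum and nonzero
# traceables) can mask out padding before computing on rectangular buffers.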
def _replace_vars_with_bounds(aval):
if not isinstance(aval, AbsArray):
return aval
else:
new_shape = []
for d in aval.shape:
if isinstance(d, Var):
assert d.aval.shape == () and isinstance(d.aval._eltTy, BoundedIntTy)
new_shape.append(d.aval._eltTy._bound)
elif isinstance(d, int):
new_shape.append(d)
elif isinstance(d, BoundedInt):
new_shape.append(d._bound)
else:
raise NotImplementedError(d)
return core.ShapedArray(tuple(new_shape), aval._eltTy._dtype)
# AD
from jax.interpreters import ad
def _dynamic_xla_call_jvp(primals, tangents, *, jaxpr, num_consts):
del num_consts
in_dim_vals, primals = split_list(primals, [len(jaxpr.in_dim_binders)])
_, tangents = split_list(tangents, [len(jaxpr.in_dim_binders)])
new_jaxpr, consts = jvp_jaxpr(jaxpr)
outs = dynamic_xla_call_p.bind(*in_dim_vals, *consts, *primals, *tangents,
jaxpr=new_jaxpr, num_consts=len(consts))
primals_out, tangents_out = split_list(outs, [len(outs) // 2])
return primals_out, tangents_out
ad.primitive_jvps[dynamic_xla_call_p] = _dynamic_xla_call_jvp
def _dynamic_xla_call_transpose(cts_in, *args, jaxpr, num_consts):
# TODO make this a dynamic_xla_call_p bind
del num_consts
vars_to_vals = dict(
(d, t) for v, x in zip(jaxpr.in_binders, args)
if isinstance(v.aval, AbsArray) and not ad.is_undefined_primal(x)
for d, t in zip(v.aval.shape, x.shape) if isinstance(d, Var))
dim_args = [vars_to_vals[v] for v in jaxpr.in_dim_binders]
consts_bar, args_bar = backward_pass(jaxpr, dim_args, args, cts_in) # type: ignore
return [*consts_bar, *args_bar]
ad.primitive_transposes[dynamic_xla_call_p] = _dynamic_xla_call_transpose
def backward_pass(jaxpr, dim_args, args, cts_in):
primal_env = {}
ct_env = {}
def write_cotangent(v, ct):
ct_env[v] = ad.add_tangents(ct_env[v], ct) if v in ct_env else ct
def read_cotangent(v):
return ct_env.get(v, ad.Zero(v.aval))
def read_primal(v):
if type(v) is core.Literal:
raise NotImplementedError # TODO
else:
return primal_env.get(v, ad.UndefinedPrimal(v.aval))
def write_primal(v, val):
if not ad.is_undefined_primal(val):
primal_env[v] = val
write_primal(core.unitvar, core.unit)
map(write_primal, jaxpr.in_dim_binders, dim_args)
map(write_primal, jaxpr.in_binders, args)
map(write_cotangent, jaxpr.outs, cts_in)
raise NotImplementedError # TODO finish this
def jvp_jaxpr(jaxpr):
f = lu.wrap_init(jaxpr_as_fun(jaxpr))
dimvars = dict((v, v.aval) for v in jaxpr.in_dim_binders)
in_avals = [_replace_vars_with_avals(dimvars, v.aval) for v in jaxpr.in_binders]
jaxpr, consts, _ = trace_to_jaxpr_dynamic(jvp_traceable(ad.jvp(f)), in_avals * 2)
return jaxpr, consts
def _replace_vars_with_avals(dimvars, aval):
if isinstance(aval, AbsArray):
shape = [dimvars.get(d, d) for d in aval.shape]
return AbsArray(tuple(shape), aval._eltTy)
else:
return aval
@lu.transformation
def jvp_traceable(*primals_and_tangents):
n = len(primals_and_tangents)
primals, tangents = split_list(primals_and_tangents, [n // 2])
primals_out, tangents_out = yield (primals, tangents), {}
yield (*primals_out, *tangents_out)
def _dynamic_xla_call_pe(trace, *tracers, jaxpr, num_consts):
in_dim_tracers, tracers = split_list(tracers, [len(jaxpr.in_dim_binders)])
if any(not t.pval.is_known() for t in in_dim_tracers):
raise NotImplementedError
in_unknowns = [not t.pval.is_known() for t in tracers]
jaxpr1, jaxpr2, out_unknowns, num_res = partial_eval_jaxpr(jaxpr, in_unknowns)
known_tracers, unknown_tracers = partition_list(in_unknowns, tracers)
known_vals = [t.pval.get_known() for t in known_tracers]
in_dim_vals = [t.pval.get_known() for t in in_dim_tracers]
outs1_res = dynamic_xla_call_p.bind(*in_dim_vals, *known_vals, jaxpr=jaxpr1,
num_consts=num_consts)
outs1, res = split_list(outs1_res, [len(jaxpr1.outs) - num_res])
in_dim_tracers = map(trace.new_instantiated_const, in_dim_tracers)
res_tracers = map(trace.new_instantiated_const, res)
outs2 = [pe.JaxprTracer(trace, pe.PartialVal.unknown(v.aval), None)
for v in jaxpr2.outs]
eqn = pe.new_eqn_recipe(in_dim_tracers + res_tracers + unknown_tracers, outs2,
dynamic_xla_call_p, dict(jaxpr=jaxpr2, num_consts=0),
source_info_util.new_source_info())
for t in outs2: t.recipe = eqn
outs1, outs2 = iter(outs1), iter(outs2)
return [next(outs2) if uk else next(outs1) for uk in out_unknowns]
pe.custom_partial_eval_rules[dynamic_xla_call_p] = _dynamic_xla_call_pe
def partial_eval_jaxpr(jaxpr, in_unknowns):
env: Dict[Var, bool] = {}
res = []
def read(v):
if type(v) is core.Literal:
raise NotImplementedError # TODO
else:
return env[v]
def write(unk, v):
env[v] = unk
def new_res(v):
res.append(v)
return v
eqns1, eqns2 = [], []
map(write, in_unknowns, jaxpr.in_binders)
for eqn in jaxpr.eqns:
unks = map(read, eqn.invars)
if any(unks):
invars = [v if unk else new_res(v) for unk, v in zip(unks, eqn.invars)]
eqns2.append(pe.new_jaxpr_eqn(invars, eqn.outvars, eqn.primitive,
eqn.params,
source_info_util.new_source_info()))
map(partial(write, True), eqn.outvars)
else:
eqns1.append(eqn)
map(partial(write, False), eqn.outvars)
out_unknowns = map(read, jaxpr.outs)
out_dim_unknowns = map(read, jaxpr.out_dims) # when linearizing, all known
invars1, invars2 = partition_list(in_unknowns, jaxpr.in_binders)
outvars1, outvars2 = partition_list(out_unknowns, jaxpr.outs)
out_dims1, out_dims2 = partition_list(out_dim_unknowns, jaxpr.out_dims)
outvars1 = outvars1 + res
invars2 = res + invars2
# TODO forward the correct residuals here (all dimvars used in types)
in_dimvars2 = out_dims1 + jaxpr.in_dim_binders
jaxpr1 = DJaxpr(jaxpr.in_dim_binders, invars1, out_dims1, outvars1, eqns1)
jaxpr2 = DJaxpr(in_dimvars2, invars2, out_dims2, outvars2, eqns2)
return jaxpr1, jaxpr2, out_unknowns, len(res)
# batching
from jax.interpreters import batching
def _dynamic_xla_call_vmap(args, in_dims, *, jaxpr, num_consts):
del num_consts
in_dim_vals, args = split_list(args, [len(jaxpr.in_dim_binders)])
in_dim_bdims, arg_bdims = split_list(in_dims, [len(jaxpr.in_dim_binders)])
assert all(d is batching.not_mapped for d in in_dim_bdims)
axis_size, = {x.shape[d] for x, d in zip(args, arg_bdims)
if d is not batching.not_mapped}
new_jaxpr, consts, out_dims = batch_jaxpr(jaxpr, axis_size, arg_bdims)
outs = dynamic_xla_call_p.bind(*in_dim_vals, *consts, *args,
jaxpr=new_jaxpr, num_consts=len(consts))
return outs, out_dims
batching.primitive_batchers[dynamic_xla_call_p] = _dynamic_xla_call_vmap
def batch_jaxpr(jaxpr, axis_size, in_dims):
dimvars = dict((v, v.aval) for v in jaxpr.in_dim_binders)
in_avals = [_replace_vars_with_avals(dimvars, v.aval) for v in jaxpr.in_binders]
in_avals = [core.unmapped_aval(axis_size, core.no_axis_name, d, aval)
if d is not batching.not_mapped else aval
for d, aval in zip(in_dims, in_avals)]
fun, out_dims = batching.batch_subtrace(lu.wrap_init(jaxpr_as_fun(jaxpr)))
f = _batch_fun(fun, in_dims)
jaxpr, consts, _ = trace_to_jaxpr_dynamic(f, in_avals)
return jaxpr, consts, out_dims()
@lu.transformation
def _batch_fun(in_dims, *in_vals, **params):
with core.new_main(batching.BatchTrace, axis_name=core.no_axis_name) as main:
out_vals = yield (main, in_dims, *in_vals), params
del main
yield out_vals
def _map_array(size: int, axis: int, aval: AbsArray) -> AbsArray:
return AbsArray(tuple_delete(aval.shape, axis), aval._eltTy)
def _unmap_array(size: int, axis: int, aval: AbsArray) -> AbsArray:
raise NotImplementedError
core.aval_mapping_handlers[AbsArray] = _map_array, _unmap_array
# Primitives
import numpy as np
from jax._src.lax import lax
## sin
def sin(x: Any) -> Any:
return sin_p.bind(x)
sin_p = core.Primitive('sin_p')
@sin_p.def_abstract_eval
def _sin_abstract_eval(x):
if isinstance(x, AbsArray):
return AbsArray(x.shape, x._eltTy)
else:
return lax.sin_p.abstract_eval(x)
def _sin_typecheck_rule(invar):
return [invar.aval]
typecheck_rules[sin_p] = _sin_typecheck_rule
def _sin_translation_rule(c, dims, avals, operands):
(x,), = operands
return [[xops.Sin(x)]]
translations[sin_p] = _sin_translation_rule
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
## cos
def cos(x: Any) -> Any:
return cos_p.bind(x)
cos_p = core.Primitive('cos_p')
@cos_p.def_abstract_eval
def _cos_abstract_eval(x):
if isinstance(x, AbsArray):
return AbsArray(x.shape, x._eltTy)
else:
return lax.cos_p.abstract_eval(x)
def _cos_typecheck_rule(invar):
return [invar.aval]
typecheck_rules[cos_p] = _cos_typecheck_rule
def _cos_translation_rule(c, dims, avals, operands):
(x,), = operands
return [[xops.Cos(x)]]
translations[cos_p] = _cos_translation_rule
## reduce-sum
def reduce_sum(x: Any, axes: Optional[Sequence[int]] = None) -> Any:
if axes is None:
axes = tuple(range(len(x.shape)))
return reduce_sum_p.bind(x, axes=axes)
reduce_sum_p = core.Primitive('reduce_sum')
@reduce_sum_p.def_abstract_eval
def _sum_abstract_eval(operand, *, axes):
if isinstance(operand, AbsArray):
axes = set(axes)
new_shape = [d for i, d in enumerate(operand.shape) if i not in axes]
if (all(isinstance(d, int) for d in new_shape) and
isinstance(operand._eltTy, BaseType)):
return core.ShapedArray(tuple(new_shape), operand._eltTy._dtype)
else:
return AbsArray(tuple(new_shape), operand._eltTy)
else:
    return lax.reduce_sum_p.abstract_eval(operand, axes=axes)
def _reduce_sum_typecheck_rule(x, *, axes):
return [reduce_sum_p.abstract_eval(x.aval, axes=axes)]
typecheck_rules[reduce_sum_p] = _reduce_sum_typecheck_rule
def _reduce_sum_translation_traceable(logical_shapes, x, *, axes):
shape, = logical_shapes
x = _replace_masked_values(shape, x, 0, axes=axes)
return [lax._reduce_sum(x, axes=axes)]
translations[reduce_sum_p] = traceable_to_padded_translation(
_reduce_sum_translation_traceable)
def _replace_masked_values(logical_shape, x, val, axes=None):
axes = axes or set(range(len(logical_shape)))
masks = [lax.broadcasted_iota(np.int32, x.shape, i) < d
for i, d in enumerate(logical_shape) if d is not None and i in axes]
if masks:
x = lax.select(reduce(op.and_, masks), x, lax.full_like(x, val))
return x
def _reduce_sum_transpose_rule(cotangent, operand, *, axes):
raise NotImplementedError # TODO
ad.deflinear2(reduce_sum_p, _reduce_sum_transpose_rule)
### lt
def lt(x, y):
return lt_p.bind(x, y)
lt_p = core.Primitive('lt')
@lt_p.def_abstract_eval
def _lt_abstract_eval(x, y):
if isinstance(x, AbsArray) or isinstance(y, AbsArray):
# TODO check dtypes match
if not x.shape:
return AbsArray(y.shape, BaseType(np.dtype('bool')))
if not y.shape:
return AbsArray(x.shape, BaseType(np.dtype('bool')))
map(_dims_must_equal, x.shape, y.shape)
return AbsArray(x.shape, BaseType(np.dtype('bool')))
else:
return lax.lt_p.abstract_eval(x, y)
def _lt_typecheck_rule(x, y):
return [lt_p.abstract_eval(x.aval, y.aval)]
def _lt_translation_rule(c, dims, avals, operands):
  (x,), (y,) = operands
  return [[xops.Lt(x, y)]]
typecheck_rules[lt_p] = _lt_typecheck_rule
translations[lt_p] = _lt_translation_rule
### dot
def dot(x, y):
assert len(x.shape) == len(y.shape) == 2
return dot_general(x, y, ([1], [0]), ([], []))
Dims = Tuple[Sequence[int], Sequence[int]]
def dot_general(x: Any, y: Any, contract: Dims, batch: Dims) -> Any:
return dot_general_p.bind(x, y, contract=contract, batch=batch)
dot_general_p = core.Primitive('dot_general')
@dot_general_p.def_abstract_eval
def _dot_general_abstract_eval(x, y, *, contract, batch):
for i, j in zip(*contract): _dims_must_equal(x.shape[i], y.shape[j])
for i, j in zip(*batch): _dims_must_equal(x.shape[i], y.shape[j])
shape = lax._dot_general_shape_computation(x.shape, y.shape, (contract, batch))
return AbsArray(shape, x._eltTy)
def _dot_general_typecheck_rule(x, y, *, contract, batch):
return [_dot_general_abstract_eval(x.aval, y.aval,
contract=contract, batch=batch)]
typecheck_rules[dot_general_p] = _dot_general_typecheck_rule
def _dot_general_trans(logical_shapes, x, y, *, contract, batch):
x_shape, _ = logical_shapes
lhs_contract, _ = contract
x = _replace_masked_values(x_shape, x, 0, axes=lhs_contract)
return [lax.dot_general(x, y, dimension_numbers=(contract, batch))]
translations[dot_general_p] = traceable_to_padded_translation(_dot_general_trans)
def _dot_general_transpose_rule(cotangent, x, y, *, contract, batch):
assert False # TODO
ad.primitive_transposes[dot_general_p] = _dot_general_transpose_rule
## add
def add(x: Any, y: Any) -> Any:
return add_p.bind(x, y)
add_p = core.Primitive('add')
@add_p.def_abstract_eval
def _add_abstract_eval(x, y):
if isinstance(x, AbsArray) and isinstance(y, AbsArray):
map(_dims_must_equal, x.shape, y.shape) # TODO broadcasting?
return AbsArray(x.shape, x._eltTy)
else:
return lax.add_p.abstract_eval(x, y)
def _dims_must_equal(d1, d2):
if isinstance(d1, (Tracer, Var)) and isinstance(d2, (Tracer, Var)):
if d1.aval is d2.aval: return True
elif isinstance(d1, int) and isinstance(d2, int):
return d1 == d2
raise Exception("can't prove shapes equal (or unequal)!")
def _add_typecheck_rule(x, y):
return [add_p.abstract_eval(x.aval, y.aval)]
typecheck_rules[add_p] = _add_typecheck_rule
def _add_translation_rule(c, dims, avals, operands):
(x,), (y,) = operands
return [[xops.Add(x, y)]]
translations[add_p] = _add_translation_rule
## mul
def mul(x: Any, y: Any) -> Any:
return mul_p.bind(x, y)
mul_p = core.Primitive('mul')
@mul_p.def_abstract_eval
def _mul_abstract_eval(x, y):
if isinstance(x, AbsArray) and isinstance(y, AbsArray):
map(_dims_must_equal, x.shape, y.shape) # TODO broadcasting?
return AbsArray(x.shape, x._eltTy)
else:
return lax.mul_p.abstract_eval(x, y)
def _mul_typecheck_rule(x, y):
return [mul_p.abstract_eval(x.aval, y.aval)]
typecheck_rules[mul_p] = _mul_typecheck_rule
def _mul_translation_rule(c, dims, avals, operands):
(x,), (y,) = operands
return [[xops.Mul(x, y)]]
translations[mul_p] = _mul_translation_rule
## nonzero
def nonzero(x):
return nonzero_p.bind(x)
nonzero_p = core.Primitive('nonzero')
def _nonzero_unpack_result(x):
return [x.shape[-1], x]
nonzero_p.unpack_result = _nonzero_unpack_result # type: ignore
def _nonzero_staging_rule(trace, tracers, params):
aval = tracers[0].aval
if isinstance(aval, AbsArray) and not isinstance(aval._eltTy, BaseType):
raise NotImplementedError
bound = aval.shape[-1]
bound = bound if isinstance(bound, int) else bound._bound
out_dim_aval = AbsArray(aval.shape[:-1], BoundedIntTy(bound))
out_dim_tracer = pe.DynamicJaxprTracer(trace, out_dim_aval, None)
if len(aval.shape) == 1:
out_val_aval = AbsArray((out_dim_tracer,), BaseType(np.dtype('int32')))
else:
indices = tuple(range(len(aval.shape[:-1])))
expr = DimIndexingExpr(out_dim_tracer, indices)
out_val_aval = AbsArray((*aval.shape[:-1], expr),
BaseType(np.dtype('int32')))
out_val_tracer = pe.DynamicJaxprTracer(trace, out_val_aval, None)
invars = map(trace.getvar, tracers)
outvars = map(trace.makevar, [out_dim_tracer, out_val_tracer])
eqn = pe.new_jaxpr_eqn(invars, outvars, nonzero_p, {},
source_info_util.new_source_info())
trace.frame.eqns.append(eqn)
return out_val_tracer
custom_staging_rules[nonzero_p] = _nonzero_staging_rule
def _nonzero_typecheck_rule(invar):
bound = invar.aval.shape[-1]
bound = bound if isinstance(bound, int) else bound._bound
newvar = core.gensym()
out_dim_var = newvar(AbsArray(invar.aval.shape[:-1], BoundedIntTy(bound)))
if len(invar.aval.shape) == 1:
out_val_aval = AbsArray((out_dim_var,), BaseType(np.dtype('int32')))
else:
indices = tuple(range(len(out_dim_var.aval.shape))) # pytype: disable=attribute-error
expr = DimIndexingExpr(out_dim_var, indices)
out_val_aval = AbsArray((*invar.aval.shape[:-1], expr),
BaseType(np.dtype('int32')))
return out_dim_var, out_val_aval
typecheck_rules[nonzero_p] = _nonzero_typecheck_rule
def _nonzero_translation_traceable(logical_shapes, x):
shape, = logical_shapes
assert shape
x = _replace_masked_values(shape, x, 0)
nonzero_indicators = x != 0
last_axis = len(shape) - 1
out_sizes = lax._reduce_sum(nonzero_indicators.astype(np.int32), [last_axis])
iota = lax.broadcasted_iota(np.int32, x.shape, dimension=last_axis)
_, idx = lax.sort_key_val(~nonzero_indicators, iota, dimension=last_axis)
return out_sizes, idx
translations[nonzero_p] = traceable_to_padded_translation(
_nonzero_translation_traceable)
def _nonzero_vmap_rule(args, in_dims):
(x,), (d,) = args, in_dims
if d != 0: raise NotImplementedError
return nonzero_p.bind(x), 0
batching.primitive_batchers[nonzero_p] = _nonzero_vmap_rule
## iota
def iota(n):
return iota_p.bind(n)
iota_p = core.Primitive('iota')
def _iota_staging_rule(trace, tracers, params):
tracer, = tracers
n = trace.get_const(tracer)
if n is not None:
if type(n) is not int: raise NotImplementedError # TODO batched version?
out_aval = core.ShapedArray((n,), np.dtype('int32'))
out_tracer = pe.DynamicJaxprTracer(trace, out_aval, None)
outvar = trace.makevar(out_tracer)
eqn = pe.new_jaxpr_eqn([], [outvar], iota_p, dict(size=n),
source_info_util.new_source_info())
else:
aval = tracer.aval
if not isinstance(aval, AbsArray): raise TypeError
if aval.shape:
indices = tuple(range(len(aval.shape)))
out_aval = AbsArray((*aval.shape, DimIndexingExpr(tracer, indices)),
BaseType(np.dtype('int32')))
else:
out_aval = AbsArray((tracer,), BaseType(np.dtype('int32')))
out_tracer = pe.DynamicJaxprTracer(trace, out_aval, None)
outvar = trace.makevar(out_tracer)
invar = trace.getvar(tracer)
eqn = pe.new_jaxpr_eqn([invar], [outvar], iota_p, {},
source_info_util.new_source_info())
trace.frame.eqns.append(eqn)
return out_tracer
custom_staging_rules[iota_p] = _iota_staging_rule
def _iota_typecheck_rule(*invars, size=None):
if size is not None:
if invars: raise TypeError
return [core.ShapedArray((size,), np.dtype('int32'))]
else:
invar, = invars
if not invar.aval.shape:
return [AbsArray((invar,), BaseType(np.dtype('int32')))]
else:
indices = tuple(range(len(invar.aval.shape)))
return [AbsArray((*invar.aval.shape, DimIndexingExpr(invar, indices)),
BaseType(np.dtype('int32')))]
typecheck_rules[iota_p] = _iota_typecheck_rule
def _iota_translation_rule(c, dims, avals, operands, *, size=None):
if size is None:
aval, = avals
size = aval._eltTy._bound
shape = aval.shape
else:
shape = ()
etype = xla.dtype_to_primitive_type(np.dtype('int32'))
xla_shape = xc.Shape.array_shape(etype, (*shape, size))
return [[xops.Iota(c, xla_shape, len(shape))]]
translations[iota_p] = _iota_translation_rule
## broadcast
def broadcast(x, d):
return broadcast_p.bind(x, d)
broadcast_p = core.Primitive('broadcast')
def _broadcast_staging_rule(trace, tracers, params):
x, d = tracers
d_const = trace.get_const(d)
if d_const is not None:
raise NotImplementedError # TODO
else:
aval = x.aval
dtype = aval._eltTy._dtype if isinstance(aval, AbsArray) else aval.dtype
out_aval = AbsArray((d, *x.shape), BaseType(dtype))
out_tracer = pe.DynamicJaxprTracer(trace, out_aval, None)
eqn = pe.new_jaxpr_eqn([trace.getvar(x), trace.getvar(d)],
[trace.makevar(out_tracer)], broadcast_p, {},
source_info_util.new_source_info())
trace.frame.eqns.append(eqn)
return out_tracer
custom_staging_rules[broadcast_p] = _broadcast_staging_rule
def _broadcast_typecheck_rule(x, d):
aval = x.aval
dtype = aval._eltTy._dtype if isinstance(aval, AbsArray) else aval.dtype
return [AbsArray((d, *x.aval.shape), BaseType(dtype))]
typecheck_rules[broadcast_p] = _broadcast_typecheck_rule
def _broadcast_translation_rule(c, dims, avals, operands, *, size=None):
(x,), (_,) = operands
if size is None:
_, aval = avals
assert not aval.shape
size = aval._eltTy._bound
return [[xops.Broadcast(x, (size,))]]
translations[broadcast_p] = _broadcast_translation_rule
# Examples
import jax.numpy as jnp
def bbarray(bound_shape: Tuple[int, ...], x: NDArray):
sizes: Dict[int, BoundedInt] = {}
shape = tuple(sizes.setdefault(d, BoundedInt(d, bound))
for d, bound in zip(x.shape, bound_shape))
slices = tuple(slice(d) for d in x.shape)
padded_x = jnp.ones(bound_shape, x.dtype).at[slices].set(x)
return Array(shape, BaseType(x.dtype), padded_x)
def ones_like(x):
if isinstance(x, Array): # doesn't work with tracers
return Array(x.shape, x._eltTy, jnp.ones_like(x._data))
else:
return jnp.ones_like(x)
if __name__ == '__main__':
import jax
jax.config.update('jax_platform_name', 'cpu')
def p(s): print('\n--- ' + str(s))
## Staging and typechecking
p('typecheck identity')
def f(x):
return x
x = jnp.array([0, 1])
jaxpr, _, _ = make_djaxpr(f, x)
print(jaxpr)
print(typecheck_jaxpr(jaxpr))
p('typecheck sin')
def f(x):
return sin(x)
x = bbarray((5,), jnp.arange(3.))
jaxpr, _, _ = make_djaxpr(f, x)
print(jaxpr)
print(typecheck_jaxpr(jaxpr))
p('typecheck sin-and-add')
def f(x):
y = sin(x)
z = sin(y)
return add(y, z)
x = bbarray((5,), jnp.arange(3.))
jaxpr, _, _ = make_djaxpr(f, x)
print(jaxpr)
print(typecheck_jaxpr(jaxpr))
p('typecheck iota(3)')
def f(): # type: ignore
return iota(3)
jaxpr, _, _ = make_djaxpr(f)
print(jaxpr)
print(typecheck_jaxpr(jaxpr))
p('typecheck nonzero')
def f(x):
return nonzero(x)
x = jnp.array([1, 0, -2, 0, 3, 0])
jaxpr, _, _ = make_djaxpr(f, x)
print(jaxpr)
print(typecheck_jaxpr(jaxpr))
p('typecheck sum-of-nonzero')
def f(x):
return reduce_sum(nonzero(x), tuple(range(len(x.shape))))
x = jnp.array([1, 0, -2, 0, 3, 0])
jaxpr, _, _ = make_djaxpr(f, x)
print(jaxpr)
print(typecheck_jaxpr(jaxpr))
## XLA lowering and execution
@djit
def f(x):
nonzero_idx = nonzero(x)
return reduce_sum(nonzero_idx)
p('execute sum of nonzero indices')
x = jnp.array([0, 1, 0, 1, 0, 1])
print(f(x))
print('should be', np.sum(np.nonzero(x)[0]))
@djit
def f(x):
return nonzero(x)
p('execute nonzero')
x = jnp.array([0, 1, 0, 1, 0, 1])
print(f(x))
print('should be', np.nonzero(x)[0])
@djit
def f(i):
return iota(i)
p('execute iota')
print(f(BoundedInt(3, 5)))
print('should be', np.arange(3))
@djit
def f(x, n):
y = nonzero(x)
return broadcast(y, n)
p('execute broadcast')
x = np.arange(3)
n = BoundedInt(4, 5)
print(f(x, n)) # type: ignore
print(f'should be\n{np.broadcast_to(np.nonzero(x)[0], (4, 2))}')
## ad
@djit
def f(x):
y = sin(x)
return reduce_sum(y, axes=(0,))
x = bbarray((5,), jnp.arange(2.))
p('basic jvp')
z, z_dot = jax.jvp(f, (x,), (ones_like(x),))
print(z, z_dot)
p('basic linearize')
_, f_lin = jax.linearize(f, x)
print(f_lin(ones_like(x)))
## vmap
@djit
def f(x):
return nonzero(x)
p('vmap of nonzero')
xs = jnp.array([[0, 1, 0, 1, 0, 1],
[1, 1, 1, 1, 0, 1]])
print(jax.vmap(f)(xs))
## dot
@djit
def f(x):
return dot(x, x)
p('dot(x, x)')
x = bbarray((4, 4), np.arange(9., dtype=np.float32).reshape(3, 3))
print(f(x))
y = np.arange(9.).reshape(3, 3)
print(f'should be\n{np.dot(y, y)}')
|
the-stack_106_21932
|
"""
This file offers the methods to automatically retrieve the graph Devosia crocina.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def DevosiaCrocina(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Devosia crocina graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of the Devosia crocina graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="DevosiaCrocina",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
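# Usage sketch (added for illustration; the import path is an assumption about
# how ensmallen exposes these retrieval helpers, and the relative imports above
# mean this module is intended to be used from within the installed package):
#
#     from ensmallen.datasets.string import DevosiaCrocina
#     graph = DevosiaCrocina(directed=False, load_nodes=True)
#     print(graph)  # print a short summary of the retrieved graph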
|
the-stack_106_21935
|
import argparse
import tensorflow as tf
tf.random.set_seed(10)
import numpy as np
np.random.seed(15)
import matplotlib.pyplot as plt
from surrogate_models import coefficient_model
from optimizers import surrogate_optimizer
from utils import shape_return
if __name__ == '__main__':
'''
Usage: python nn_opt.py [train/optimize/rl_train/rl_optimize] [shape/shape_lift]
'''
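# Example invocations (hypothetical; the mode/constraint pairs mirror the usage
# string above and the argparse choices below):
#   python nn_opt.py train shape
#   python nn_opt.py optimize shape_lift
#   python nn_opt.py rl_train shape
#   python nn_opt.py rl_optimize shape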
parser = argparse.ArgumentParser(description='Surrogate model based optimization')
parser.add_argument('mode', metavar='mode', type=str, help='[train/optimize/rl_train/rl_optimize]') # Train a network or use a trained network for optimize/rl_optimize
parser.add_argument('constraint_options',metavar='constraint_options', type=str, help='[shape/shape_lift]')
args = parser.parse_args()
# Load dataset
input_data = np.load('DOE_2000.npy').astype('float32')
output_data = np.load('coeff_data_2000.npy').astype('float32')
# Define a simple fully connected model
model=coefficient_model(input_data,output_data)
# Train the model
if args.mode == 'train':
model.train_model()
else:
model.restore_model()
if args.mode == 'optimize':
# Perform gradient-based optimization
from constraints import cons
# Initialize optimizer
num_pars = np.shape(input_data)[1]
if args.constraint_options == 'shape_lift':
lift_cons = True
opt = surrogate_optimizer(model,num_pars,cons,lift_cons)
else:
opt = surrogate_optimizer(model,num_pars,cons)
best_func_val, solution, best_opt = opt.optimize(10) # Optimize with 10 restarts
# Visualize airfoil shape evolution
for i in range(1,np.shape(best_opt)[0]):
shape_return(best_opt[i],i)
elif args.mode == 'rl_train':
from rl_optimizers import rl_optimize
from constraints import t_base
# Create an RL based optimization
env_params = {}
env_params['num_params'] = np.shape(input_data)[1]
env_params['num_obs'] = np.shape(output_data)[1]
env_params['init_guess'] = np.asarray(t_base)
env_params['num_steps'] = 1 # 1 step prediction
rl_opt = rl_optimize(env_params,50,env_params['num_steps']) # 50 iterations, 1 step per episode
rl_opt.train()
elif args.mode == 'rl_optimize':
from rl_optimizers import rl_optimize
from constraints import t_base
# Create an RL based optimization
env_params = {}
env_params['num_params'] = np.shape(input_data)[1]
env_params['num_obs'] = np.shape(output_data)[1]
env_params['init_guess'] = np.asarray(t_base,dtype='float32')
env_params['num_steps'] = 1
rl_opt = rl_optimize(env_params,50,env_params['num_steps']) # 50 iterations, 1 step per episode
f = open('rl_checkpoint','r')
checkpoint_path = f.readline()
f.close()
rl_opt.load_checkpoint(checkpoint_path)
# Random restarts
best_drag = 10.0
for _ in range(30):
path, coeff_path = rl_opt.optimize_shape()
if coeff_path[-1][0] < best_drag:
best_drag = coeff_path[-1][0]
best_params = path[-1]
best_path = path
# Visualize airfoil shape evolution
for i in range(0,len(path)):
shape_return(best_path[i],i)
# Print best coefficients etc
print('Drag and lift coefficients',coeff_path[-1])
print('Optimized shape parameters:',path[-1])
|
the-stack_106_21937
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from nipype.pipeline import engine as pe
from fmriprep.workflows.bold.base import _get_wf_name
class FactoryContext:
def __init__(self, workdir, spec, bidsdatabase, workflow, memcalc):
self.workdir = workdir
self.spec = spec
self.database = bidsdatabase.database
self.bidsdatabase = bidsdatabase
self.workflow = workflow
self.memcalc = memcalc
class Factory:
def __init__(self, ctx):
self.workdir = ctx.workdir
self.spec = ctx.spec
self.database = ctx.database
self.bidsdatabase = ctx.bidsdatabase
self.workflow = ctx.workflow
self.memcalc = ctx.memcalc
def _endpoint(self, hierarchy, node, attr):
if len(hierarchy) > 1:
parent = hierarchy[1]
fullattr = ".".join([*[wf.name for wf in hierarchy[2:]], node.name, attr])
return parent, fullattr
return node, attr
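# Illustrative example (added comment): for hierarchy == [root_wf, subject_wf, bold_wf],
# node == n and attr == "out_file", _endpoint returns (subject_wf, "bold_wf.n.out_file"),
# i.e. the endpoint is expressed relative to the first workflow below the root.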
def _single_subject_wf_name(self, sourcefile=None, bids_subject_id=None, subject_id=None):
bidsdatabase = self.bidsdatabase
if bids_subject_id is None:
if sourcefile is not None:
bidspath = bidsdatabase.tobids(sourcefile)
subject_id = bidsdatabase.tagval(bidspath, "subject")
if subject_id is not None:
bids_subject_id = bidsdatabase._format_tagval("subject", subject_id)
if bids_subject_id is not None:
return "single_subject_%s_wf" % bids_subject_id
def _bold_wf_name(self, sourcefile):
bidspath = self.bidsdatabase.tobids(sourcefile)
return _get_wf_name(bidspath)
def _get_hierarchy(self, name, sourcefile=None, subject_id=None, childname=None, create_ok=True):
hierarchy = [self.workflow]
def require_workflow(child_name):
wf = hierarchy[-1]
child = wf.get_node(child_name)
if child is None:
assert create_ok
child = pe.Workflow(name=child_name)
wf.add_nodes([child])
hierarchy.append(child)
require_workflow(name)
single_subject_wf_name = self._single_subject_wf_name(sourcefile=sourcefile, subject_id=subject_id)
if single_subject_wf_name is not None:
require_workflow(single_subject_wf_name)
if sourcefile is not None:
if self.database.tagval(sourcefile, "datatype") == "func":
require_workflow(self._bold_wf_name(sourcefile))
if childname is not None:
require_workflow(childname)
return hierarchy
def get(self, *args, **kwargs):
raise NotImplementedError()
def connect_common_attrs(self, outputhierarchy, outputnode, inputhierarchy, inputnode):
if isinstance(outputnode, str):
outputnode = outputhierarchy[-1].get_node(outputnode)
if isinstance(inputnode, str):
inputnode = inputhierarchy[-1].get_node(inputnode)
inputattrs = set(inputnode.inputs.copyable_trait_names())
outputattrs = set(outputnode.outputs.copyable_trait_names())
attrs = inputattrs & outputattrs # find common attr names
for attr in attrs:
self.connect_attr(outputhierarchy, outputnode, attr, inputhierarchy, inputnode, attr)
return attrs
def connect_attr(self, outputhierarchy, outputnode, outattr, inputhierarchy, inputnode, inattr):
inputhierarchy = [*inputhierarchy] # make copies
outputhierarchy = [*outputhierarchy]
assert outputhierarchy[0] == inputhierarchy[0]
while outputhierarchy[1] == inputhierarchy[1]:
inputhierarchy.pop(0)
outputhierarchy.pop(0)
workflow = inputhierarchy[0]
if isinstance(outputnode, str):
outputnode = outputhierarchy[-1].get_node(outputnode)
if isinstance(inputnode, str):
inputnode = inputhierarchy[-1].get_node(inputnode)
outputendpoint = self._endpoint(outputhierarchy, outputnode, outattr)
inputendpoint = self._endpoint(inputhierarchy, inputnode, inattr)
workflow.connect(*outputendpoint, *inputendpoint)
def connect(self, nodehierarchy, node, *args, **kwargs):
outputhierarchy, outputnode = self.get(*args, **kwargs)
self.connect_common_attrs(outputhierarchy, outputnode, nodehierarchy, node)
|
the-stack_106_21938
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import thread_cert
from pktverify.consts import MLE_CHILD_ID_RESPONSE, MLE_DISCOVERY_RESPONSE, HANDSHAKE_CLIENT_HELLO, NM_EXTENDED_PAN_ID_TLV, NM_NETWORK_NAME_TLV, NM_STEERING_DATA_TLV, NM_COMMISSIONER_UDP_PORT_TLV, NM_JOINER_UDP_PORT_TLV, NM_DISCOVERY_RESPONSE_TLV, RLY_RX_URI, RLY_TX_URI
from pktverify.packet_verifier import PacketVerifier
COMMISSIONER = 1
JOINER_ROUTER = 2
JOINER = 3
class Cert_8_2_01_JoinerRouter(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'name': 'COMMISSIONER',
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1
},
JOINER_ROUTER: {
'name': 'JOINER_ROUTER',
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 'rdn',
'router_selection_jitter': 1
},
JOINER: {
'name': 'JOINER',
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 'rdn',
'router_selection_jitter': 1
},
}
def test(self):
self.nodes[COMMISSIONER].interface_up()
self.nodes[COMMISSIONER].thread_start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'leader')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(5)
self.nodes[COMMISSIONER].commissioner_add_joiner(self.nodes[JOINER_ROUTER].get_eui64(), 'PSKD01')
self.nodes[COMMISSIONER].commissioner_add_joiner(self.nodes[JOINER].get_eui64(), 'PSKD02')
self.simulator.go(5)
self.nodes[JOINER_ROUTER].interface_up()
self.nodes[JOINER_ROUTER].joiner_start('PSKD01')
self.simulator.go(10)
self.assertEqual(
self.nodes[JOINER_ROUTER].get_masterkey(),
self.nodes[COMMISSIONER].get_masterkey(),
)
self.nodes[JOINER_ROUTER].thread_start()
self.simulator.go(5)
self.assertEqual(self.nodes[JOINER_ROUTER].get_state(), 'router')
self.nodes[COMMISSIONER].enable_allowlist()
self.nodes[COMMISSIONER].add_allowlist(self.nodes[JOINER_ROUTER].get_addr64())
self.nodes[JOINER].enable_allowlist()
self.nodes[JOINER].add_allowlist(self.nodes[JOINER_ROUTER].get_addr64())
self.nodes[JOINER].interface_up()
self.nodes[JOINER].joiner_start('PSKD02')
self.simulator.go(10)
self.assertEqual(
self.nodes[JOINER].get_masterkey(),
self.nodes[COMMISSIONER].get_masterkey(),
)
self.nodes[JOINER].thread_start()
self.simulator.go(5)
self.assertEqual(self.nodes[JOINER].get_state(), 'router')
self.collect_rloc16s()
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
COMMISSIONER = pv.vars['COMMISSIONER']
_cpkts = pkts.filter_wpan_src64(COMMISSIONER)
_cpkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
# Step 3: Verify that the following details occur in the exchange between the Joiner,
# the Joiner_Router and the Commissioner
# 1. The UDP port specified by the Commissioner in the Discovery Response is used as the destination port
# for UDP datagrams from Joiner_1 to the Commissioner.
pkts.range(_cpkts.index).filter_mle_cmd(MLE_DISCOVERY_RESPONSE).must_next().must_verify(
lambda p: {
NM_EXTENDED_PAN_ID_TLV, NM_NETWORK_NAME_TLV, NM_STEERING_DATA_TLV, NM_COMMISSIONER_UDP_PORT_TLV,
NM_JOINER_UDP_PORT_TLV, NM_DISCOVERY_RESPONSE_TLV
} == set(p.thread_meshcop.tlv.type))
# 2. Joiner_1 sends an initial DTLS-ClientHello handshake record to the Commissioner
pkts.filter(lambda p: p.dtls.handshake.type == [HANDSHAKE_CLIENT_HELLO]).must_next()
# 3. The Joiner_Router receives the initial DTLS-ClientHello handshake record and sends a RLY_RX.ntf
# message to the Commissioner
# Todo: verify coap payload
jr_rloc16 = pv.vars["JOINER_ROUTER_RLOC16"]
c_rloc16 = pv.vars["COMMISSIONER_RLOC16"]
pkts.filter_coap_request(RLY_RX_URI).must_next().must_verify(
lambda p: p.wpan.src16 == jr_rloc16 and p.wpan.dst16 == c_rloc16)
# 4. The Commissioner receives the RLY_RX.ntf message and sends a RLY_TX.ntf message to the Joiner_Router
pkts.filter_coap_request(RLY_TX_URI).must_next().must_verify(
lambda p: p.wpan.src16 == c_rloc16 and p.wpan.dst16 == jr_rloc16)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_21939
|
# -*- coding: utf-8 -*-
# This is Sensorberry
# Main file
# Built by Alessandro Massarenti
# V 2.0
import threading
import time
import serial
import telepot  # needed for telepot.glance() below
from telepot.loop import MessageLoop
from telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton
from config import *
from funzioni import *
from plant_class import *
# Handle input
usb = "USB0"
ser = serial.Serial('/dev/tty' + usb, 9600)  # avoid shadowing the serial module
if ser.isOpen():
ser.close()
ser.open()
ser.isOpen()
def save_data_routine():
while 1:
time.sleep(10)
ambient_temp = get_data("temp", ser)
ambient_humidity = get_data("humi", ser)
database.savedata(ambient_temp, ambient_humidity)
database = Db()
database.check_entry_table()
avocado = Plant()
def msg_handler(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
user_id = msg['from']['id']
if content_type == 'text':
comando = msg['text']
if comando == '/start':
bot.sendMessage(chat_id, text="Choose a button", reply_markup=ReplyKeyboardMarkup(
keyboard=[
[KeyboardButton(text='avocado')],
]))
elif comando == 'avocado':
airtemp = str(avocado.getAirTemp())
airhumid = str(avocado.getAirHumid())
message: str = "Avocado:\nAir temp: " + \
airtemp + "°C\n" + "Air humidity: " + airhumid + "%"
bot.sendMessage(chat_id, text=message)
print("ho risposto al comando avocado di:" + str(user_id))
MessageLoop(bot, msg_handler).run_as_thread()
print('Listening ...')
x = threading.Thread(target=save_data_routine)
x.start()
# Keep the program alive
while 1:
time.sleep(10)
|
the-stack_106_21940
|
#!/usr/bin/env python3
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import sys
from argparse import ArgumentParser, SUPPRESS
from pathlib import Path
from time import perf_counter
import cv2
import numpy as np
from openvino.inference_engine import IECore, get_version
sys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python'))
from models import OutputTransform, SegmentationModel, SalientObjectDetectionModel
import monitors
from pipelines import get_user_config, parse_devices, AsyncPipeline
from images_capture import open_images_capture
from performance_metrics import PerformanceMetrics
from helpers import resolution, log_blobs_info, log_runtime_settings, log_latency_per_stage
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.DEBUG, stream=sys.stdout)
class SegmentationVisualizer:
pascal_voc_palette = [
(0, 0, 0),
(128, 0, 0),
(0, 128, 0),
(128, 128, 0),
(0, 0, 128),
(128, 0, 128),
(0, 128, 128),
(128, 128, 128),
(64, 0, 0),
(192, 0, 0),
(64, 128, 0),
(192, 128, 0),
(64, 0, 128),
(192, 0, 128),
(64, 128, 128),
(192, 128, 128),
(0, 64, 0),
(128, 64, 0),
(0, 192, 0),
(128, 192, 0),
(0, 64, 128)
]
def __init__(self, colors_path=None):
if colors_path:
self.color_palette = self.get_palette_from_file(colors_path)
log.debug('The palette is loaded from {}'.format(colors_path))
else:
self.color_palette = self.pascal_voc_palette
log.debug('The PASCAL VOC palette is used')
log.debug('Get {} colors'.format(len(self.color_palette)))
self.color_map = self.create_color_map()
def get_palette_from_file(self, colors_path):
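# Expected file format (inferred from the parsing below, not documented in the
# original): one color per line containing an "(R, G, B)" triple, e.g.
#   background (0, 0, 0)
#   person (128, 0, 0)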
with open(colors_path, 'r') as file:
colors = []
for line in file.readlines():
values = line[line.index('(')+1:line.index(')')].split(',')
colors.append([int(v.strip()) for v in values])
return colors
def create_color_map(self):
classes = np.array(self.color_palette, dtype=np.uint8)[:, ::-1] # RGB to BGR
color_map = np.zeros((256, 1, 3), dtype=np.uint8)
classes_num = len(classes)
color_map[:classes_num, 0, :] = classes
color_map[classes_num:, 0, :] = np.random.uniform(0, 255, size=(256-classes_num, 3))
return color_map
def apply_color_map(self, input):
input_3d = cv2.merge([input, input, input])
return cv2.LUT(input_3d, self.color_map)
class SaliencyMapVisualizer:
def apply_color_map(self, input):
saliency_map = (input * 255.0).astype(np.uint8)
saliency_map = cv2.merge([saliency_map, saliency_map, saliency_map])
return saliency_map
def render_segmentation(frame, masks, visualiser, resizer, only_masks=False):
output = visualiser.apply_color_map(masks)
if not only_masks:
output = np.floor_divide(frame, 2) + np.floor_divide(output, 2)
return resizer.resize(output)
def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument('-m', '--model', help='Required. Path to an .xml file with a trained model.',
required=True, type=Path)
args.add_argument('-at', '--architecture_type', help='Required. Specify the model\'s architecture type.',
type=str, required=True, choices=('segmentation', 'salient_object_detection'))
args.add_argument('-i', '--input', required=True,
help='Required. An input to process. The input must be a single image, '
'a folder of images, video file or camera id.')
args.add_argument('-d', '--device', default='CPU', type=str,
help='Optional. Specify the target device to infer on; CPU, GPU, HDDL or MYRIAD is '
'acceptable. The demo will look for a suitable plugin for device specified. '
'Default value is CPU.')
common_model_args = parser.add_argument_group('Common model options')
common_model_args.add_argument('-c', '--colors', type=Path,
help='Optional. Path to a text file containing colors for classes.')
common_model_args.add_argument('--labels', help='Optional. Labels mapping file.', default=None, type=str)
infer_args = parser.add_argument_group('Inference options')
infer_args.add_argument('-nireq', '--num_infer_requests', help='Optional. Number of infer requests.',
default=1, type=int)
infer_args.add_argument('-nstreams', '--num_streams',
help='Optional. Number of streams to use for inference on the CPU or/and GPU in throughput '
'mode (for HETERO and MULTI device cases use format '
'<device1>:<nstreams1>,<device2>:<nstreams2> or just <nstreams>).',
default='', type=str)
infer_args.add_argument('-nthreads', '--num_threads', default=None, type=int,
help='Optional. Number of threads to use for inference on CPU (including HETERO cases).')
io_args = parser.add_argument_group('Input/output options')
io_args.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop.')
io_args.add_argument('-o', '--output', required=False,
help='Optional. Name of the output file(s) to save.')
io_args.add_argument('-limit', '--output_limit', required=False, default=1000, type=int,
help='Optional. Number of frames to store in output. '
'If 0 is set, all frames are stored.')
io_args.add_argument('--no_show', help="Optional. Don't show output.", action='store_true')
io_args.add_argument('--output_resolution', default=None, type=resolution,
help='Optional. Specify the maximum output window resolution '
'in (width x height) format. Example: 1280x720. '
'Input frame size used by default.')
io_args.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially.')
io_args.add_argument('--only_masks', default=False, action='store_true',
help='Optional. Display only masks. Could be switched by TAB key.')
debug_args = parser.add_argument_group('Debug options')
debug_args.add_argument('-r', '--raw_output_message', help='Optional. Output inference results as mask histogram.',
default=False, action='store_true')
return parser
def get_model(ie, args):
if args.architecture_type == 'segmentation':
return SegmentationModel(ie, args.model, labels=args.labels), SegmentationVisualizer(args.colors)
if args.architecture_type == 'salient_object_detection':
return SalientObjectDetectionModel(ie, args.model, labels=args.labels), SaliencyMapVisualizer()
def print_raw_results(mask, frame_id, labels=None):
log.debug(' ---------------- Frame # {} ---------------- '.format(frame_id))
log.debug(' Class ID | Pixels | Percentage ')
max_classes = int(np.max(mask)) + 1 # +1 so a background-only mask still yields one class
histogram = cv2.calcHist([np.expand_dims(mask, axis=-1)], [0], None, [max_classes], [0, max_classes])
total_pixels = np.prod(mask.shape) # avoid shadowing the built-in all()
for id, val in enumerate(histogram[:, 0]):
if val > 0:
label = labels[id] if labels and len(labels) > id else '#{}'.format(id)
log.debug(' {:<16} | {:6d} | {:5.2f}% '.format(label, int(val), val / total_pixels * 100))
def main():
args = build_argparser().parse_args()
cap = open_images_capture(args.input, args.loop)
log.info('OpenVINO Inference Engine')
log.info('\tbuild: {}'.format(get_version()))
ie = IECore()
plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)
model, visualizer = get_model(ie, args)
log.info('Reading model {}'.format(args.model))
log_blobs_info(model)
pipeline = AsyncPipeline(ie, model, plugin_config, device=args.device, max_num_requests=args.num_infer_requests)
log.info('The model {} is loaded to {}'.format(args.model, args.device))
log_runtime_settings(pipeline.exec_net, set(parse_devices(args.device)))
next_frame_id = 0
next_frame_id_to_show = 0
metrics = PerformanceMetrics()
render_metrics = PerformanceMetrics()
presenter = None
output_transform = None
video_writer = cv2.VideoWriter()
only_masks = args.only_masks
while True:
if pipeline.is_ready():
# Get new image/frame
start_time = perf_counter()
frame = cap.read()
if frame is None:
if next_frame_id == 0:
raise ValueError("Can't read an image from the input")
break
if next_frame_id == 0:
output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
if args.output_resolution:
output_resolution = output_transform.new_resolution
else:
output_resolution = (frame.shape[1], frame.shape[0])
presenter = monitors.Presenter(args.utilization_monitors, 55,
(round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'),
cap.fps(), output_resolution):
raise RuntimeError("Can't open video writer")
# Submit for inference
pipeline.submit_data(frame, next_frame_id, {'frame': frame, 'start_time': start_time})
next_frame_id += 1
else:
# Wait for empty request
pipeline.await_any()
if pipeline.callback_exceptions:
raise pipeline.callback_exceptions[0]
# Process all completed requests
results = pipeline.get_result(next_frame_id_to_show)
if results:
objects, frame_meta = results
if args.raw_output_message:
print_raw_results(objects, next_frame_id_to_show, model.labels)
frame = frame_meta['frame']
start_time = frame_meta['start_time']
rendering_start_time = perf_counter()
frame = render_segmentation(frame, objects, visualizer, output_transform, only_masks)
render_metrics.update(rendering_start_time)
presenter.drawGraphs(frame)
metrics.update(start_time, frame)
if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
video_writer.write(frame)
next_frame_id_to_show += 1
if not args.no_show:
cv2.imshow('Segmentation Results', frame)
key = cv2.waitKey(1)
if key in (27, ord('q'), ord('Q')): # cv2.waitKey returns an int key code
break
if key == 9:
only_masks = not only_masks
presenter.handleKey(key)
pipeline.await_all()
# Process completed requests
for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
results = pipeline.get_result(next_frame_id_to_show)
while results is None:
results = pipeline.get_result(next_frame_id_to_show)
objects, frame_meta = results
if args.raw_output_message:
print_raw_results(objects, next_frame_id_to_show, model.labels)
frame = frame_meta['frame']
start_time = frame_meta['start_time']
rendering_start_time = perf_counter()
frame = render_segmentation(frame, objects, visualizer, output_transform, only_masks)
render_metrics.update(rendering_start_time)
presenter.drawGraphs(frame)
metrics.update(start_time, frame)
if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1):
video_writer.write(frame)
if not args.no_show:
cv2.imshow('Segmentation Results', frame)
key = cv2.waitKey(1)
metrics.log_total()
log_latency_per_stage(cap.reader_metrics.get_latency(),
pipeline.preprocess_metrics.get_latency(),
pipeline.inference_metrics.get_latency(),
pipeline.postprocess_metrics.get_latency(),
render_metrics.get_latency())
for rep in presenter.reportMeans():
log.info(rep)
if __name__ == '__main__':
sys.exit(main() or 0)
|
the-stack_106_21944
|
import requests
import json
import smtplib
URL = 'https://min-api.cryptocompare.com/data/price?fsym=DOGE&tsyms=INR'
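# The endpoint returns a small JSON object keyed by the requested currency,
# e.g. (illustrative) {"INR": 6.42}; getDogePrice() below reads the "INR" field.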
def getDogePrice():
response = requests.request('GET', URL)
response = json.loads(response.text)
f = open(r'C:\Users\Mittu\Desktop\DogeAlert\value_change.txt', 'r')
previous_value = f.read()
if response['INR'] >= float(previous_value) + 3:
msg = '\nThe price of DogeCoin changed by {} rupees!\nPrevious value : {}\nCurrent value : {}'.format(round(response['INR'] - float(previous_value), 2), previous_value, response['INR'])
print(msg)
sendEmail(previous_value, float(response['INR']), msg)
sendTelegramMessage(msg)
f = open(r'C:\Users\Mittu\Desktop\DogeAlert\value_change.txt', 'w')
f.write(str(response['INR']))
f.close()
elif response['INR'] <= float(previous_value) - 3:
msg = '\nThe price of DogeCoin changed by {} rupees!\nPrevious value : {}\nCurrent value : {}'.format(round(float(previous_value) - response['INR'], 2), previous_value, response['INR'])
print(msg)
sendEmail(previous_value, float(response['INR']), msg)
sendTelegramMessage(msg)
f = open(r'C:\Users\Mittu\Desktop\DogeAlert\value_change.txt', 'w')
f.write(str(response['INR']))
f.close()
else:
msg = 'DogeCoin stable!\nPrevious value : {}\nCurrent value : {}'.format(previous_value, response['INR'])
print(msg)
#sendTelegramMessage(msg)
def sendEmail(previous_value, current_value, msg):
TO = ['[email protected]']
message = 'Subject: {}\n\n{}'.format('DogeCoin price update', msg)
for addr in TO:
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.login('[email protected]', 'PASSWORD')
s.sendmail(from_addr='[email protected]', to_addrs=addr, msg=message)
def sendTelegramMessage(message):
url = "https://api.telegram.org/" + 'botID' + "/sendMessage"
data = {
"chat_id": 'CHANNEL_ID',
"text": message
}
try:
response = requests.request("POST", url, params=data)
print("Telegram URL :",url)
print("Telegram response :", response.text)
telegram_data = json.loads(response.text)
return telegram_data["ok"]
except Exception as e:
print("An error occurred in sending the alert message via Telegram")
print(e)
return False
if __name__ == '__main__':
getDogePrice()
|
the-stack_106_21945
|
"""Definition of HLO Instructions"""
from collections import defaultdict
from enum import Enum, auto
import numpy as np
from common import compute_bytes, append_flatten_elements, transpose_flatten, reshape_flatten
class ShardingSpecType(Enum):
REPLICATED = auto()
MAXIMAL = auto()
OTHER = auto()
TUPLE = auto()
PARTIAL_REDUCTION = auto()
INF_COST = 1e10 # infinity cost
class ShardingSpec:
def __init__(self, type_, tile_assignment_dimensions, tile_assignment_devices,
replicate_on_last_tile_dim, partial_reduce_replication):
self.type = type_
self.tile_assignment_dimensions = tuple(tile_assignment_dimensions)
self.tile_assignment_devices = tuple(tile_assignment_devices)
self.replicate_on_last_tile_dim = replicate_on_last_tile_dim
self.partial_reduce_replication = partial_reduce_replication
def num_tile_devices(self):
if self.type == ShardingSpecType.REPLICATED:
return 1
assert self.type == ShardingSpecType.OTHER
ret = np.prod(self.tile_assignment_dimensions)
if self.replicate_on_last_tile_dim:
ret /= self.tile_assignment_dimensions[-1]
return ret
def transpose(self, dimensions):
if self.type == ShardingSpecType.REPLICATED:
return self
assert self.type == ShardingSpecType.OTHER
spec_trans_dims = list(dimensions)
if self.replicate_on_last_tile_dim:
spec_trans_dims.append(len(dimensions))
tile_assignment_dimensions = [self.tile_assignment_dimensions[i]
for i in spec_trans_dims]
tile_assignment_devices = transpose_flatten(self.tile_assignment_devices,
self.tile_assignment_dimensions, spec_trans_dims)
ret = ShardingSpec(self.type,
tile_assignment_dimensions,
tile_assignment_devices,
self.replicate_on_last_tile_dim,
self.partial_reduce_replication)
return ret
def broadcast(self, new_shape, dimensions):
if self.type == ShardingSpecType.REPLICATED:
return self
assert self.type == ShardingSpecType.OTHER
tile_assignment_dimensions = []
for i in range(len(new_shape)):
if i in dimensions:
tile_assignment_dimensions.append(
self.tile_assignment_dimensions[dimensions.index(i)])
else:
tile_assignment_dimensions.append(1)
if self.replicate_on_last_tile_dim:
tile_assignment_dimensions.append(self.tile_assignment_dimensions[-1])
output_spec = ShardingSpec(self.type,
tile_assignment_dimensions,
self.tile_assignment_devices,
self.replicate_on_last_tile_dim,
self.partial_reduce_replication)
return output_spec
def reshape(self, old_shape, new_shape):
if self.type == ShardingSpecType.REPLICATED:
return self
assert self.type == ShardingSpecType.OTHER
# Construct a map that maps an old dimension to its corresponding new dimension
dim_mapping = {}
new_pt = -1
old_pt = -1
old_prod = 1
new_prod = 1
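# Illustrative trace (added comment): for old_shape=(4, 6) and new_shape=(4, 2, 3),
# the running products meet at 1, 4 and 24, giving dim_mapping = {0: 0, 1: 1, 2: 3};
# i.e. old dim 0 maps to new dim 0, old dim 1 starts at new dim 1 (covered by new
# dims 1 and 2), and the final entry marks the end of both shapes.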
while True:
move_new = False
move_old = False
if new_prod == old_prod:
dim_mapping[old_pt + 1] = new_pt + 1
move_new = move_old = True
elif new_prod < old_prod:
move_new = True
else:
move_old = True
if move_new:
new_pt += 1
if new_pt < len(new_shape):
new_prod *= new_shape[new_pt]
else:
break
if move_old:
old_pt += 1
if old_pt < len(old_shape):
old_prod *= old_shape[old_pt]
else:
break
tile_assignment_dimensions = []
cur_prod = 1
state = 1 # 0: start 1: middle
i = 0
failed = False
while i < len(old_shape) and not failed:
if state == 0:
assert i in dim_mapping
while len(tile_assignment_dimensions) < dim_mapping[i]:
tile_assignment_dimensions.append(1)
tile_assignment_dimensions.append(
self.tile_assignment_dimensions[i])
state = 1
i += 1
elif state == 1:
if i in dim_mapping:
state = 0
else:
if self.tile_assignment_dimensions[i] == 1:
i += 1
else:
failed = True
if failed:
return None
while len(tile_assignment_dimensions) < len(new_shape):
tile_assignment_dimensions.append(1)
if self.replicate_on_last_tile_dim:
tile_assignment_dimensions.append(self.tile_assignment_dimensions[-1])
output_spec = ShardingSpec(self.type,
tile_assignment_dimensions,
self.tile_assignment_devices,
self.replicate_on_last_tile_dim,
self.partial_reduce_replication)
return output_spec
@staticmethod
def tile_internal(shape, tensor_dims, mesh_dims, cluster_env, partial_reduce_replication):
assert len(tensor_dims) == len(mesh_dims)
tile_assignment_dimensions = [1] * len(shape)
# Split on certain mesh dimensions
split_prod = 1
for tensor_dim, mesh_dim in zip(tensor_dims, mesh_dims):
tile_assignment_dimensions[tensor_dim] = cluster_env.device_mesh.shape[mesh_dim]
split_prod *= cluster_env.device_mesh.shape[mesh_dim]
if split_prod == 1:
return ShardingSpec.replicated(cluster_env)
# Replicate on the remaining mesh dimensions
if split_prod < cluster_env.num_devices:
tile_assignment_dimensions.append(cluster_env.num_devices // split_prod)
replicate_on_last_tile_dim = True
else:
replicate_on_last_tile_dim = False
# Map device ids from device_mesh to tile_assignment_devices
tile_assignment_devices = []
tmp_indices = [None] * len(cluster_env.device_mesh.shape)
def generate_tile_assignment_devices(tensor_dim, mesh_indices):
if tensor_dim == len(shape) - 1:
append_flatten_elements(tile_assignment_devices, cluster_env.device_mesh,
mesh_indices, -1, tmp_indices)
else:
next_tensor_dim = tensor_dim + 1
next_mesh_dim = -1
if next_tensor_dim in tensor_dims:
next_mesh_dim = mesh_dims[tensor_dims.index(next_tensor_dim)]
for i in range(tile_assignment_dimensions[next_tensor_dim]):
if next_mesh_dim != -1:
mesh_indices[next_mesh_dim] = i
generate_tile_assignment_devices(next_tensor_dim, mesh_indices)
generate_tile_assignment_devices(-1, [-1] * len(cluster_env.device_mesh.shape))
return ShardingSpec(ShardingSpecType.OTHER,
tile_assignment_dimensions, tile_assignment_devices,
replicate_on_last_tile_dim,
False)
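# Illustrative example (added comment): with a 2x2 device mesh (4 devices),
# tile_internal((8, 8), [0], [0], env, False) splits tensor dim 0 across mesh dim 0
# and replicates across mesh dim 1, i.e. tile_assignment_dimensions == [2, 1, 2]
# with replicate_on_last_tile_dim == True; the exact device order is determined by
# append_flatten_elements over the mesh.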
@staticmethod
def tile(shape, tensor_dims, mesh_dims, cluster_env):
return ShardingSpec.tile_internal(shape, tensor_dims, mesh_dims, cluster_env, False)
@staticmethod
def tile_partial_reduce(shape, tensor_dims, mesh_dims, cluster_env):
return ShardingSpec.tile_internal(shape, tensor_dims, mesh_dims, cluster_env, True)
@staticmethod
def replicated(cluster_env):
tile_assignment_devices = range(cluster_env.num_devices)
return ShardingSpec(ShardingSpecType.REPLICATED, (), tile_assignment_devices,
False, False)
@staticmethod
def split(shape, dim, cluster_env):
tile_assignment_dimensions = [1] * len(shape)
tile_assignment_dimensions[dim] = cluster_env.num_devices
tile_assignment_devices = range(cluster_env.num_devices)
return ShardingSpec(ShardingSpecType.OTHER,
tile_assignment_dimensions, tile_assignment_devices,
False, False)
@staticmethod
def tuple():
return ShardingSpec(ShardingSpecType.TUPLE, (), (), False, False)
def __str__(self):
return f"{self.tile_assignment_dimensions}"\
f"{list(self.tile_assignment_devices)}"
def __eq__(self, other):
return (self.type == other.type and
self.tile_assignment_dimensions == other.tile_assignment_dimensions and
self.tile_assignment_devices == other.tile_assignment_devices and
self.replicate_on_last_tile_dim == other.replicate_on_last_tile_dim and
self.partial_reduce_replication == other.partial_reduce_replication)
def resharding_cost_vector(cluster_env, source_ins, required_spec):
cost_vector = []
for strategy in source_ins.strategies:
cost_vector.append(cluster_env.resharding_cost(source_ins.shape,
strategy.output_spec, required_spec))
return cost_vector
def follow_ins_cost_vector(source_ins, index):
ret = [INF_COST] * len(source_ins.strategies)
ret[index] = 0
return ret
class InstructionStrategy:
def __init__(self, name, output_spec):
self.name = name
self.output_spec = output_spec
class OpCode(Enum):
PARAMETER = auto()
CONSTANT = auto()
BROADCAST = auto()
RESHAPE = auto()
TRANSPOSE = auto()
IDENTITY = auto()
EXP = auto()
FORCE_REPLICATED = auto()
ADD = auto()
SUBTRACT = auto()
MULTIPLY = auto()
DIV = auto()
COMPARE = auto()
SELECT = auto()
REDUCE = auto()
DOT = auto()
TUPLE = auto()
op_code_ct = defaultdict(int)
class HloInstruction:
def __init__(self, op_code, shape, operands=[]):
# Attributes
self.op_code = op_code
self.shape = shape
self.operands = operands
self.name = f"{str(op_code)[7:].lower()}.{op_code_ct[op_code]}"
op_code_ct[op_code] += 1
# Cost
self.strategies = []
self.compute_costs = []
self.communication_costs = []
self.memory_costs = []
self.resharding_costs = []
self.follow_ins = None
self.depth = None
# The index in HloComputation
self.index = HloComputation.cur_env.append(self)
self.batch_dim = None
def build_strategy_and_cost(self, cluster_env, solver_option):
raise NotImplementedError(f"{self.op_code}")
def propagate_batch_dim(self, operand):
raise NotImplementedError(f"{self.op_code}")
class HloParameter(HloInstruction):
def __init__(self, shape, fix_strategy=None):
super().__init__(OpCode.PARAMETER, shape, [])
self.fix_strategy = fix_strategy
def build_strategy_and_cost(self, cluster_env, solver_option):
for i in range(len(self.shape)):
for j in range(len(cluster_env.device_mesh.shape)):
if (cluster_env.device_mesh.shape[j] == 1 or
self.shape[i] < cluster_env.device_mesh.shape[j]):
continue
name = f"S{i} @ {j}"
output_spec = ShardingSpec.tile(self.shape, [i], [j], cluster_env)
self.strategies.append(InstructionStrategy(name, output_spec))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape) / output_spec.num_tile_devices())
self.strategies.append(InstructionStrategy("R", ShardingSpec.replicated(cluster_env)))
self.compute_costs.append(2)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape))
if self.fix_strategy:
new_strategies = []
new_compute_costs = []
new_communication_costs = []
new_memory_costs = []
# filter strategies
for i in range(len(self.strategies)):
if self.strategies[i].name == self.fix_strategy:
new_strategies.append(self.strategies[i])
new_compute_costs.append(self.compute_costs[i])
new_communication_costs.append(self.communication_costs[i])
new_memory_costs.append(self.memory_costs[i])
self.strategies = new_strategies
self.compute_costs = new_compute_costs
self.communication_costs = new_communication_costs
self.memory_costs = new_memory_costs
def __str__(self):
return f"{self.name} {self.shape} = parameter()"
class HloConstant(HloInstruction):
def __init__(self, value):
super().__init__(OpCode.CONSTANT, (), [])
self.value = value
def build_strategy_and_cost(self, cluster_env, solver_option):
self.strategies.append(InstructionStrategy("R", ShardingSpec.replicated(cluster_env)))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape))
def __str__(self):
return f"{self.name} {self.shape} = constant({self.value})"
class HloBroadcast(HloInstruction):
def __init__(self, operand, shape, dimensions=()):
for i in dimensions:
assert shape[i] == operand.shape[dimensions.index(i)]
super().__init__(OpCode.BROADCAST, shape, [operand])
self.dimensions = dimensions
def build_strategy_and_cost(self, cluster_env, solver_option):
follow = self.operands[0]
self.follow_ins = follow
for sid in range(len(follow.strategies)):
output_spec = follow.strategies[sid].output_spec.broadcast(
self.shape, self.dimensions)
name = f"{output_spec.tile_assignment_dimensions}"
self.strategies.append(InstructionStrategy(name, output_spec))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape) / output_spec.num_tile_devices())
self.resharding_costs.append([follow_ins_cost_vector(follow, sid)])
def __str__(self):
return f"{self.name} {self.shape} = broadcast({self.operands[0].name})"
class HloReshape(HloInstruction):
def __init__(self, operand, new_shape):
# todo: mark this as inplace
assert np.prod(operand.shape) == np.prod(new_shape)
super().__init__(OpCode.RESHAPE, new_shape, [operand])
self.new_shape = new_shape
def build_strategy_and_cost(self, cluster_env, solver_option):
follow = self.operands[0]
self.follow_ins = follow
old_shape = self.operands[0].shape
new_shape = self.new_shape
for sid in range(len(follow.strategies)):
output_spec = follow.strategies[sid].output_spec.reshape(
follow.shape, self.shape)
if output_spec is None:
continue
name = f"{output_spec.tile_assignment_dimensions}"
self.strategies.append(InstructionStrategy(name, output_spec))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape) / output_spec.num_tile_devices())
self.resharding_costs.append([follow_ins_cost_vector(follow, sid)])
def __str__(self):
return f"{self.name} {self.shape} = reshape({self.operands[0].name})"
class HloTranspose(HloInstruction):
def __init__(self, operand, dimensions):
assert len(dimensions) == len(operand.shape)
new_shape = tuple(operand.shape[i] for i in dimensions)
super().__init__(OpCode.TRANSPOSE, new_shape, [operand])
self.dimensions = dimensions
def build_strategy_and_cost(self, cluster_env, solver_option):
follow = self.operands[0]
self.follow_ins = follow
for sid in range(len(follow.strategies)):
output_spec = follow.strategies[sid].output_spec.transpose(self.dimensions)
name = f"{output_spec.tile_assignment_dimensions}"
self.strategies.append(InstructionStrategy(name, output_spec))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape) / output_spec.num_tile_devices())
self.resharding_costs.append([follow_ins_cost_vector(follow, sid)])
def __str__(self):
return f"{self.name} {self.shape} = transpose({self.operands[0].name}) " +\
f"dimensions={self.dimensions}"
class HloElementwise(HloInstruction):
def __init__(self, op_code, operands):
for i in range(0, len(operands)):
assert operands[0].shape == operands[i].shape
super().__init__(op_code, operands[0].shape, operands)
def build_strategy_and_cost(self, cluster_env, solver_option):
depths = [operand.depth for operand in self.operands]
follow_idx = np.argmax(depths)
follow = self.operands[follow_idx]
self.follow_ins = follow
for sid in range(len(follow.strategies)):
output_spec = follow.strategies[sid].output_spec
name = f"{output_spec.tile_assignment_dimensions}"
self.strategies.append(InstructionStrategy(name, output_spec))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape) / output_spec.num_tile_devices())
resharding_costs = []
for k in range(len(self.operands)):
if k == follow_idx:
resharding_costs.append(
follow_ins_cost_vector(follow, sid))
else:
resharding_costs.append(
resharding_cost_vector(cluster_env, self.operands[k], output_spec))
self.resharding_costs.append(resharding_costs)
def propagate_batch_dim(self, ins):
self.batch_dim = ins.batch_dim
return True
def __str__(self):
fun_name = str(self.op_code)[7:].lower()
args = ", ".join(f"{self.operands[i].name}" for i in range(len(self.operands)))
return f"{self.name} {self.shape} = {fun_name}({args})"
class HloIdentity(HloElementwise):
def __init__(self, operand):
super().__init__(OpCode.IDENTITY, [operand])
class HloExp(HloElementwise):
def __init__(self, operand):
super().__init__(OpCode.EXP, [operand])
class HloForceReplicated(HloElementwise):
def __init__(self, operand):
super().__init__(OpCode.FORCE_REPLICATED, [operand])
def build_strategy_and_cost(self, cluster_env, solver_option):
self.strategies.append(InstructionStrategy("R",
ShardingSpec.replicated(cluster_env)))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(0)
self.resharding_costs.append([
resharding_cost_vector(cluster_env, self.operands[0],
ShardingSpec.replicated(cluster_env))
])
class HloAdd(HloElementwise):
def __init__(self, lhs, rhs):
super().__init__(OpCode.ADD, [lhs, rhs])
class HloSubtract(HloElementwise):
def __init__(self, lhs, rhs):
super().__init__(OpCode.SUBTRACT, [lhs, rhs])
class HloMutiply(HloElementwise):
def __init__(self, lhs, rhs):
super().__init__(OpCode.MULTIPLY, [lhs, rhs])
class HloDiv(HloElementwise):
def __init__(self, lhs, rhs):
super().__init__(OpCode.DIV, [lhs, rhs])
class HloCompare(HloElementwise):
def __init__(self, lhs, rhs):
super().__init__(OpCode.COMPARE, [lhs, rhs])
class HloSelect(HloElementwise):
def __init__(self, pred, true_value, false_value):
super().__init__(OpCode.SELECT, [pred, true_value, false_value])
class HloReduce(HloInstruction):
def __init__(self, operand, dimensions):
new_shape = tuple(operand.shape[i] for i in range(len(operand.shape)) if i not in dimensions)
super().__init__(OpCode.REDUCE, new_shape, [operand])
self.dimensions = dimensions
def build_strategy_and_cost(self, cluster_env, solver_option):
operand = self.operands[0]
self.follow_ins = operand
# Map old dims to new dim
old_dim_to_new_dim = []
pt = 0
for old_dim in range(len(operand.shape)):
if old_dim in self.dimensions:
old_dim_to_new_dim.append(-1)
else:
old_dim_to_new_dim.append(pt)
pt += 1
assert pt == len(self.shape)
# Create follow strategies
for sid in range(len(operand.strategies)):
tensor_dim_to_mesh = cluster_env.get_tensor_dim_to_mesh_dim(
operand.shape, operand.strategies[sid].output_spec)
tile_tensor_dims = []
tile_mesh_dims = []
all_reduce_dims = []
for tensor_dim in range(len(operand.shape)):
mesh_dim = tensor_dim_to_mesh[tensor_dim]
if tensor_dim in self.dimensions:
if mesh_dim == -1: # reduce on a replicated dim
continue
else: # reduce on a split dim
all_reduce_dims.append(mesh_dim)
else:
if mesh_dim == -1: # follow replicated dim
pass
else: # follow split dim
tile_tensor_dims.append(old_dim_to_new_dim[tensor_dim])
tile_mesh_dims.append(mesh_dim)
output_spec = ShardingSpec.tile(self.shape, tile_tensor_dims, tile_mesh_dims, cluster_env)
mem_cost = compute_bytes(self.shape) / output_spec.num_tile_devices()
comm_cost = 0
for mesh_dim in all_reduce_dims:
comm_cost += cluster_env.all_reduce_cost(mem_cost, mesh_dim)
reduce_dims_str = "".join([str(x) for x in all_reduce_dims])
if reduce_dims_str:
name = f"follow (allreduce @ {reduce_dims_str})"
else:
name = f"{output_spec.tile_assignment_dimensions}"
self.strategies.append(InstructionStrategy(name, output_spec))
self.compute_costs.append(0)
self.communication_costs.append(comm_cost)
self.memory_costs.append(mem_cost)
self.resharding_costs.append([follow_ins_cost_vector(operand, sid)])
def __str__(self):
return f"{self.name} {self.shape} = reduce({self.operands[0].name}) " +\
f"dimensions={self.dimensions}"
class HloDot(HloInstruction):
def __init__(self, lhs, rhs,
lhs_batch_dims=(), lhs_contracting_dims=(1,),
rhs_batch_dims=(), rhs_contracting_dims=(0,)):
# shape inference
lhs_space_shape = \
tuple(lhs.shape[i] for i in range(len(lhs.shape))
if i not in lhs_contracting_dims and i not in lhs_batch_dims)
rhs_space_shape = \
tuple(rhs.shape[i] for i in range(len(rhs.shape))
if i not in rhs_contracting_dims and i not in rhs_batch_dims)
lhs_batch_shape = tuple(lhs.shape[i] for i in lhs_batch_dims)
shape = lhs_batch_shape + lhs_space_shape + rhs_space_shape
for i, j in zip(lhs_contracting_dims, rhs_contracting_dims):
assert lhs.shape[i] == rhs.shape[j]
for i, j in zip(lhs_batch_dims, rhs_batch_dims):
assert lhs.shape[i] == rhs.shape[j]
super().__init__(OpCode.DOT, shape, [lhs, rhs])
self.lhs = lhs
self.lhs_batch_dims = lhs_batch_dims
self.lhs_contracting_dims = lhs_contracting_dims
self.lhs_space_dims = tuple(set(range(len(lhs.shape))) - set(self.lhs_batch_dims) - set(self.lhs_contracting_dims))
assert len(self.lhs_contracting_dims) == 1
assert len(self.lhs_space_dims) == 1
self.rhs = rhs
self.rhs_batch_dims = rhs_batch_dims
self.rhs_contracting_dims = rhs_contracting_dims
self.rhs_space_dims = tuple(set(range(len(rhs.shape))) - set(self.rhs_batch_dims) - set(self.rhs_contracting_dims))
assert len(self.rhs_contracting_dims) == 1
assert len(self.rhs_space_dims) == 1
def build_strategy_and_cost(self, cluster_env, solver_option):
lhs = self.lhs
lhs_batch_dims = self.lhs_batch_dims
lhs_space_dim = self.lhs_space_dims[0]
lhs_con_dim = self.lhs_contracting_dims[0]
rhs = self.rhs
rhs_batch_dims = self.rhs_batch_dims
rhs_space_dim = self.rhs_space_dims[0]
rhs_con_dim = self.rhs_contracting_dims[0]
space_base_dim = len(self.lhs_batch_dims)
assert len(cluster_env.device_mesh.shape) == 2
# Split lhs space dim + rhs space dim
# @ {0, 1}
output_spec =\
ShardingSpec.tile(self.shape, [space_base_dim, space_base_dim + 1], [0, 1], cluster_env)
self.strategies.append(InstructionStrategy("SS = SR x RS @ {0,1}", output_spec))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape) / output_spec.num_tile_devices())
self.resharding_costs.append([
resharding_cost_vector(cluster_env, lhs,
ShardingSpec.tile(lhs.shape, [lhs_space_dim], [0], cluster_env)),
resharding_cost_vector(cluster_env, rhs,
ShardingSpec.tile(rhs.shape, [rhs_space_dim], [1], cluster_env))
])
# @ {1, 0}
output_spec =\
ShardingSpec.tile(self.shape, [space_base_dim, space_base_dim + 1], [1, 0], cluster_env)
self.strategies.append(InstructionStrategy("SS = SR x RS @ {1,0}", output_spec))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape) / output_spec.num_tile_devices())
self.resharding_costs.append([
resharding_cost_vector(cluster_env, lhs,
ShardingSpec.tile(lhs.shape, [lhs_space_dim], [1], cluster_env)),
resharding_cost_vector(cluster_env, rhs,
ShardingSpec.tile(rhs.shape, [rhs_space_dim], [0], cluster_env))
])
# Split lhs space dim + contracting dim
# @ {0, 1}
if cluster_env.device_mesh.shape[1] > 1:
output_spec = ShardingSpec.tile(self.shape, [space_base_dim], [0], cluster_env)
memory_cost = compute_bytes(self.shape) / output_spec.num_tile_devices()
self.strategies.append(
InstructionStrategy("SR = SS x SR @ {0,1} (allreduce @ 1)", output_spec))
self.compute_costs.append(0)
self.communication_costs.append(cluster_env.all_reduce_cost(memory_cost, 1))
self.memory_costs.append(memory_cost)
self.resharding_costs.append([
resharding_cost_vector(cluster_env, lhs,
ShardingSpec.tile(lhs.shape, [lhs_space_dim, lhs_con_dim], [0, 1], cluster_env)),
resharding_cost_vector(cluster_env, rhs,
ShardingSpec.tile(rhs.shape, [rhs_con_dim], [1], cluster_env))
])
# @ {1, 0}
if cluster_env.device_mesh.shape[0] > 1:
output_spec = ShardingSpec.tile(self.shape, [space_base_dim], [1], cluster_env)
memory_cost = compute_bytes(self.shape) / output_spec.num_tile_devices()
self.strategies.append(
InstructionStrategy("SR = SS x SR @ {1,0} (allreduce @ 0)", output_spec))
self.compute_costs.append(0)
self.communication_costs.append(cluster_env.all_reduce_cost(memory_cost, 0))
self.memory_costs.append(memory_cost)
self.resharding_costs.append([
resharding_cost_vector(cluster_env, lhs,
ShardingSpec.tile(lhs.shape, [lhs_space_dim, lhs_con_dim], [1, 0], cluster_env)),
resharding_cost_vector(cluster_env, rhs,
ShardingSpec.tile(rhs.shape, [rhs_con_dim], [0], cluster_env))
])
# Split rhs space dim + contracting dim
# @ {0, 1}
if cluster_env.device_mesh.shape[0] > 1 and cluster_env.device_mesh.shape[1] > 1:
output_spec = ShardingSpec.tile(self.shape, [space_base_dim+1], [1], cluster_env)
memory_cost = compute_bytes(self.shape) / output_spec.num_tile_devices()
self.strategies.append(
InstructionStrategy("RS = RS x SS @ {0,1} (allreduce @ 0)", output_spec))
self.compute_costs.append(0)
self.communication_costs.append(cluster_env.all_reduce_cost(memory_cost, 0))
self.memory_costs.append(memory_cost)
self.resharding_costs.append([
resharding_cost_vector(cluster_env, lhs,
ShardingSpec.tile(lhs.shape, [lhs_con_dim], [0], cluster_env)),
resharding_cost_vector(cluster_env, rhs,
ShardingSpec.tile(rhs.shape, [rhs_con_dim, rhs_space_dim], [0, 1], cluster_env))
])
# @ {1, 0}
if cluster_env.device_mesh.shape[0] > 1 and cluster_env.device_mesh.shape[1] > 1:
output_spec = ShardingSpec.tile(self.shape, [space_base_dim+1], [0], cluster_env)
memory_cost = compute_bytes(self.shape) / output_spec.num_tile_devices()
self.strategies.append(
InstructionStrategy("RS = RS x SS @ {1,0} (allreduce @ 1)", output_spec))
self.compute_costs.append(0)
self.communication_costs.append(cluster_env.all_reduce_cost(memory_cost, 1))
self.memory_costs.append(memory_cost)
self.resharding_costs.append([
resharding_cost_vector(cluster_env, lhs,
ShardingSpec.tile(lhs.shape, [lhs_con_dim], [1], cluster_env)),
resharding_cost_vector(cluster_env, rhs,
ShardingSpec.tile(rhs.shape, [rhs_con_dim, rhs_space_dim], [1, 0], cluster_env))
])
# Split one batch dim
for i in range(len(self.lhs_batch_dims)):
for j in range(len(cluster_env.device_mesh.shape)):
if (cluster_env.device_mesh.shape[j] == 1 or
self.shape[i] < cluster_env.device_mesh.shape[j]):
continue
output_spec = ShardingSpec.tile(self.shape, [i], [j], cluster_env)
self.strategies.append(InstructionStrategy(f"Sb_{i} = Sb x Sb @ {j}", output_spec))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape) / output_spec.num_tile_devices())
self.resharding_costs.append([
resharding_cost_vector(cluster_env, lhs,
ShardingSpec.tile(lhs.shape, [lhs_batch_dims[i]], [j], cluster_env)),
resharding_cost_vector(cluster_env, rhs,
ShardingSpec.tile(rhs.shape, [rhs_batch_dims[i]], [j], cluster_env))
])
# Split two batch dims
if len(self.lhs_batch_dims) == 2 and cluster_env.device_mesh.shape[0] > 1\
and cluster_env.device_mesh.shape[1] > 1:
self.strategies = []
self.compute_costs = []
self.communication_costs = []
self.memory_costs = []
self.resharding_costs = []
# Split two batch dims
output_spec = ShardingSpec.tile(self.shape, [0, 1], [0, 1], cluster_env)
self.strategies.append(InstructionStrategy("Sb = Sb x Sb @ {0,1}", output_spec))
self.compute_costs.append(0)
self.communication_costs.append(0)
self.memory_costs.append(compute_bytes(self.shape) / output_spec.num_tile_devices())
self.resharding_costs.append([
resharding_cost_vector(cluster_env, lhs,
ShardingSpec.tile(lhs.shape, [lhs_batch_dims[0], lhs_batch_dims[1]], [0, 1], cluster_env)),
resharding_cost_vector(cluster_env, rhs,
ShardingSpec.tile(rhs.shape, [rhs_batch_dims[0], rhs_batch_dims[1]], [0, 1], cluster_env))
])
# If force batch dim to a mesh dim, filter out invalid strategies
if solver_option.force_batch_dim_to_mesh_dim is not None and self.batch_dim is not None:
filter_indices = []
for i in range(len(self.strategies)):
tensor_dim_to_mesh_dim = cluster_env.get_tensor_dim_to_mesh_dim(
self.shape, self.strategies[i].output_spec)
if tensor_dim_to_mesh_dim[self.batch_dim] == solver_option.force_batch_dim_to_mesh_dim:
filter_indices.append(i)
self.strategies = [self.strategies[i] for i in filter_indices]
self.compute_costs = [self.compute_costs[i] for i in filter_indices]
self.communication_costs = [self.communication_costs[i] for i in filter_indices]
self.memory_costs = [self.memory_costs[i] for i in filter_indices]
self.resharding_costs = [self.resharding_costs[i] for i in filter_indices]
def propagate_batch_dim(self, operand):
index = self.operands.index(operand)
if index == 0:
for i in range(len(self.lhs_batch_dims)):
if operand.batch_dim == self.lhs_batch_dims[i]:
self.batch_dim = i
return True
if operand.batch_dim == self.lhs_space_dims[0]:
self.batch_dim = len(self.lhs_batch_dims)
return True
if operand.batch_dim in self.lhs_contracting_dims:
return False
else:
for i in range(len(self.rhs_batch_dims)):
if operand.batch_dim == self.rhs_batch_dims[i]:
self.batch_dim = i
return True
if operand.batch_dim == self.rhs_space_dims[0]:
self.batch_dim = len(self.rhs_batch_dims)
return True
if operand.batch_dim in self.rhs_contracting_dims:
return False
def __str__(self):
return f"{self.name} {self.shape} = dot({self.lhs.name}, {self.rhs.name}) "\
f" lhs_con_dim={self.lhs_contracting_dims},"\
f" rhs_con_dim={self.rhs_contracting_dims}"
class HloTuple(HloInstruction):
def __init__(self, operands):
super().__init__(OpCode.TUPLE, (), operands)
def build_strategy_and_cost(self, cluster_env, solver_option):
self.strategies.append(InstructionStrategy("tuple", ShardingSpec.tuple()))
self.memory_costs.append(0)
self.compute_costs.append(0)
self.communication_costs.append(0)
self.resharding_costs.append([np.zeros(len(operand.strategies))
for operand in self.operands])
def __str__(self):
names = tuple(x.name for x in self.operands)
return f"{self.name} {self.shape} = tuple{names}"
class HloComputation:
cur_env = None
def __init__(self):
self.ct = 0
self.instructions = []
self.alias_list = []
self.alias_cost_vector = []
self.parameters = []
self.strategy_built = False
def append(self, instruction):
ct = len(self.instructions)
self.instructions.append(instruction)
if instruction.op_code == OpCode.PARAMETER:
self.parameters.append(instruction)
return ct
def liveness_analysis(self):
liveness_dict = dict()
live_set = set()
for t in range(len(self.instructions)-1, -1, -1):
inst = self.instructions[t]
live_set.add(inst)
for operand in inst.operands:
live_set.add(operand)
liveness_dict[t] = set(live_set)
live_set.remove(inst)
return liveness_dict
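    # Illustrative note: the backward scan above records, at time step t, every
    # value that is already defined by t and still used at or after t. E.g. for
    # instructions  0: a = parameter, 1: b = parameter, 2: c = dot(a, b)
    # the result is {2: {a, b, c}, 1: {a, b}, 0: {a}} -- a value becomes live at
    # its defining instruction and stays live through its last use.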
def set_alias(self, alias_list):
self.alias_list = alias_list
def concurrency_analysis(self):
frontier_list = []
edge_dict = defaultdict(list)
# Build degree dict
#out_degree = defaultdict(lambda : 0)
#for ins in self.instructions:
# for operand in ins.operands:
# out_degree[operand] += 1
degree = defaultdict(lambda : 0)
for ins in self.instructions:
for operand in ins.operands:
degree[ins] += 1
edge_dict[operand].append(ins)
# Init frontier
collected = 0
current_frontier = []
for ins in self.instructions:
if degree[ins] == 0:
current_frontier.append(ins)
collected += 1
frontier_list.append(current_frontier)
# Push forward frontier
while collected < len(self.instructions):
current_frontier = frontier_list[-1]
next_frontier = []
for ins in current_frontier:
for node in edge_dict[ins]:
degree[node] -= 1
if degree[node] == 0:
next_frontier.append(node)
collected += 1
frontier_list.append(next_frontier)
for i, frontier in enumerate(frontier_list):
print(i)
for ins in frontier:
print(ins)
def forward_backward_analysis(self):
used_by = defaultdict(list)
for ins in self.instructions:
for operand in ins.operands:
used_by[operand].append(ins.index)
sep_id = 0
for param in self.parameters:
if len(used_by[param]) > 2:
backward_id = used_by[param][0]
sep_id = max(sep_id, backward_id + 1)
return sep_id
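    # Hedged note on the heuristic above: sep_id is one past the largest
    # first-use index over all parameters that have more than two uses;
    # presumably such parameters appear in both the forward and the backward
    # pass, so this index approximates the forward/backward boundary.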
def batch_dim_analysis(self):
# Build used by dict
used_by = defaultdict(list)
for ins in self.instructions:
for operand in ins.operands:
used_by[operand].append(ins)
# Find source.
# Rule: The first dim of parameters that are only used once
#possible_inputs = []
#for param in self.parameters:
# if len(used_by[param]) == 1:
# possible_inputs.append(param)
#source = possible_inputs[0]
source = self.instructions[0]
source.batch_dim = 0
# Dim propagation
queue = [source]
visited = set([source])
while len(queue) > 0:
ins = queue.pop(0)
# Propagate to operand
# Propagate to used_by
for consumer in used_by[ins]:
#print(f"Propagate from {ins} to {consumer}")
success = consumer.propagate_batch_dim(ins)
if not success:
continue
                if consumer not in visited:
                    visited.add(consumer)
                    queue.append(consumer)
def depth_analysis(self):
edge_dict = defaultdict(list)
degree = defaultdict(lambda : 0)
for ins in self.instructions:
for operand in ins.operands:
degree[ins] += 1
edge_dict[operand].append(ins)
# Init frontier
collected = 0
current_frontier = []
for ins in self.instructions:
if degree[ins] == 0:
ins.depth = 0
current_frontier.append(ins)
collected += 1
# Push forward frontier
depth = 0
while collected < len(self.instructions):
next_frontier = []
for ins in current_frontier:
for node in edge_dict[ins]:
degree[node] -= 1
if degree[node] == 0:
next_frontier.append(node)
collected += 1
depth += 1
current_frontier = next_frontier
for ins in current_frontier:
ins.depth = depth
def build_strategy_and_cost(self, cluster_env, solver_option):
if self.strategy_built:
for ins in self.instructions:
ins.strategies = []
ins.compute_costs = []
ins.communication_costs = []
ins.memory_costs = []
ins.resharding_costs = []
ins.follow_ins = None
self.alias_cost_vector = []
# Analyze depth for all instructions
self.depth_analysis()
# Analyze batch dim
if solver_option.force_batch_dim_to_mesh_dim is not None:
batch_dim = self.batch_dim_analysis()
print("===== Batch Dim Analysis =====")
for i in range(len(self.instructions)):
print(f"Time {i:2d}: {self.instructions[i]} Batch: {self.instructions[i].batch_dim}")
# Build strategies and costs for each instruction
for ins in self.instructions:
ins.build_strategy_and_cost(cluster_env, solver_option)
# Build alias costs
for (ins_a, ins_b) in self.alias_list:
assert ins_a.shape == ins_b.shape
cost_vector = []
for stra_a in ins_a.strategies:
for stra_b in ins_b.strategies:
if stra_a.output_spec == stra_b.output_spec:
cost_vector.append(0)
else:
cost_vector.append(1)
self.alias_cost_vector.append(cost_vector)
self.strategy_built = True
def __enter__(self):
assert HloComputation.cur_env is None
HloComputation.cur_env = self
def __exit__(self, *args, **kwargs):
HloComputation.cur_env = None
def __str__(self):
strs = []
for i, ins in enumerate(self.instructions):
strs.append(f"{i:2d}: " + str(ins))
return "\n".join(strs)
|
the-stack_106_21946
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
HERO for Video Question Answering Tasks, shared by:
1. TVQA
2. How2QA
"""
from collections import defaultdict
import copy
import torch
from torch import nn
from torch.nn import functional as F
from .model import HeroModel
from .layers import MLPLayer
from .modeling_utils import mask_logits
class HeroForVideoQA(HeroModel):
def __init__(self, config, vfeat_dim, max_frm_seq_len):
super().__init__(
config, vfeat_dim, max_frm_seq_len)
hsz = config.c_config.hidden_size
self.qa_pool = nn.Linear(
in_features=hsz, out_features=1, bias=False)
self.qa_pred_head = MLPLayer(hsz, 1)
# in tvqa/how2qa, we also have annotations for st and ed frame idx
self.st_ed_pool = copy.deepcopy(self.qa_pool)
self.st_ed_pred_head = MLPLayer(hsz, 2)
def get_modularized_video(self, frame_embeddings, frame_mask):
"""
Args:
frame_embeddings: (Nv, Nq, L, D)
frame_mask: (Nv, Nq, L)
"""
st_ed_attn_scores = self.st_ed_pool(
frame_embeddings) # (Nv, Nq, L, 1)
qa_attn_scores = self.qa_pool(frame_embeddings)
st_ed_attn_scores = F.softmax(
mask_logits(st_ed_attn_scores,
frame_mask.unsqueeze(-1)), dim=1)
qa_attn_scores = F.softmax(
mask_logits(qa_attn_scores,
frame_mask.unsqueeze(-1)), dim=2)
# TODO check whether it is the same
st_ed_pooled_video = torch.einsum(
"vqlm,vqld->vlmd", st_ed_attn_scores,
frame_embeddings) # (Nv, L, 1, D)
qa_pooled_video = torch.einsum(
"vqlm,vqld->vqmd", qa_attn_scores,
frame_embeddings) # (Nv, Nq, 1, D)
return st_ed_pooled_video.squeeze(2), qa_pooled_video.squeeze(2)
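    # Shape walk-through (illustrative): with Nv videos, Nq answer candidates,
    # L frames and hidden size D, st_ed_attn_scores is softmaxed over the
    # candidate axis (dim=1) and qa_attn_scores over the frame axis (dim=2);
    # the einsums then contract those axes, so after squeezing the singleton
    # attention dimension the returned tensors are (Nv, L, D) for start/end
    # prediction and (Nv, Nq, D) for answer scoring.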
def forward(self, batch, task='tvqa', compute_loss=True):
batch = defaultdict(lambda: None, batch)
if task == 'tvqa' or task == 'how2qa':
targets = batch['targets'].squeeze(-1)
c_attn_masks = batch["c_attn_masks"]
ts_targets = batch["ts_targets"]
# (num_video * 5, num_frames, hid_size)
frame_embeddings = self.v_encoder.forward_repr(
batch, encode_clip=False)
frame_embeddings = self.v_encoder.c_encoder.embeddings(
frame_embeddings,
position_ids=None)
qa_embeddings = self.v_encoder.f_encoder._compute_txt_embeddings(
batch["qa_input_ids"], batch["qa_pos_ids"], txt_type_ids=None)
frame_qa_embeddings = torch.cat(
(frame_embeddings, qa_embeddings), dim=1)
frame_qa_attn_mask = torch.cat(
(c_attn_masks, batch["qa_attn_masks"]), dim=1)
fused_video_qa = self.v_encoder.c_encoder.forward_encoder(
frame_qa_embeddings, frame_qa_attn_mask)
num_frames = c_attn_masks.shape[1]
video_embeddings = fused_video_qa[:, :num_frames, :]
num_videos = len(targets)
num_frames, hid_size = video_embeddings.shape[1:3]
video_embeddings = video_embeddings.view(
num_videos, -1, num_frames, hid_size)
video_masks = c_attn_masks.view(num_videos, -1, num_frames)
video_masks = video_masks.to(dtype=video_embeddings.dtype)
st_ed_pooled_video, qa_pooled_video = self.get_modularized_video(
video_embeddings, video_masks)
pred_st_ed = self.st_ed_pred_head(st_ed_pooled_video)
st_prob = mask_logits(pred_st_ed[:, :, 0], video_masks[:, 0])
ed_prob = mask_logits(pred_st_ed[:, :, 1], video_masks[:, 0])
logits = self.qa_pred_head(qa_pooled_video).squeeze(-1)
if compute_loss:
st_target, ed_target = ts_targets[:, 0], ts_targets[:, 1]
st_loss = F.cross_entropy(
st_prob, st_target, reduction="mean",
ignore_index=-1)
ed_loss = F.cross_entropy(
ed_prob, ed_target, reduction="mean",
ignore_index=-1)
temporal_loss = (st_loss + ed_loss)/2.
qa_loss = F.cross_entropy(logits, targets, reduction='mean',
ignore_index=-1)
return qa_loss, temporal_loss
else:
return logits
else:
raise ValueError(f'Unrecognized task: {task}')
|
the-stack_106_21947
|
from collections import deque
import time
import gym
import tensorflow as tf
import numpy as np
from mpi4py import MPI
from stable_baselines.common import Dataset, explained_variance, fmt_row, zipsame, ActorCriticRLModel, SetVerbosity, \
TensorboardWriter
from stable_baselines import logger
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common.policies import LstmPolicy, ActorCriticPolicy
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.mpi_moments import mpi_moments
from stable_baselines.trpo_mpi.utils import traj_segment_generator, add_vtarg_and_adv, flatten_lists
from stable_baselines.a2c.utils import total_episode_reward_logger
class PPO1(ActorCriticRLModel):
"""
Proximal Policy Optimization algorithm (MPI version).
Paper: https://arxiv.org/abs/1707.06347
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param timesteps_per_actorbatch: (int) timesteps per actor per update
:param clip_param: (float) clipping parameter epsilon
:param entcoeff: (float) the entropy loss weight
:param optim_epochs: (float) the optimizer's number of epochs
:param optim_stepsize: (float) the optimizer's stepsize
    :param optim_batchsize: (int) the optimizer's batch size
    :param gamma: (float) discount factor
    :param lam: (float) advantage estimation discounting factor (GAE lambda)
:param adam_epsilon: (float) the epsilon value for the adam optimizer
:param schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',
'double_linear_con', 'middle_drop' or 'double_middle_drop')
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
"""
def __init__(self, policy, env, gamma=0.99, timesteps_per_actorbatch=256, clip_param=0.2, entcoeff=0.01,
optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64, lam=0.95, adam_epsilon=1e-5,
schedule='linear', verbose=0, tensorboard_log=None,
_init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False):
super().__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=False,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs)
self.gamma = gamma
self.timesteps_per_actorbatch = timesteps_per_actorbatch
self.clip_param = clip_param
self.entcoeff = entcoeff
self.optim_epochs = optim_epochs
self.optim_stepsize = optim_stepsize
self.optim_batchsize = optim_batchsize
self.lam = lam
self.adam_epsilon = adam_epsilon
self.schedule = schedule
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
self.graph = None
self.sess = None
self.policy_pi = None
self.loss_names = None
self.lossandgrad = None
self.adam = None
self.assign_old_eq_new = None
self.compute_losses = None
self.params = None
self.step = None
self.proba_step = None
self.initial_state = None
self.summary = None
self.episode_reward = None
if _init_setup_model:
self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.policy_pi
action_ph = policy.pdtype.sample_placeholder([None])
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, action_ph, policy.policy
return policy.obs_ph, action_ph, policy.deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf_util.single_threaded_session(graph=self.graph)
# Construct network for new policy
self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
None, reuse=False, **self.policy_kwargs)
# Network for old policy
with tf.variable_scope("oldpi", reuse=False):
old_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
None, reuse=False, **self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
# Target advantage function (if applicable)
atarg = tf.placeholder(dtype=tf.float32, shape=[None])
# Empirical return
ret = tf.placeholder(dtype=tf.float32, shape=[None])
# learning rate multiplier, updated with schedule
lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[])
                    # Annealed clipping parameter epsilon
clip_param = self.clip_param * lrmult
obs_ph = self.policy_pi.obs_ph
action_ph = self.policy_pi.pdtype.sample_placeholder([None])
kloldnew = old_pi.proba_distribution.kl(self.policy_pi.proba_distribution)
ent = self.policy_pi.proba_distribution.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
pol_entpen = (-self.entcoeff) * meanent
# pnew / pold
ratio = tf.exp(self.policy_pi.proba_distribution.logp(action_ph) -
old_pi.proba_distribution.logp(action_ph))
# surrogate from conservative policy iteration
surr1 = ratio * atarg
surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg
# PPO's pessimistic surrogate (L^CLIP)
pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2))
vf_loss = tf.reduce_mean(tf.square(self.policy_pi.value_fn[:, 0] - ret))
total_loss = pol_surr + pol_entpen + vf_loss
losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
self.loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
tf.summary.scalar('entropy_loss', pol_entpen)
tf.summary.scalar('policy_gradient_loss', pol_surr)
tf.summary.scalar('value_function_loss', vf_loss)
                    tf.summary.scalar('approximate_kullback-leibler', meankl)
tf.summary.scalar('clip_factor', clip_param)
tf.summary.scalar('loss', total_loss)
self.params = tf_util.get_trainable_vars("model")
self.assign_old_eq_new = tf_util.function(
[], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
zipsame(tf_util.get_globals_vars("oldpi"), tf_util.get_globals_vars("model"))])
with tf.variable_scope("Adam_mpi", reuse=False):
self.adam = MpiAdam(self.params, epsilon=self.adam_epsilon, sess=self.sess)
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret))
tf.summary.scalar('learning_rate', tf.reduce_mean(self.optim_stepsize))
tf.summary.scalar('advantage', tf.reduce_mean(atarg))
tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_param))
if self.full_tensorboard_log:
tf.summary.histogram('discounted_rewards', ret)
tf.summary.histogram('learning_rate', self.optim_stepsize)
tf.summary.histogram('advantage', atarg)
tf.summary.histogram('clip_range', self.clip_param)
if tf_util.is_image(self.observation_space):
tf.summary.image('observation', obs_ph)
else:
tf.summary.histogram('observation', obs_ph)
self.step = self.policy_pi.step
self.proba_step = self.policy_pi.proba_step
self.initial_state = self.policy_pi.initial_state
tf_util.initialize(sess=self.sess)
self.summary = tf.summary.merge_all()
self.lossandgrad = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult],
[self.summary, tf_util.flatgrad(total_loss, self.params)] + losses)
self.compute_losses = tf_util.function([obs_ph, old_pi.obs_ph, action_ph, atarg, ret, lrmult],
losses)
def learn(self, total_timesteps, callback=None, seed=None, log_interval=100, tb_log_name="PPO1",
reset_num_timesteps=True):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn(seed)
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the PPO1 model must be " \
"an instance of common.policies.ActorCriticPolicy."
with self.sess.as_default():
self.adam.sync()
# Prepare for rollouts
seg_gen = traj_segment_generator(self.policy_pi, self.env, self.timesteps_per_actorbatch)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
t_start = time.time()
# rolling buffer for episode lengths
lenbuffer = deque(maxlen=100)
# rolling buffer for episode rewards
rewbuffer = deque(maxlen=100)
self.episode_reward = np.zeros((self.n_envs,))
while True:
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
if total_timesteps and timesteps_so_far >= total_timesteps:
break
if self.schedule == 'constant':
cur_lrmult = 1.0
elif self.schedule == 'linear':
cur_lrmult = max(1.0 - float(timesteps_so_far) / total_timesteps, 0)
else:
raise NotImplementedError
logger.log("********** Iteration %i ************" % iters_so_far)
seg = seg_gen.__next__()
add_vtarg_and_adv(seg, self.gamma, self.lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
obs_ph, action_ph, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
# true_rew is the reward without discount
if writer is not None:
self.episode_reward = total_episode_reward_logger(self.episode_reward,
seg["true_rew"].reshape((self.n_envs, -1)),
seg["dones"].reshape((self.n_envs, -1)),
writer, self.num_timesteps)
                    # predicted value function before update
vpredbefore = seg["vpred"]
# standardized advantage function estimate
atarg = (atarg - atarg.mean()) / atarg.std()
dataset = Dataset(dict(ob=obs_ph, ac=action_ph, atarg=atarg, vtarg=tdlamret),
shuffle=not issubclass(self.policy, LstmPolicy))
optim_batchsize = self.optim_batchsize or obs_ph.shape[0]
# set old parameter values to new parameter values
self.assign_old_eq_new(sess=self.sess)
logger.log("Optimizing...")
logger.log(fmt_row(13, self.loss_names))
# Here we do a bunch of optimization epochs over the data
for k in range(self.optim_epochs):
# list of tuples, each of which gives the loss for a minibatch
losses = []
for i, batch in enumerate(dataset.iterate_once(optim_batchsize)):
steps = (self.num_timesteps +
k * optim_batchsize +
int(i * (optim_batchsize / len(dataset.data_map))))
if writer is not None:
# run loss backprop with summary, but once every 10 runs save the metadata
# (memory, compute time, ...)
if self.full_tensorboard_log and (1 + k) % 10 == 0:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, grad, *newlosses = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"],
batch["atarg"], batch["vtarg"],
cur_lrmult, sess=self.sess,
options=run_options,
run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % steps)
else:
summary, grad, *newlosses = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"],
batch["atarg"], batch["vtarg"],
cur_lrmult, sess=self.sess)
writer.add_summary(summary, steps)
else:
_, grad, *newlosses = self.lossandgrad(batch["ob"], batch["ob"], batch["ac"],
batch["atarg"], batch["vtarg"], cur_lrmult,
sess=self.sess)
self.adam.update(grad, self.optim_stepsize * cur_lrmult)
losses.append(newlosses)
logger.log(fmt_row(13, np.mean(losses, axis=0)))
logger.log("Evaluating losses...")
losses = []
for batch in dataset.iterate_once(optim_batchsize):
newlosses = self.compute_losses(batch["ob"], batch["ob"], batch["ac"], batch["atarg"],
batch["vtarg"], cur_lrmult, sess=self.sess)
losses.append(newlosses)
mean_losses, _, _ = mpi_moments(losses, axis=0)
logger.log(fmt_row(13, mean_losses))
for (loss_val, name) in zipsame(mean_losses, self.loss_names):
logger.record_tabular("loss_" + name, loss_val)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
# local values
lrlocal = (seg["ep_lens"], seg["ep_rets"])
# list of tuples
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)
lens, rews = map(flatten_lists, zip(*listoflrpairs))
lenbuffer.extend(lens)
rewbuffer.extend(rews)
if len(lenbuffer) > 0:
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
current_it_timesteps = MPI.COMM_WORLD.allreduce(seg["total_timestep"])
timesteps_so_far += current_it_timesteps
self.num_timesteps += current_it_timesteps
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", self.num_timesteps)
logger.record_tabular("TimeElapsed", time.time() - t_start)
if self.verbose >= 1 and MPI.COMM_WORLD.Get_rank() == 0:
logger.dump_tabular()
return self
def save(self, save_path):
data = {
"gamma": self.gamma,
"timesteps_per_actorbatch": self.timesteps_per_actorbatch,
"clip_param": self.clip_param,
"entcoeff": self.entcoeff,
"optim_epochs": self.optim_epochs,
"optim_stepsize": self.optim_stepsize,
"optim_batchsize": self.optim_batchsize,
"lam": self.lam,
"adam_epsilon": self.adam_epsilon,
"schedule": self.schedule,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params = self.sess.run(self.params)
self._save_to_file(save_path, data=data, params=params)
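# Minimal usage sketch (not part of the original module): assumes gym's
# CartPole-v1 environment is available and MPI runs with a single process;
# the hyperparameters below are illustrative only.
if __name__ == "__main__":
    from stable_baselines.common.policies import MlpPolicy

    demo_model = PPO1(MlpPolicy, "CartPole-v1", timesteps_per_actorbatch=256, verbose=1)
    demo_model.learn(total_timesteps=10000)
    demo_model.save("ppo1_cartpole")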
|
the-stack_106_21948
|
######################################################################################################################
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import StringIO
import csv
def create_output_writer(_=None, __=None):
return ReportOutputWriter(_)
def csv_to_dict_list(s):
if s is None:
return None
result = []
cols = None
try:
reader = csv.reader(StringIO.StringIO(s))
cols = reader.next()
row = reader.next()
while True:
result.append({cols[i]: row[i] for i in range(0, len(cols))})
row = reader.next()
except StopIteration:
if cols is None:
return None
else:
return result
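# Worked example (comments only): csv_to_dict_list("a,b\r\n1,2\r\n3,4") takes the
# first row as column names and returns [{"a": "1", "b": "2"}, {"a": "3", "b": "4"}];
# passing None (or an empty string) returns None, and a header-only string
# returns an empty list.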
# noinspection PyMethodMayBeStatic
class ReportOutputWriter(object):
def __init__(self, _):
self._data_ = None
def write(self, data, _):
self._data_ = data
@property
def data(self):
return self._data_
def reset(self):
self._data_ = None
|
the-stack_106_21949
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import os
from os import path as op
import warnings
from ctypes import (cast, byref, sizeof, create_unicode_buffer,
c_void_p, c_wchar_p)
from ...ext.gdi32plus import (gdiplus, gdi32, user32, winreg, LOGFONT,
OUTLINETEXTMETRIC, GM_ADVANCED, FW_NORMAL,
FW_BOLD, LF_FACESIZE, DEFAULT_CHARSET,
TRUETYPE_FONTTYPE, FONTENUMPROC, BOOL)
# Inspired by:
# http://forums.codeguru.com/showthread.php?90792-How-to-get-a-system-
# font-file-name-given-a-LOGFONT-face-name
# XXX This isn't perfect, but it should work for now...
def find_font(face, bold, italic, orig_face=None):
style_dict = {'Regular': 0, 'Bold': 1, 'Italic': 2, 'Bold Italic': 3}
# Figure out which font to actually use by trying to instantiate by name
dc = user32.GetDC(0) # noqa, analysis:ignore
gdi32.SetGraphicsMode(dc, GM_ADVANCED) # only TT and OT fonts
logfont = LOGFONT()
logfont.lfHeight = -12 # conv point to pixels
logfont.lfWeight = FW_BOLD if bold else FW_NORMAL
logfont.lfItalic = italic
logfont.lfFaceName = face # logfont needs Unicode
hfont = gdi32.CreateFontIndirectW(byref(logfont))
original = gdi32.SelectObject(dc, hfont)
n_byte = gdi32.GetOutlineTextMetricsW(dc, 0, None)
assert n_byte > 0
metrics = OUTLINETEXTMETRIC()
assert sizeof(metrics) >= n_byte
assert gdi32.GetOutlineTextMetricsW(dc, n_byte, byref(metrics))
gdi32.SelectObject(dc, original)
user32.ReleaseDC(None, dc)
use_face = cast(byref(metrics, metrics.otmpFamilyName), c_wchar_p).value
if use_face != face:
warnings.warn('Could not find face match "%s", falling back to "%s"'
% (orig_face or face, use_face))
use_style = cast(byref(metrics, metrics.otmpStyleName), c_wchar_p).value
use_style = style_dict.get(use_style, 'Regular')
# AK: I get "Standaard" for use_style, which is Dutch for standard/regular
# Now we match by creating private font collections until we find
# the one that was used
font_dir = op.join(os.environ['WINDIR'], 'Fonts')
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
key = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts'
reg_vals = winreg.OpenKey(reg, key)
n_values = winreg.QueryInfoKey(reg_vals)[1]
fname = None
for vi in range(n_values):
name, ff = winreg.EnumValue(reg_vals, vi)[:2]
if name.endswith('(TrueType)'):
ff = op.join(font_dir, ff) if op.basename(ff) == ff else ff
assert op.isfile(ff)
pc = c_void_p()
assert gdiplus.GdipNewPrivateFontCollection(byref(pc)) == 0
gdiplus.GdipPrivateAddFontFile(pc, ff)
family = c_void_p()
if gdiplus.GdipCreateFontFamilyFromName(use_face, pc,
byref(family)) == 0:
val = BOOL()
assert gdiplus.GdipIsStyleAvailable(family, use_style,
byref(val)) == 0
if val.value:
buf = create_unicode_buffer(LF_FACESIZE)
assert gdiplus.GdipGetFamilyName(family, buf, 0) == 0
assert buf.value == use_face
fname = ff
break
fname = fname or find_font('', bold, italic, face) # fall back to default
return fname
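# Illustrative behaviour (hedged): find_font("Arial", bold=False, italic=False)
# is expected to resolve, via the registry scan above, to the matching TrueType
# file under %WINDIR%\Fonts (e.g. something like "arial.ttf"); if the requested
# face cannot be matched, the function warns and retries with an empty face
# name, falling back to the system default font file.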
def _list_fonts():
dc = user32.GetDC(0)
gdi32.SetGraphicsMode(dc, GM_ADVANCED) # only TT and OT fonts
logfont = LOGFONT()
logfont.lfCharSet = DEFAULT_CHARSET
logfont.lfFaceName = ''
logfont.lfPitchandFamily = 0
fonts = list()
def enum_fun(lp_logfont, lp_text_metric, font_type, l_param):
# Only support TTF for now (silly Windows shortcomings)
if font_type == TRUETYPE_FONTTYPE:
font = lp_logfont.contents.lfFaceName
if not font.startswith('@') and font not in fonts:
fonts.append(font)
return 1
gdi32.EnumFontFamiliesExW(dc, byref(logfont), FONTENUMPROC(enum_fun), 0, 0)
user32.ReleaseDC(None, dc)
return fonts
|
the-stack_106_21951
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import random
import time
import json
from functools import partial
import numpy as np
import paddle
import paddle.nn.functional as F
import paddlenlp as ppnlp
from model import ErnieForPretraining
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.datasets import load_dataset
from data import create_dataloader, transform_fn_dict
from data import convert_example, convert_chid_example
from evaluate import do_evaluate, do_evaluate_chid
# yapf: disable
# yapf: enable
def set_seed(seed):
"""sets random seed"""
random.seed(seed)
np.random.seed(seed)
paddle.seed(seed)
@paddle.no_grad()
def do_predict(model, tokenizer, data_loader, label_normalize_dict):
model.eval()
    normed_labels = [
        normalized_label
        for origin_label, normalized_label in label_normalize_dict.items()
    ]
    origin_labels = [
        origin_label
        for origin_label, normalized_label in label_normalize_dict.items()
    ]
label_length = len(normed_labels[0])
y_pred_labels = []
for batch in data_loader:
src_ids, token_type_ids, masked_positions = batch
max_len = src_ids.shape[1]
new_masked_positions = []
for bs_index, mask_pos in enumerate(masked_positions.numpy()):
for pos in mask_pos:
new_masked_positions.append(bs_index * max_len + pos)
new_masked_positions = paddle.to_tensor(np.array(new_masked_positions).astype('int32'))
prediction_scores = model(
input_ids=src_ids,
token_type_ids=token_type_ids,
masked_positions=new_masked_positions)
softmax_fn = paddle.nn.Softmax()
prediction_probs = softmax_fn(prediction_scores)
batch_size = len(src_ids)
vocab_size = prediction_probs.shape[1]
        # prediction_probs: [batch_size, label_length, vocab_size]
prediction_probs = paddle.reshape(
prediction_probs, shape=[batch_size, -1, vocab_size]).numpy()
# [label_num, label_length]
label_ids = np.array(
[tokenizer(label)["input_ids"][1:-1] for label in normed_labels])
y_pred = np.ones(shape=[batch_size, len(label_ids)])
# Calculate joint distribution of candidate labels
for index in range(label_length):
y_pred *= prediction_probs[:, index, label_ids[:, index]]
# Get max probs label's index
y_pred_index = np.argmax(y_pred, axis=-1)
for index in y_pred_index:
y_pred_labels.append(origin_labels[index])
return y_pred_labels
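# Note on the scoring above (illustrative): each normalized label is a
# fixed-length token sequence, and y_pred[b, k] multiplies the masked-position
# probabilities of label k's tokens, i.e. a joint probability under an
# independence assumption. For two 2-token labels with per-position
# probabilities (0.6, 0.7) and (0.3, 0.2), the scores are 0.42 vs 0.06, so the
# first label is predicted.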
@paddle.no_grad()
def do_predict_chid(model, tokenizer, data_loader, label_normalize_dict):
"""
FewCLUE `chid` dataset is specical when evaluate: input slots have
additional `candidate_label_ids`, so need to customize the
evaluate function.
"""
model.eval()
    normed_labels = [
        normalized_label
        for origin_label, normalized_label in label_normalize_dict.items()
    ]
label_length = len(normed_labels[0])
y_pred_all = []
for batch in data_loader:
src_ids, token_type_ids, masked_positions, candidate_label_ids = batch
max_len = src_ids.shape[1]
new_masked_positions = []
for bs_index, mask_pos in enumerate(masked_positions.numpy()):
for pos in mask_pos:
new_masked_positions.append(bs_index * max_len + pos)
new_masked_positions = paddle.to_tensor(np.array(new_masked_positions).astype('int32'))
prediction_scores = model(
input_ids=src_ids,
token_type_ids=token_type_ids,
masked_positions=new_masked_positions)
softmax_fn = paddle.nn.Softmax()
prediction_probs = softmax_fn(prediction_scores)
batch_size = len(src_ids)
vocab_size = prediction_probs.shape[1]
        # prediction_probs: [batch_size, label_length, vocab_size]
prediction_probs = paddle.reshape(
prediction_probs, shape=[batch_size, -1, vocab_size]).numpy()
candidate_num = candidate_label_ids.shape[1]
# [batch_size, candidate_num(7)]
y_pred = np.ones(shape=[batch_size, candidate_num])
for label_idx in range(candidate_num):
            # [batch_size, label_length(4)]
single_candidate_label_ids = candidate_label_ids[:, label_idx, :]
# Calculate joint distribution of candidate labels
for index in range(label_length):
# [batch_size,]
slice_word_ids = single_candidate_label_ids[:, index].numpy()
batch_single_token_prob = []
for bs_index in range(batch_size):
# [1, 1]
single_token_prob = prediction_probs[
bs_index, index, slice_word_ids[bs_index]]
batch_single_token_prob.append(single_token_prob)
y_pred[:, label_idx] *= np.array(batch_single_token_prob)
# Get max probs label's index
y_pred_index = np.argmax(y_pred, axis=-1)
y_pred_all.extend(y_pred_index)
return y_pred_all
predict_file = {
"bustm": "bustm_predict.json",
"chid": "chidf_predict.json",
"cluewsc": "cluewscf_predict.json",
"csldcp": "csldcp_predict.json",
"csl": "cslf_predict.json",
"eprstmt": "eprstmt_predict.json",
"iflytek": "iflytekf_predict.json",
"ocnli": "ocnlif_predict.json",
"tnews": "tnewsf_predict.json"
}
def write_iflytek(task_name, output_file, pred_labels):
test_ds, train_few_all = load_dataset(
"fewclue", name=task_name, splits=("test", "train_few_all"))
def label2id(train_few_all):
label2id = {}
for example in train_few_all:
label = example["label_des"]
label_id = example["label"]
if label not in label2id:
label2id[label] = str(label_id)
return label2id
label2id_dict = label2id(train_few_all)
test_example = {}
with open(output_file, 'w', encoding='utf-8') as f:
for idx, example in enumerate(test_ds):
test_example["id"] = example["id"]
test_example["label"] = label2id_dict[pred_labels[idx]]
str_test_example = json.dumps(test_example) + "\n"
f.write(str_test_example)
def write_bustm(task_name, output_file, pred_labels):
test_ds = load_dataset("fewclue", name=task_name, splits=("test"))
test_example = {}
with open(output_file, 'w', encoding='utf-8') as f:
for idx, example in enumerate(test_ds):
test_example["id"] = example["id"]
test_example["label"] = pred_labels[idx]
str_test_example = json.dumps(test_example) + "\n"
f.write(str_test_example)
def write_csldcp(task_name, output_file, pred_labels):
test_ds = load_dataset("fewclue", name=task_name, splits=("test"))
test_example = {}
with open(output_file, 'w', encoding='utf-8') as f:
for idx, example in enumerate(test_ds):
test_example["id"] = example["id"]
test_example["label"] = pred_labels[idx]
str_test_example = "\"{}\": {}, \"{}\": \"{}\"".format(
"id", test_example['id'], "label", test_example["label"])
f.write("{" + str_test_example + "}\n")
def write_tnews(task_name, output_file, pred_labels):
test_ds, train_few_all = load_dataset(
"fewclue", name=task_name, splits=("test", "train_few_all"))
def label2id(train_few_all):
label2id = {}
for example in train_few_all:
label = example["label_desc"]
label_id = example["label"]
if label not in label2id:
label2id[label] = str(label_id)
return label2id
label2id_dict = label2id(train_few_all)
test_example = {}
with open(output_file, 'w', encoding='utf-8') as f:
for idx, example in enumerate(test_ds):
test_example["id"] = example["id"]
test_example["label"] = label2id_dict[pred_labels[idx]]
str_test_example = json.dumps(test_example) + "\n"
f.write(str_test_example)
def write_cluewsc(task_name, output_file, pred_labels):
test_ds = load_dataset("fewclue", name=task_name, splits=("test"))
test_example = {}
with open(output_file, 'w', encoding='utf-8') as f:
for idx, example in enumerate(test_ds):
test_example["id"] = example["id"]
test_example["label"] = pred_labels[idx]
str_test_example = "\"{}\": {}, \"{}\": \"{}\"".format(
"id", test_example['id'], "label", test_example["label"])
f.write("{" + str_test_example + "}\n")
def write_eprstmt(task_name, output_file, pred_labels):
test_ds = load_dataset("fewclue", name=task_name, splits=("test"))
test_example = {}
with open(output_file, 'w', encoding='utf-8') as f:
for idx, example in enumerate(test_ds):
test_example["id"] = example["id"]
test_example["label"] = pred_labels[idx]
str_test_example = json.dumps(test_example)
f.write(str_test_example + "\n")
def write_ocnli(task_name, output_file, pred_labels):
test_ds = load_dataset("fewclue", name=task_name, splits=("test"))
test_example = {}
with open(output_file, 'w', encoding='utf-8') as f:
for idx, example in enumerate(test_ds):
test_example["id"] = example["id"]
test_example["label"] = pred_labels[idx]
str_test_example = json.dumps(test_example)
f.write(str_test_example + "\n")
def write_csl(task_name, output_file, pred_labels):
test_ds = load_dataset("fewclue", name=task_name, splits=("test"))
test_example = {}
with open(output_file, 'w', encoding='utf-8') as f:
for idx, example in enumerate(test_ds):
test_example["id"] = example["id"]
test_example["label"] = pred_labels[idx]
str_test_example = json.dumps(test_example)
f.write(str_test_example + "\n")
def write_chid(task_name, output_file, pred_labels):
test_ds = load_dataset("fewclue", name=task_name, splits=("test"))
test_example = {}
with open(output_file, 'w', encoding='utf-8') as f:
for idx, example in enumerate(test_ds):
test_example["id"] = example["id"]
test_example["answer"] = pred_labels[idx]
str_test_example = "\"{}\": {}, \"{}\": {}".format(
"id", test_example['id'], "answer", test_example["answer"])
f.write("{" + str_test_example + "}\n")
write_fn = {
"bustm": write_bustm,
"iflytek": write_iflytek,
"csldcp": write_csldcp,
"tnews": write_tnews,
"cluewsc": write_cluewsc,
"eprstmt": write_eprstmt,
"ocnli": write_ocnli,
"csl": write_csl,
"chid": write_chid
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--task_name", required=True, type=str, help="The task_name to be evaluated")
parser.add_argument("--p_embedding_num", type=int, default=1, help="number of p-embedding")
parser.add_argument("--batch_size", default=32, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument("--pattern_id", default=0, type=int, help="pattern id of pet")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. "
"Sequences longer than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--init_from_ckpt", type=str, default=None, help="The path of checkpoint to be loaded.")
parser.add_argument("--output_dir", type=str, default=None, help="The path of checkpoint to be loaded.")
parser.add_argument("--seed", type=int, default=1000, help="random seed for initialization")
parser.add_argument('--device', choices=['cpu', 'gpu'], default="gpu",
help="Select which device to train model, defaults to gpu.")
args = parser.parse_args()
paddle.set_device(args.device)
set_seed(args.seed)
label_normalize_json = os.path.join("./label_normalized",
args.task_name + ".json")
label_norm_dict = None
with open(label_normalize_json, 'r', encoding="utf-8") as f:
label_norm_dict = json.load(f)
convert_example_fn = convert_example if args.task_name != "chid" else convert_chid_example
predict_fn = do_predict if args.task_name != "chid" else do_predict_chid
# Load test_ds for FewCLUE leaderboard
test_ds = load_dataset("fewclue", name=args.task_name, splits=("test"))
    # Task-related transform operations, e.g. numeric label -> text label, English -> Chinese
transform_fn = partial(
transform_fn_dict[args.task_name],
label_normalize_dict=label_norm_dict,
is_test=True, pattern_id = args.pattern_id)
    # Some few-shot learning strategies are defined by transform_fn.
    # Note: set lazy=False to transform the examples in place immediately,
    # because transform_fn should be executed only once even when train_ds
    # is iterated multiple times.
test_ds = test_ds.map(transform_fn, lazy=False)
model = ErnieForPretraining.from_pretrained('ernie-1.0')
tokenizer = ppnlp.transformers.ErnieTokenizer.from_pretrained('ernie-1.0')
# Load parameters of best model on test_public.json of current task
if args.init_from_ckpt and os.path.isfile(args.init_from_ckpt):
state_dict = paddle.load(args.init_from_ckpt)
model.set_dict(state_dict)
print("Loaded parameters from %s" % args.init_from_ckpt)
else:
raise ValueError(
"Please set --params_path with correct pretrained model file")
if args.task_name != "chid":
# [src_ids, token_type_ids, masked_positions, masked_lm_labels]
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # src_ids
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type_ids
Stack(dtype="int64"), # masked_positions
): [data for data in fn(samples)]
else:
# [src_ids, token_type_ids, masked_positions, masked_lm_labels, candidate_labels_ids]
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # src_ids
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type_ids
Stack(dtype="int64"), # masked_positions
Stack(dtype="int64"), # candidate_labels_ids [candidate_num, label_length]
): [data for data in fn(samples)]
trans_func = partial(
convert_example_fn,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
is_test=True)
test_data_loader = create_dataloader(
test_ds,
mode='eval',
batch_size=args.batch_size,
batchify_fn=batchify_fn,
trans_fn=trans_func)
y_pred_labels = predict_fn(model, tokenizer, test_data_loader,
label_norm_dict)
output_file = os.path.join(args.output_dir, predict_file[args.task_name])
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
write_fn[args.task_name](args.task_name, output_file, y_pred_labels)
|
the-stack_106_21954
|
"""Module containing common scenarios that can be used
for writing tests with less boiler-plate."""
from typing import List, Dict, Any
import unittest
import time
from vega_client import vegaClient
from suite import vegaNetwork, ProcessOutput, ProcessExitResult
from requests.exceptions import ConnectionError
RETRIES_AMOUNT = 20
def run_dev_node(application: str) -> vegaNetwork:
"""Starts a single node in the run-dev mode and returns
`vegaNetwork` object with the running node.
Example:
>>> network = run_dev_node("vega-cryptocurrency-advanced")"""
network = vegaNetwork(application)
network.run_dev()
return network
def run_n_nodes(application: str, nodes_amount: int) -> vegaNetwork:
"""Creates and runs a network with N validators and return an
`vegaNetwork` object with it."""
address = "127.0.0.1:{}"
# Assign peer ports starting from 6331.
available_peer_port = 6331
# Assign API ports starting from 8080.
available_api_port = 8080
network = vegaNetwork(application)
network.generate_template(nodes_amount)
for i in range(nodes_amount):
network.generate_config(i, address.format(available_peer_port))
available_peer_port += 1
for i in range(nodes_amount):
public_api_address = address.format(available_api_port)
private_api_address = address.format(available_api_port + 1)
network.finalize(i, public_api_address, private_api_address)
available_api_port += 2
for i in range(nodes_amount):
network.run_node(i)
return network
def run_4_nodes(application: str) -> vegaNetwork:
"""Creates and runs a network with 4 validators and return an
`vegaNetwork` object with it.
Example:
>>> network = run_4_nodes("vega-cryptocurrency-advanced")
>>> for i in range(1, network.validators_count()):
... print(network.api_address(i))
...
'127.0.0.1', 8080, 8081
'127.0.0.1', 8082, 8083
'127.0.0.1', 8084, 8085
'127.0.0.1', 8086, 8087
"""
return run_n_nodes(application, 4)
def assert_processes_exited_successfully(test: unittest.TestCase, outputs: List[ProcessOutput]) -> None:
"""Asserts that all the processes exited successfully."""
for output in outputs:
test.assertEqual(output.exit_result, ProcessExitResult.Ok)
test.assertEqual(output.exit_code, 0, f"Process exited with non-zero code: {output.stderr}")
def launcher_networks(network: vegaNetwork) -> List[Dict[str, Any]]:
"""Builds a network configuration for `vega-launcher` from the
`vegaNetwork` object."""
networks = []
for validator_id in range(network.validators_count()):
host, public_port, private_port = network.api_address(validator_id)
node_network = {"host": host, "ssl": False, "public-api-port": public_port, "private-api-port": private_port}
networks.append(node_network)
# Temporary workaround: supervisor works in simple mode and we need only one node.
return networks[:1]
def wait_network_to_start(network: vegaNetwork) -> None:
"""Wait for network starting"""
wait_api_to_start(network)
wait_for_block(network, 1)
def wait_for_block(network: vegaNetwork, height: int = 1) -> None:
"""Wait for block at specific height"""
for validator_id in range(network.validators_count()):
host, public_port, private_port = network.api_address(validator_id)
client = vegaClient(host, public_port, private_port)
for _ in range(RETRIES_AMOUNT):
if client.public_api.get_block(height).status_code == 200:
break
time.sleep(0.5)
def wait_api_to_start(network: vegaNetwork) -> None:
"""Wait for api starting"""
for validator_id in range(network.validators_count()):
host, public_port, private_port = network.api_address(validator_id)
client = vegaClient(host, public_port, private_port)
for _ in range(RETRIES_AMOUNT):
try:
client.public_api.health_info()
break
except ConnectionError:
time.sleep(0.5)
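# Minimal usage sketch (comments only; assumes the application binary is
# installed and that network teardown is handled by the calling test):
#
#     network = run_4_nodes("vega-cryptocurrency-advanced")
#     wait_network_to_start(network)
#     print(launcher_networks(network))
#     # -> [{"host": "127.0.0.1", "ssl": False,
#     #      "public-api-port": 8080, "private-api-port": 8081}]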
|
the-stack_106_21955
|
from __future__ import print_function
import argparse
import av
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('output')
args = arg_parser.parse_args()
of = av.open(args.output, 'w')
print(of)
for codec_name in 'aac', 'vorbis':
try:
os = of.add_stream(codec_name)
except Exception as e:
print(e)
else:
print(os)
|
the-stack_106_21956
|
import logging
import contextlib
import io
import sys
import json
import os
from unittest import TestCase, mock
from google.cloud import logging_v2
import bigflow.log
class LoggerTestCase(TestCase):
def configure_mocked_logging(self, project_id, log_name, workflow_id=None):
self.gcp_handler = mock.Mock()
self.gcp_handler.level = logging.INFO
with mock.patch('bigflow.log.create_gcp_log_handler', return_value=self.gcp_handler):
bigflow.log._LOGGING_CONFIGURED = False
bigflow.log.init_logging(
workflow_id=workflow_id,
config={
'gcp_project_id': project_id,
'log_name': log_name,
},
)
def setUp(self):
self.configure_mocked_logging('project-id', 'logger_name', 'workflow-id')
self.test_logger = logging.getLogger('any.random.logger.name')
self.root_logger = logging.getLogger('')
def _clear_all_root_loggers(self):
for h in logging.getLogger().handlers[:]:
logging.getLogger().removeHandler(h)
h.close()
def tearDown(self):
self._clear_all_root_loggers()
bigflow.log._LOGGING_CONFIGURED = False
def test_should_create_correct_logging_link(self):
# when
f = io.StringIO()
with contextlib.redirect_stderr(f):
# stderr handler is created only when no other handlers are registered
self._clear_all_root_loggers()
self.configure_mocked_logging('project-id', 'another_log_name', 'workflow_id')
# then
out = f.getvalue()
self.assertIn("LOGS LINK", out)
self.assertIn("https://console.cloud.google.com/logs/query;query=", out)
self.assertIn("labels.workflow_id%3D%22workflow_id%22", out)
def _assert_single_log_event(self, message_re, severity=None):
self.assertEqual(1, self.gcp_handler.handle.call_count)
le: logging.LogRecord = self.gcp_handler.handle.call_args_list[0][0][0]
if severity:
self.assertEqual(le.levelname, severity)
if message_re:
self.assertRegexpMatches(le.message, message_re)
return le
def test_should_log_unhandled_exception(self):
# when
try:
raise ValueError("oh no... i'm dying")
except Exception:
sys.excepthook(*sys.exc_info()) # simulate uncaught exception
# then
le = self._assert_single_log_event(
message_re="Uncaught exception: oh no... i\'m dying",
severity='ERROR',
)
self.assertTrue(le.exc_info)
def test_should_handle_warning(self):
# when
self.test_logger.warning("warning message")
# then
self._assert_single_log_event(
message_re="warning message",
severity='WARNING',
)
def test_should_handle_info(self):
# when
self.test_logger.info("info message")
# then
self._assert_single_log_event(
message_re="info message",
severity='INFO',
)
def test_should_handle_error(self):
# when
self.test_logger.error("error message")
# then
self._assert_single_log_event(
message_re="error message",
severity='ERROR',
)
def test_should_install_gcp_handler_when_logging_already_exists(self):
# given
self._clear_all_root_loggers()
logging.basicConfig(level=logging.ERROR)
# when
self.configure_mocked_logging('project-id', 'logger_name', 'workflow_id')
self.test_logger.info("message")
# then
self._assert_single_log_event(
message_re="message",
severity='INFO',
)
@mock.patch.dict('os.environ')
@mock.patch.dict('sys.modules')
@mock.patch('google.cloud.logging.Client')
@mock.patch('google.cloud.logging.handlers.CloudLoggingHandler')
def test_logging_should_autoinitialize_via_env_variables(
self,
client_mock,
cloud_logging_handler_mock
):
# given
for m in list(sys.modules):
if m.startswith("bigflow."):
del sys.modules[m]
del sys.modules['bigflow']
self._clear_all_root_loggers()
os.environ['bf_log_config'] = json.dumps({'log_level': "INFO", "gcp_project_id": "proj"})
# when
import bigflow
# then
self.assertEqual(logging.root.level, logging.INFO)
|
the-stack_106_21957
|
# -*- coding: utf-8 -*-
"""
cleaning up the different parts
"""
import os
import glob
import pandas as pd
# load data
path = 'data/'
all_files = glob.glob(os.path.join(path, "*.csv"))
df = (pd.read_csv(f) for f in all_files)
tracks = pd.concat(df, ignore_index=True)
# get rid of sloppiness before
del tracks['Unnamed: 0']
# check for duplicates and drop them (drop_duplicates returns a new frame)
print(tracks.shape)
tracks = tracks.drop_duplicates()
# inspect, then reorder by play time
print(tracks.head(n=10))
print(tracks.tail(n=10))
tracks_sorted = tracks.sort_values(by=['played'])
# get artist + song
query = tracks_sorted['artist'] + ', ' + tracks_sorted['song']
# ask spotify for track id
# import requests
# ...
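# Hedged sketch of the lookup step hinted at above (not wired in; requires a
# valid Spotify OAuth token, which this script does not obtain):
def search_track_id(track_query, token):
    """Return the first matching Spotify track id for 'artist, song', or None."""
    import requests
    resp = requests.get(
        "https://api.spotify.com/v1/search",
        params={"q": track_query, "type": "track", "limit": 1},
        headers={"Authorization": "Bearer " + token},
    )
    items = resp.json().get("tracks", {}).get("items", [])
    return items[0]["id"] if items else None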
|
the-stack_106_21958
|
import sys
import json
from collections import OrderedDict
TERMINATORS = ["jmp", "br", "ret"]
def form_blocks(instrs):
cur_block = []
for instr in instrs:
if "op" in instr: # instruction
cur_block.append(instr)
if instr["op"] in TERMINATORS:
if cur_block:
yield cur_block
cur_block = []
else: # a label
if cur_block:
yield cur_block
cur_block = [instr]
yield cur_block
def associate_label_to_blocks(blocks):
result = OrderedDict()
for block in blocks:
if "label" in block[0]:
name = block[0]["label"]
block = block[1:]
else:
name = f'B{len(result)}'
result[name] = block
return result
def form_cfg(label2block: OrderedDict):
cfg = {}
for i, (label, block) in enumerate(label2block.items()):
last = block[-1]
if "labels" in last: # jmp or br
successors = last["labels"]
elif last["op"] == "ret":
successors = []
else:
if (i < len(label2block) - 1):
successors = [list(label2block.keys())[i + 1]]
else:
successors = []
cfg[label] = successors
return cfg
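# Worked example (comments only): given label2block like
#   {"B0":   [{"op": "br", "labels": ["then", "else"], "args": ["cond"]}],
#    "then": [{"op": "jmp", "labels": ["end"]}],
#    "else": [{"op": "const", "dest": "x", "value": 1}],
#    "end":  [{"op": "ret"}]}
# form_cfg returns {"B0": ["then", "else"], "then": ["end"], "else": ["end"],
# "end": []} -- "else" falls through to the next block because its last
# instruction is neither a jump/branch nor a return.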
def main():
prog = json.load(sys.stdin)
for func in prog["functions"]:
blocks = form_blocks(func["instrs"])
label2block = associate_label_to_blocks(blocks)
for label, block in label2block.items():
print(f"{label}:\n {block}")
cfg = form_cfg(label2block)
print(cfg)
if __name__ == '__main__':
main()
|
the-stack_106_21962
|
import inspect
import json
import math
import numbers
from textwrap import TextWrapper
import mmap
import time
import numpy as np
from asciitree import BoxStyle, LeftAligned
from asciitree.traversal import Traversal
from collections.abc import Iterable
from numcodecs.compat import ensure_ndarray, ensure_text
from numcodecs.registry import codec_registry
from numcodecs.blosc import cbuffer_sizes, cbuffer_metainfo
from typing import Any, Callable, Dict, Optional, Tuple, Union
def flatten(arg: Iterable) -> Iterable:
for element in arg:
if isinstance(element, Iterable) and not isinstance(element, (str, bytes)):
yield from flatten(element)
else:
yield element
# codecs to use for object dtype convenience API
object_codecs = {
str.__name__: 'vlen-utf8',
bytes.__name__: 'vlen-bytes',
'array': 'vlen-array',
}
def json_dumps(o: Any) -> bytes:
"""Write JSON in a consistent, human-readable way."""
return json.dumps(o, indent=4, sort_keys=True, ensure_ascii=True,
separators=(',', ': ')).encode('ascii')
def json_loads(s: str) -> Dict[str, Any]:
"""Read JSON in a consistent way."""
return json.loads(ensure_text(s, 'ascii'))
def normalize_shape(shape) -> Tuple[int]:
"""Convenience function to normalize the `shape` argument."""
if shape is None:
raise TypeError('shape is None')
# handle 1D convenience form
if isinstance(shape, numbers.Integral):
shape = (int(shape),)
# normalize
shape = tuple(int(s) for s in shape)
return shape
# code to guess chunk shape, adapted from h5py
CHUNK_BASE = 256*1024 # Multiplier by which chunks are adjusted
CHUNK_MIN = 128*1024 # Soft lower limit (128k)
CHUNK_MAX = 64*1024*1024 # Hard upper limit
def guess_chunks(shape: Tuple[int, ...], typesize: int) -> Tuple[int, ...]:
"""
Guess an appropriate chunk layout for an array, given its shape and
the size of each element in bytes. Will allocate chunks only as large
as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of
each axis, slightly favoring bigger values for the last index.
Undocumented and subject to change without warning.
"""
ndims = len(shape)
# require chunks to have non-zero length for all dimensions
chunks = np.maximum(np.array(shape, dtype='=f8'), 1)
# Determine the optimal chunk size in bytes using a PyTables expression.
# This is kept as a float.
dset_size = np.product(chunks)*typesize
target_size = CHUNK_BASE * (2**np.log10(dset_size/(1024.*1024)))
if target_size > CHUNK_MAX:
target_size = CHUNK_MAX
elif target_size < CHUNK_MIN:
target_size = CHUNK_MIN
idx = 0
while True:
# Repeatedly loop over the axes, dividing them by 2. Stop when:
# 1a. We're smaller than the target chunk size, OR
# 1b. We're within 50% of the target chunk size, AND
# 2. The chunk is smaller than the maximum chunk size
chunk_bytes = np.product(chunks)*typesize
if (chunk_bytes < target_size or
abs(chunk_bytes-target_size)/target_size < 0.5) and \
chunk_bytes < CHUNK_MAX:
break
if np.product(chunks) == 1:
break # Element size larger than CHUNK_MAX
chunks[idx % ndims] = math.ceil(chunks[idx % ndims] / 2.0)
idx += 1
return tuple(int(x) for x in chunks)
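# Worked example (illustrative): guess_chunks((1_000_000,), 8) has an ~8 MB
# uncompressed size, so the PyTables-style target lands around 480 kB (between
# CHUNK_MIN and CHUNK_MAX); the loop then halves the single axis until the
# chunk is within 50% of that target, giving (62500,), i.e. 500 kB chunks.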
def normalize_chunks(
chunks: Any, shape: Tuple[int, ...], typesize: int
) -> Tuple[int, ...]:
"""Convenience function to normalize the `chunks` argument for an array
with the given `shape`."""
# N.B., expect shape already normalized
# handle auto-chunking
if chunks is None or chunks is True:
return guess_chunks(shape, typesize)
# handle no chunking
if chunks is False:
return shape
# handle 1D convenience form
if isinstance(chunks, numbers.Integral):
chunks = tuple(int(chunks) for _ in shape)
# handle bad dimensionality
if len(chunks) > len(shape):
raise ValueError('too many dimensions in chunks')
# handle underspecified chunks
if len(chunks) < len(shape):
# assume chunks across remaining dimensions
chunks += shape[len(chunks):]
# handle None or -1 in chunks
if -1 in chunks or None in chunks:
chunks = tuple(s if c == -1 or c is None else int(c)
for s, c in zip(shape, chunks))
return tuple(chunks)
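# Worked examples (illustrative): normalize_chunks(None, (1000, 1000), 8)
# delegates to guess_chunks; normalize_chunks(100, (1000, 1000), 8) expands the
# scalar to (100, 100); normalize_chunks((100,), (1000, 1000), 8) pads the
# missing trailing dimension to (100, 1000); and -1 or None entries are
# replaced by the full extent of that axis.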
def normalize_dtype(dtype: Union[str, np.dtype], object_codec) -> Tuple[np.dtype, Any]:
# convenience API for object arrays
if inspect.isclass(dtype):
dtype = dtype.__name__ # type: ignore
if isinstance(dtype, str):
# allow ':' to delimit class from codec arguments
tokens = dtype.split(':')
key = tokens[0]
if key in object_codecs:
dtype = np.dtype(object)
if object_codec is None:
codec_id = object_codecs[key]
if len(tokens) > 1:
args = tokens[1].split(',')
else:
args = []
try:
object_codec = codec_registry[codec_id](*args)
except KeyError: # pragma: no cover
raise ValueError('codec %r for object type %r is not '
'available; please provide an '
'object_codec manually' % (codec_id, key))
return dtype, object_codec
dtype = np.dtype(dtype)
# don't allow generic datetime64 or timedelta64, require units to be specified
if dtype == np.dtype('M8') or dtype == np.dtype('m8'):
raise ValueError('datetime64 and timedelta64 dtypes with generic units '
'are not supported, please specify units (e.g., "M8[ns]")')
return dtype, object_codec
# noinspection PyTypeChecker
def is_total_slice(item, shape: Tuple[int]) -> bool:
"""Determine whether `item` specifies a complete slice of array with the
given `shape`. Used to optimize __setitem__ operations on the Chunk
class."""
# N.B., assume shape is normalized
if item == Ellipsis:
return True
if item == slice(None):
return True
if isinstance(item, slice):
item = item,
if isinstance(item, tuple):
return all(
(isinstance(s, slice) and
((s == slice(None)) or
((s.stop - s.start == l) and (s.step in [1, None]))))
for s, l in zip(item, shape)
)
else:
raise TypeError('expected slice or tuple of slices, found %r' % item)
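# Worked example (illustrative): with shape (10, 5),
# is_total_slice((slice(0, 10), slice(0, 5)), (10, 5)) is True, while
# is_total_slice((slice(0, 4), slice(0, 5)), (10, 5)) is False because the
# first axis is only partially covered; Ellipsis and slice(None) always count
# as total.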
def normalize_resize_args(old_shape, *args):
# normalize new shape argument
if len(args) == 1:
new_shape = args[0]
else:
new_shape = args
if isinstance(new_shape, int):
new_shape = (new_shape,)
else:
new_shape = tuple(new_shape)
if len(new_shape) != len(old_shape):
raise ValueError('new shape must have same number of dimensions')
# handle None in new_shape
new_shape = tuple(s if n is None else int(n)
for s, n in zip(old_shape, new_shape))
return new_shape
def human_readable_size(size) -> str:
if size < 2**10:
return '%s' % size
elif size < 2**20:
return '%.1fK' % (size / float(2**10))
elif size < 2**30:
return '%.1fM' % (size / float(2**20))
elif size < 2**40:
return '%.1fG' % (size / float(2**30))
elif size < 2**50:
return '%.1fT' % (size / float(2**40))
else:
return '%.1fP' % (size / float(2**50))
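# Minimal usage sketch (illustrative only): sizes are formatted with binary (1024-based) prefixes.
def _example_human_readable_size():
    assert human_readable_size(100) == '100'
    assert human_readable_size(2**10) == '1.0K'
    assert human_readable_size(int(1.5 * 2**20)) == '1.5M'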
def normalize_order(order: str) -> str:
order = str(order).upper()
if order not in ['C', 'F']:
raise ValueError("order must be either 'C' or 'F', found: %r" % order)
return order
def normalize_dimension_separator(sep: Optional[str]) -> Optional[str]:
if sep in (".", "/", None):
return sep
else:
raise ValueError(
"dimension_separator must be either '.' or '/', found: %r" % sep)
def normalize_fill_value(fill_value, dtype: np.dtype):
if fill_value is None or dtype.hasobject:
# no fill value
pass
elif fill_value == 0:
# this should be compatible across numpy versions for any array type, including
# structured arrays
fill_value = np.zeros((), dtype=dtype)[()]
elif dtype.kind == 'U':
# special case unicode because of encoding issues on Windows if passed through numpy
# https://github.com/alimanfoo/zarr/pull/172#issuecomment-343782713
if not isinstance(fill_value, str):
raise ValueError('fill_value {!r} is not valid for dtype {}; must be a '
'unicode string'.format(fill_value, dtype))
else:
try:
if isinstance(fill_value, bytes) and dtype.kind == 'V':
# special case for numpy 1.14 compatibility
fill_value = np.array(fill_value, dtype=dtype.str).view(dtype)[()]
else:
fill_value = np.array(fill_value, dtype=dtype)[()]
except Exception as e:
# re-raise with our own error message to be helpful
raise ValueError('fill_value {!r} is not valid for dtype {}; nested '
'exception: {}'.format(fill_value, dtype, e))
return fill_value
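# Minimal usage sketch (illustrative only): zero is normalized through numpy, unicode
# strings pass through unchanged, and None means "no fill value".
def _example_normalize_fill_value():
    assert normalize_fill_value(0, np.dtype('f8')) == 0.0
    assert normalize_fill_value('x', np.dtype('U4')) == 'x'
    assert normalize_fill_value(None, np.dtype('i4')) is None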
def normalize_storage_path(path: Union[str, bytes, None]) -> str:
# handle bytes
if isinstance(path, bytes):
path = str(path, 'ascii')
# ensure str
if path is not None and not isinstance(path, str):
path = str(path)
if path:
# convert backslash to forward slash
path = path.replace('\\', '/')
# ensure no leading slash
while len(path) > 0 and path[0] == '/':
path = path[1:]
# ensure no trailing slash
while len(path) > 0 and path[-1] == '/':
path = path[:-1]
# collapse any repeated slashes
previous_char = None
collapsed = ''
for char in path:
if char == '/' and previous_char == '/':
pass
else:
collapsed += char
previous_char = char
path = collapsed
# don't allow path segments with just '.' or '..'
segments = path.split('/')
if any(s in {'.', '..'} for s in segments):
raise ValueError("path containing '.' or '..' segment not allowed")
else:
path = ''
return path
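# Minimal usage sketch (illustrative only): leading/trailing/repeated slashes and
# backslashes are normalized to a clean, relative, '/'-separated path.
def _example_normalize_storage_path():
    assert normalize_storage_path('/foo/bar/') == 'foo/bar'
    assert normalize_storage_path('foo\\bar') == 'foo/bar'
    assert normalize_storage_path('foo//bar') == 'foo/bar'
    assert normalize_storage_path(None) == ''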
def buffer_size(v) -> int:
return ensure_ndarray(v).nbytes
def info_text_report(items: Dict[Any, Any]) -> str:
keys = [k for k, v in items]
max_key_len = max(len(k) for k in keys)
report = ''
for k, v in items:
wrapper = TextWrapper(width=80,
initial_indent=k.ljust(max_key_len) + ' : ',
subsequent_indent=' '*max_key_len + ' : ')
text = wrapper.fill(str(v))
report += text + '\n'
return report
def info_html_report(items) -> str:
report = '<table class="zarr-info">'
report += '<tbody>'
for k, v in items:
report += '<tr>' \
'<th style="text-align: left">%s</th>' \
'<td style="text-align: left">%s</td>' \
'</tr>' \
% (k, v)
report += '</tbody>'
report += '</table>'
return report
class InfoReporter:
def __init__(self, obj):
self.obj = obj
def __repr__(self):
items = self.obj.info_items()
return info_text_report(items)
def _repr_html_(self):
items = self.obj.info_items()
return info_html_report(items)
class TreeNode:
def __init__(self, obj, depth=0, level=None):
self.obj = obj
self.depth = depth
self.level = level
def get_children(self):
if hasattr(self.obj, 'values'):
if self.level is None or self.depth < self.level:
depth = self.depth + 1
return [TreeNode(o, depth=depth, level=self.level)
for o in self.obj.values()]
return []
def get_text(self):
name = self.obj.name.split("/")[-1] or "/"
if hasattr(self.obj, 'shape'):
name += ' {} {}'.format(self.obj.shape, self.obj.dtype)
return name
def get_type(self):
return type(self.obj).__name__
class TreeTraversal(Traversal):
def get_children(self, node):
return node.get_children()
def get_root(self, tree):
return tree
def get_text(self, node):
return node.get_text()
tree_group_icon = 'folder'
tree_array_icon = 'table'
def tree_get_icon(stype: str) -> str:
if stype == "Array":
return tree_array_icon
elif stype == "Group":
return tree_group_icon
else:
raise ValueError("Unknown type: %s" % stype)
def tree_widget_sublist(node, root=False, expand=False):
import ipytree
result = ipytree.Node()
result.icon = tree_get_icon(node.get_type())
if root or (expand is True) or (isinstance(expand, int) and node.depth < expand):
result.opened = True
else:
result.opened = False
result.name = node.get_text()
result.nodes = [tree_widget_sublist(c, expand=expand) for c in node.get_children()]
result.disabled = True
return result
def tree_widget(group, expand, level):
try:
import ipytree
except ImportError as error:
raise ImportError(
"{}: Run `pip install zarr[jupyter]` or `conda install ipytree`"
"to get the required ipytree dependency for displaying the tree "
"widget. If using jupyterlab<3, you also need to run "
"`jupyter labextension install ipytree`".format(error)
)
result = ipytree.Tree()
root = TreeNode(group, level=level)
result.add_node(tree_widget_sublist(root, root=True, expand=expand))
return result
class TreeViewer:
def __init__(self, group, expand=False, level=None):
self.group = group
self.expand = expand
self.level = level
self.text_kwargs = dict(
horiz_len=2,
label_space=1,
indent=1
)
self.bytes_kwargs = dict(
UP_AND_RIGHT="+",
HORIZONTAL="-",
VERTICAL="|",
VERTICAL_AND_RIGHT="+"
)
self.unicode_kwargs = dict(
UP_AND_RIGHT="\u2514",
HORIZONTAL="\u2500",
VERTICAL="\u2502",
VERTICAL_AND_RIGHT="\u251C"
)
def __bytes__(self):
drawer = LeftAligned(
traverse=TreeTraversal(),
draw=BoxStyle(gfx=self.bytes_kwargs, **self.text_kwargs)
)
root = TreeNode(self.group, level=self.level)
result = drawer(root)
# Unicode characters slip in on Python 3.
# So we need to straighten that out first.
result = result.encode()
return result
def __unicode__(self):
drawer = LeftAligned(
traverse=TreeTraversal(),
draw=BoxStyle(gfx=self.unicode_kwargs, **self.text_kwargs)
)
root = TreeNode(self.group, level=self.level)
return drawer(root)
def __repr__(self):
return self.__unicode__()
def _ipython_display_(self):
tree = tree_widget(self.group, expand=self.expand, level=self.level)
tree._ipython_display_()
return tree
def check_array_shape(param, array, shape):
if not hasattr(array, 'shape'):
raise TypeError('parameter {!r}: expected an array-like object, got {!r}'
.format(param, type(array)))
if array.shape != shape:
raise ValueError('parameter {!r}: expected array with shape {!r}, got {!r}'
.format(param, shape, array.shape))
def is_valid_python_name(name):
from keyword import iskeyword
return name.isidentifier() and not iskeyword(name)
class NoLock:
"""A lock that doesn't lock."""
def __enter__(self):
pass
def __exit__(self, *args):
pass
nolock = NoLock()
class PartialReadBuffer:
def __init__(self, store_key, chunk_store):
self.chunk_store = chunk_store
# is it fsstore or an actual fsspec map object
assert hasattr(self.chunk_store, "map")
self.map = self.chunk_store.map
self.fs = self.chunk_store.fs
self.store_key = store_key
self.buff = None
self.nblocks = None
self.start_points = None
self.n_per_block = None
self.start_points_max = None
self.read_blocks = set()
_key_path = self.map._key_to_str(store_key)
_key_path = _key_path.split('/')
_chunk_path = [self.chunk_store._normalize_key(_key_path[-1])]
_key_path = '/'.join(_key_path[:-1] + _chunk_path)
self.key_path = _key_path
def prepare_chunk(self):
assert self.buff is None
header = self.fs.read_block(self.key_path, 0, 16)
nbytes, self.cbytes, blocksize = cbuffer_sizes(header)
typesize, _shuffle, _memcpyd = cbuffer_metainfo(header)
self.buff = mmap.mmap(-1, self.cbytes)
self.buff[0:16] = header
self.nblocks = nbytes / blocksize
self.nblocks = (
int(self.nblocks)
if self.nblocks == int(self.nblocks)
else int(self.nblocks + 1)
)
if self.nblocks == 1:
self.buff = self.read_full()
return
start_points_buffer = self.fs.read_block(
self.key_path, 16, int(self.nblocks * 4)
)
self.start_points = np.frombuffer(
start_points_buffer, count=self.nblocks, dtype=np.int32
)
self.start_points_max = self.start_points.max()
self.buff[16: (16 + (self.nblocks * 4))] = start_points_buffer
self.n_per_block = blocksize / typesize
def read_part(self, start, nitems):
assert self.buff is not None
if self.nblocks == 1:
return
start_block = int(start / self.n_per_block)
wanted_decompressed = 0
while wanted_decompressed < nitems:
if start_block not in self.read_blocks:
start_byte = self.start_points[start_block]
if start_byte == self.start_points_max:
stop_byte = self.cbytes
else:
stop_byte = self.start_points[self.start_points > start_byte].min()
length = stop_byte - start_byte
data_buff = self.fs.read_block(self.key_path, start_byte, length)
self.buff[start_byte:stop_byte] = data_buff
self.read_blocks.add(start_block)
if wanted_decompressed == 0:
wanted_decompressed += ((start_block + 1) * self.n_per_block) - start
else:
wanted_decompressed += self.n_per_block
start_block += 1
def read_full(self):
return self.chunk_store[self.store_key]
def retry_call(callabl: Callable,
args=None,
kwargs=None,
exceptions: Tuple[Any, ...] = (),
retries: int = 10,
wait: float = 0.1) -> Any:
"""
Make several attempts to invoke the callable. If one of the given exceptions
is raised, wait the given period of time and retry up to the given number of
retries.
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
for attempt in range(1, retries+1):
try:
return callabl(*args, **kwargs)
except exceptions:
if attempt < retries:
time.sleep(wait)
else:
raise
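# Minimal usage sketch (illustrative only): a callable that fails twice with a listed
# exception is retried until it succeeds on the third attempt.
def _example_retry_call():
    state = {'calls': 0}
    def flaky():
        state['calls'] += 1
        if state['calls'] < 3:
            raise OSError('transient failure')
        return 'ok'
    assert retry_call(flaky, exceptions=(OSError,), retries=5, wait=0.0) == 'ok'
    assert state['calls'] == 3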
def all_equal(value: Any, array: Any):
"""
Test if all the elements of an array are equivalent to a value.
If `value` is None, then this function does not do any comparison and
returns False.
"""
if value is None:
return False
if not value:
# if `value` is falsey, then just 1 truthy value in `array`
# is sufficient to return False. We assume here that np.any is
# optimized to return on the first truthy value in `array`.
try:
return not np.any(array)
except TypeError: # pragma: no cover
pass
if np.issubdtype(array.dtype, np.object_):
# we have to flatten the result of np.equal to handle outputs like
# [np.array([True,True]), True, True]
return all(flatten(np.equal(value, array, dtype=array.dtype)))
else:
# Numpy errors if you call np.isnan on custom dtypes, so ensure
# we are working with floats before calling isnan
if np.issubdtype(array.dtype, np.floating) and np.isnan(value):
return np.all(np.isnan(array))
else:
# using == raises warnings from numpy deprecated pattern, but
# using np.equal() raises type errors for structured dtypes...
return np.all(value == array)
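# Minimal usage sketch (illustrative only): note the NaN-aware comparison for float
# values and the early False when `value` is None.
def _example_all_equal():
    assert all_equal(1, np.array([1, 1, 1]))
    assert not all_equal(1, np.array([1, 2, 1]))
    assert all_equal(np.nan, np.array([np.nan, np.nan]))
    assert not all_equal(None, np.array([1, 1]))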
|
the-stack_106_21964
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Uniform distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution # pylint: disable=line-too-long
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Uniform(distribution.Distribution):
"""Uniform distribution with `a` and `b` parameters.
The PDF of this distribution is constant between [`a`, `b`], and 0 elsewhere.
"""
def __init__(
self, a=0.0, b=1.0, strict=True, strict_statistics=True, name="Uniform"):
"""Construct Uniform distributions with `a` and `b`.
The parameters `a` and `b` must be shaped in a way that supports
broadcasting (e.g. `b - a` is a valid operation).
Here are examples without broadcasting:
```python
# Without broadcasting
u1 = Uniform(3.0, 4.0) # a single uniform distribution [3, 4]
u2 = Uniform([1.0, 2.0], [3.0, 4.0]) # 2 distributions [1, 3], [2, 4]
u3 = Uniform([[1.0, 2.0],
[3.0, 4.0]],
[[1.5, 2.5],
[3.5, 4.5]]) # 4 distributions
```
And with broadcasting:
```python
u1 = Uniform(3.0, [5.0, 6.0, 7.0]) # 3 distributions
```
Args:
a: `float` or `double` tensor, the minimum endpoint.
b: `float` or `double` tensor, the maximum endpoint. Must be > `a`.
      strict: Whether to assert that `a < b`. If `strict` is False and inputs
are invalid, correct behavior is not guaranteed.
strict_statistics: Boolean, default True. If True, raise an exception if
a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
If False, batch members with valid parameters leading to undefined
statistics will return NaN for this statistic.
name: The name to prefix Ops created by this distribution class.
Raises:
InvalidArgumentError: if `a >= b` and `strict=True`.
"""
self._strict_statistics = strict_statistics
self._strict = strict
with ops.op_scope([a, b], name):
with ops.control_dependencies(
[check_ops.assert_less(a, b)] if strict else []):
a = array_ops.identity(a, name="a")
b = array_ops.identity(b, name="b")
self._a = a
self._b = b
self._name = name
self._batch_shape = self._ones().get_shape()
self._event_shape = tensor_shape.TensorShape([])
contrib_tensor_util.assert_same_float_dtype((a, b))
@property
def strict_statistics(self):
"""Boolean describing behavior when a stat is undefined for batch member."""
return self._strict_statistics
@property
def strict(self):
"""Boolean describing behavior on invalid input."""
return self._strict
@property
def name(self):
return self._name
@property
def dtype(self):
return self.a.dtype
def batch_shape(self, name="batch_shape"):
with ops.name_scope(self.name):
with ops.op_scope([], name):
return array_ops.shape(self._ones())
def get_batch_shape(self):
return self._batch_shape
def event_shape(self, name="event_shape"):
with ops.name_scope(self.name):
with ops.op_scope([], name):
return constant_op.constant([], dtype=dtypes.int32)
def get_event_shape(self):
return self._event_shape
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def prob(self, x, name="prob"):
"""The PDF of observations in `x` under these Uniform distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `a` and `b`.
name: The name to give this op.
Returns:
prob: tensor of dtype `dtype`, the prob values of `x`. If `x` is `nan`,
will return `nan`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, x], name):
x = ops.convert_to_tensor(x, name="x")
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s" %
(x.dtype, self.dtype))
broadcasted_x = x * self._ones()
return math_ops.select(
math_ops.is_nan(broadcasted_x), broadcasted_x, math_ops.select(
math_ops.logical_or(broadcasted_x < self.a,
broadcasted_x > self.b),
array_ops.zeros_like(broadcasted_x),
(1.0 / self.range()) * array_ops.ones_like(broadcasted_x)))
def log_prob(self, x, name="log_prob"):
return super(Uniform, self).log_prob(x, name)
def cdf(self, x, name="cdf"):
"""CDF of observations in `x` under these Uniform distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `a` and `b`.
name: The name to give this op.
Returns:
cdf: tensor of dtype `dtype`, the CDFs of `x`. If `x` is `nan`, will
return `nan`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, x], name):
x = ops.convert_to_tensor(x, name="x")
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s" %
(x.dtype, self.dtype))
broadcasted_x = x * self._ones()
zeros = array_ops.zeros_like(x + self.a + self.b, dtype=self.dtype)
ones = array_ops.ones_like(x + self.a + self.b, dtype=self.dtype)
result_if_not_big = math_ops.select(
x < self.a, zeros, (broadcasted_x - self.a) / self.range())
return math_ops.select(x >= self.b, ones, result_if_not_big)
def log_cdf(self, x, name="log_cdf"):
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, x], name):
x = ops.convert_to_tensor(x, name="x")
return math_ops.log(self.cdf(x))
def entropy(self, name="entropy"):
"""The entropy of Uniform distribution(s).
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropy.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, self.range()], name):
return math_ops.log(self.range())
def sample(self, n, seed=None, name="sample"):
"""Sample `n` observations from the Uniform Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`
with values of type `self.dtype`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, n], name):
n = ops.convert_to_tensor(n, name="n")
n_val = tensor_util.constant_value(n)
shape = array_ops.concat(0, [array_ops.pack([n]), self.batch_shape()])
samples = random_ops.random_uniform(shape=shape,
dtype=self.dtype,
seed=seed)
# Provide some hints to shape inference
inferred_shape = tensor_shape.vector(n_val).concatenate(
self.get_batch_shape())
samples.set_shape(inferred_shape)
return (array_ops.expand_dims(self.a, 0) + array_ops.expand_dims(
self.range(), 0) * samples)
def mean(self, name="mean"):
with ops.name_scope(self.name):
with ops.op_scope([self._a, self._b], name):
return (self.a + self.b) / 2
def variance(self, name="variance"):
with ops.name_scope(self.name):
with ops.op_scope([self.range()], name):
return math_ops.square(self.range()) / 12.
def std(self, name="std"):
with ops.name_scope(self.name):
with ops.op_scope([self.range()], name):
return self.range() / math_ops.sqrt(12.)
def range(self, name="range"):
"""`b - a`."""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b], name):
return self.b - self.a
@property
def is_reparameterized(self):
return True
  # TODO(rsepassi): Find a more efficient way of doing the broadcasting in _ones
# and _zeros.
def _ones(self):
return array_ops.ones_like(self.a + self.b)
def _zeros(self):
return array_ops.zeros_like(self.a + self.b)
@property
def is_continuous(self):
return True
|
the-stack_106_21965
|
import gpflow
import numpy as np
from gpflow.mean_functions import Identity, Linear, Zero
from .layers import SVGPLayer
def init_layers_linear(X, Y, Z, kernels, layer_sizes, mean_function=Zero(),
num_outputs=None, Layer=SVGPLayer, whiten=False):
num_outputs = num_outputs or Y.shape[1]
layers = []
X_running, Z_running = X.copy(), Z.copy()
for in_idx, kern_in in enumerate(kernels[:-1]):
dim_in = layer_sizes[in_idx]
dim_out = layer_sizes[in_idx+1]
# Initialize mean function to be either Identity or PCA projection
if dim_in == dim_out:
mf = Identity()
else:
if dim_in > dim_out: # stepping down, use the pca projection
# use eigenvectors corresponding to dim_out largest eigenvalues
_, _, V = np.linalg.svd(X_running, full_matrices=False)
W = V[:dim_out, :].T
else: # stepping up, use identity + padding
W = np.concatenate([np.eye(dim_in),
np.zeros((dim_in, dim_out - dim_in))], 1)
mf = Linear(W)
gpflow.set_trainable(mf.A, False)
gpflow.set_trainable(mf.b, False)
layers.append(Layer(kern_in, Z_running, dim_out, mf, white=whiten))
if dim_in != dim_out:
Z_running = Z_running.dot(W)
X_running = X_running.dot(W)
# final layer
layers.append(Layer(kernels[-1], Z_running, num_outputs, mean_function,
white=whiten))
return layers
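# Minimal numpy-only sketch (illustrative only) of how the linear mean-function weights W
# are chosen above when layer widths change: a PCA projection (top right singular vectors)
# when stepping down, identity plus zero padding when stepping up.
def _example_mean_function_weights(X_running, dim_out):
    dim_in = X_running.shape[1]
    if dim_in > dim_out:
        # eigenvectors corresponding to the dim_out largest singular values
        _, _, V = np.linalg.svd(X_running, full_matrices=False)
        W = V[:dim_out, :].T
    else:
        W = np.concatenate([np.eye(dim_in),
                            np.zeros((dim_in, dim_out - dim_in))], 1)
    assert W.shape == (dim_in, dim_out)
    return X_running.dot(W)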
|
the-stack_106_21966
|
from torch_sparse import SparseTensor
class ToSparseTensor(object):
r"""Converts the :obj:`edge_index` attribute of a data object into a
(transposed) :class:`torch_sparse.SparseTensor` type with key
    :obj:`adj_t`.
Args:
        remove_edge_index (bool, optional): If set to :obj:`False`, the
:obj:`edge_index` tensor will not be removed.
(default: :obj:`True`)
fill_cache (bool, optional): If set to :obj:`False`, will not
fill the underlying :obj:`SparseTensor` cache.
(default: :obj:`True`)
"""
def __init__(self, remove_edge_index: bool = True,
fill_cache: bool = True):
self.remove_edge_index = remove_edge_index
self.fill_cache = fill_cache
def __call__(self, data):
assert data.edge_index is not None
(row, col), N, E = data.edge_index, data.num_nodes, data.num_edges
perm = (col * N + row).argsort()
row, col = row[perm], col[perm]
if self.remove_edge_index:
data.edge_index = None
value = None
for key in ['edge_weight', 'edge_attr', 'edge_type']:
if data[key] is not None:
value = data[key][perm]
if self.remove_edge_index:
data[key] = None
break
for key, item in data:
if item.size(0) == E:
data[key] = item[perm]
data.adj_t = SparseTensor(row=col, col=row, value=value,
sparse_sizes=(N, N), is_sorted=True)
if self.fill_cache: # Pre-process some important attributes.
data.adj_t.storage.rowptr()
data.adj_t.storage.csr2csc()
return data
def __repr__(self):
return f'{self.__class__.__name__}()'
|
the-stack_106_21970
|
"""A collections of functions to facilitate
analysis of HiC data based on the cooler and cooltools
interfaces."""
import warnings
from typing import Tuple, Dict, Callable
import cooltools.expected
import cooltools.snipping
import pandas as pd
import bioframe
import cooler
import pairtools
import numpy as np
import multiprocess
from .snipping_lib import flexible_pileup
# define type aliases
CisTransPairs = Dict[str, pd.DataFrame]
PairsSamples = Dict[str, CisTransPairs]
# define functions
def get_expected(
clr: cooler.Cooler, arms: pd.DataFrame, proc: int = 20, ignore_diagonals: int = 2
) -> pd.DataFrame:
"""Takes a clr file handle and a pandas dataframe
    with chromosomal arms (generated by get_arms_hg19()) and calculates
    the expected read number at a certain genomic distance.
    The proc parameter defines how many processes should be used
    to do the calculations. ignore_diagonals specifies how many diagonals
    to ignore (0 means the main diagonal, 1 means the main diagonal
    and the two flanking diagonals, and so on)."""
with multiprocess.Pool(proc) as pool:
expected = cooltools.expected.diagsum(
clr,
tuple(arms.itertuples(index=False, name=None)),
transforms={"balanced": lambda p: p["count"] * p["weight1"] * p["weight2"]},
map=pool.map,
ignore_diags=ignore_diagonals,
)
# construct a single dataframe for all regions (arms)
expected_df = (
expected.groupby(["region", "diag"])
.aggregate({"n_valid": "sum", "count.sum": "sum", "balanced.sum": "sum"})
.reset_index()
)
# account for different number of valid bins in diagonals
expected_df["balanced.avg"] = expected_df["balanced.sum"] / expected_df["n_valid"]
return expected_df
def get_arms_hg19() -> pd.DataFrame:
"""Downloads the coordinates for chromosomal arms of the
genome assembly hg19 and returns it as a dataframe."""
# download chromosomal sizes
chromsizes = bioframe.fetch_chromsizes("hg19")
# download centromers
centromeres = bioframe.fetch_centromeres("hg19")
centromeres.set_index("chrom", inplace=True)
centromeres = centromeres.mid
# define chromosomes that are well defined (filter out unassigned contigs)
good_chroms = list(chromsizes.index[:23])
# construct arm regions (for each chromosome fro 0-centromere and from centromere to the end)
arms = [
arm
for chrom in good_chroms
for arm in (
(chrom, 0, centromeres.get(chrom, 0)),
(chrom, centromeres.get(chrom, 0), chromsizes.get(chrom, 0)),
)
]
# construct dataframe out of arms
arms = pd.DataFrame(arms, columns=["chrom", "start", "end"])
return arms
def _assign_supports(features, supports):
"""assigns supports to entries in snipping windows.
    Workaround for a bug in cooltools 0.2.0 where duplicate
    supports are not handled correctly. Copied from cooltools.common.assign_regions"""
index_name = features.index.name # Store the name of index
features = (
features.copy().reset_index()
) # Store the original features' order as a column with original index
if "chrom" in features.columns:
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=["chrom", "start", "end"],
cols2=["chrom", "start", "end"],
keep_order=True,
return_overlap=True,
)
overlap_columns = [
"index_1",
"chrom_1",
"start_1",
"end_1",
] # To filter out duplicates later
overlap["overlap_length"] = overlap["overlap_end"] - overlap["overlap_start"]
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values("overlap_length", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
).reset_index(drop=True)
# Copy single column with overlapping region name:
features["region"] = overlap["name_2"]
if "chrom1" in features.columns:
for idx in ("1", "2"):
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=[f"chrom{idx}", f"start{idx}", f"end{idx}"],
cols2=[f"chrom", f"start", f"end"],
keep_order=True,
return_overlap=True,
)
overlap_columns = [
"index_1",
f"chrom{idx}_1",
f"start{idx}_1",
f"end{idx}_1",
] # To filter out duplicates later
overlap[f"overlap_length{idx}"] = (
overlap[f"overlap_end{idx}"] - overlap[f"overlap_start{idx}"]
)
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values(f"overlap_length{idx}", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
).reset_index(drop=True)
# Copy single column with overlapping region name:
features[f"region{idx}"] = overlap["name_2"]
# Form a single column with region names where region1 == region2, and np.nan in other cases:
features["region"] = np.where(
features["region1"] == features["region2"], features["region1"], np.nan
)
features = features.drop(
["region1", "region2"], axis=1
) # Remove unnecessary columns
features = features.set_index(
index_name if not index_name is None else "index"
) # Restore the original index
features.index.name = index_name # Restore original index title
return features
def assign_regions(
window: int,
binsize: int,
chroms: pd.Series,
positions: pd.Series,
arms: pd.DataFrame,
) -> pd.DataFrame:
"""Constructs a 2d region around a series of chromosomal location.
Window specifies the windowsize for the constructed regions. The total region
assigned will be pos-window until pos+window. The binsize specifies the size
of the HiC bins. The positions which represent the center of the regions
is givin the the chroms series and the positions series."""
# construct windows from the passed chromosomes and positions
snipping_windows = cooltools.snipping.make_bin_aligned_windows(
binsize, chroms.values, positions.values, window
)
# assign chromosomal arm to each position
snipping_windows = _assign_supports(snipping_windows, bioframe.parse_regions(arms))
return snipping_windows
def assign_regions_2d(
window: int,
binsize: int,
chroms1: pd.Series,
positions1: pd.Series,
chroms2: pd.Series,
positions2: pd.Series,
arms: pd.DataFrame,
) -> pd.DataFrame:
"""Constructs a 2d region around a series of chromosomal location pairs.
Window specifies the windowsize for the constructed regions. The total region
assigned will be pos-window until pos+window. The binsize specifies the size
of the HiC bins. The positions which represent the center of the regions
is given by the chroms1 and chroms2 series as well as the
positions1 and positions2 series."""
# construct windows from the passed chromosomes 1 and positions 1
windows1 = assign_regions(window, binsize, chroms1, positions1, arms)
windows1.columns = [str(i) + "1" for i in windows1.columns]
    # construct windows from the passed chromosomes 2 and positions 2
windows2 = assign_regions(window, binsize, chroms2, positions2, arms)
windows2.columns = [str(i) + "2" for i in windows2.columns]
    # concatenate windows
    windows = pd.concat((windows1, windows2), axis=1)
# filter for mapping to different regions
windows_final = windows.loc[windows["region1"] == windows["region2"], :]
# subset data and rename regions
windows_small = windows_final[
["chrom1", "start1", "end1", "chrom2", "start2", "end2", "region1"]
]
windows_small.columns = [
"chrom1",
"start1",
"end1",
"chrom2",
"start2",
"end2",
"region",
]
return windows_small
def do_pileup_obs_exp(
clr: cooler.Cooler,
expected_df: pd.DataFrame,
snipping_windows: pd.DataFrame,
proc: int = 5,
collapse: bool = True,
) -> np.ndarray:
"""Takes a cooler file handle, an expected dataframe
    constructed by get_expected, snipping windows constructed
    by assign_regions and performs a pileup on all these regions
based on the obs/exp value. Returns a numpy array
that contains averages of all selected regions.
The collapse parameter specifies whether to return
the average window over all piles (collapse=True), or the individual
windows (collapse=False)."""
region_frame = get_regions_from_snipping_windows(expected_df)
oe_snipper = cooltools.snipping.ObsExpSnipper(
clr, expected_df, regions=bioframe.parse_regions(region_frame)
)
# set warnings filter to ignore RuntimeWarnings since cooltools
# does not check whether there are inf or 0 values in
# the expected dataframe
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
with multiprocess.Pool(proc) as pool:
# extract a matrix of obs/exp average values for each snipping_window
oe_pile = cooltools.snipping.pileup(
snipping_windows, oe_snipper.select, oe_snipper.snip, map=pool.map
)
if collapse:
# calculate the average of all windows
collapsed_pile = np.nanmean(oe_pile[:, :, :], axis=2)
return collapsed_pile
return oe_pile
def do_pileup_iccf(
clr: cooler.Cooler,
snipping_windows: pd.DataFrame,
proc: int = 5,
collapse: bool = True,
) -> np.ndarray:
"""Takes a cooler file handle and snipping windows constructed
    by assign_regions and performs a pileup on all these regions
based on the corrected HiC counts. Returns a numpy array
that contains averages of all selected regions. The collapse
parameter specifies whether to return
the average window over all piles (collapse=True), or the individual
windows (collapse=False)."""
# get regions from snipping windows
region_frame = get_regions_from_snipping_windows(snipping_windows)
iccf_snipper = cooltools.snipping.CoolerSnipper(
clr, regions=bioframe.parse_regions(region_frame)
)
with multiprocess.Pool(proc) as pool:
iccf_pile = cooltools.snipping.pileup(
snipping_windows, iccf_snipper.select, iccf_snipper.snip, map=pool.map
)
if collapse:
# calculate the average of all windows
collapsed_pile_plus = np.nanmean(iccf_pile[:, :, :], axis=2)
return collapsed_pile_plus
return iccf_pile
def sliding_diamond(
array: np.ndarray, side_len: int = 6, center_x: bool = True
) -> Tuple[np.ndarray, np.ndarray]:
"""Will slide a diamond of side length 'sideLen'
down the diagonal of the passed array and return
the average values for each position and
the relative position of each value with respect
to the center of the array (in Bin units)"""
# initialize accumulators for diamond value and x-position
diamond_accumulator = list()
bin_accumulator = list()
if side_len % 2 == 0:
half_window = side_len
for i in range(0, (array.shape[0] - half_window + 1)):
# extract diamond
diamond_array = array[i : (i + half_window), i : (i + half_window)]
# set inf to nan for calculation of mean
diamond_array[np.isinf(diamond_array)] = np.nan
diamond_accumulator.append(np.nanmean(diamond_array))
# append x-value for this particular bin
bin_accumulator.append(
np.median(
range(
i,
(i + half_window),
)
)
)
else:
half_window = side_len // 2
for i in range(half_window, (array.shape[0] - half_window)):
# extract diamond
diamond_array = array[
i - half_window : (i + half_window) + 1,
i - half_window : (i + half_window) + 1,
]
# set inf to nan for calculation of mean
diamond_array[np.isinf(diamond_array)] = np.nan
diamond_accumulator.append(np.nanmean(diamond_array))
# append x-value for this particular bin
bin_accumulator.append(
np.median(
range(
i - half_window,
(i + half_window) + 1,
)
)
)
if center_x:
x_out = np.array(bin_accumulator - np.median(bin_accumulator))
else:
x_out = np.array(bin_accumulator)
return (x_out, np.array(diamond_accumulator))
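# Minimal usage sketch (illustrative only): slide a 3-bin diamond down the diagonal of a
# toy 5x5 matrix; three window positions fit, each reported with a centered x-coordinate.
def _example_sliding_diamond():
    toy = np.arange(25, dtype=float).reshape(5, 5)
    x_positions, diamond_means = sliding_diamond(toy, side_len=3, center_x=True)
    assert len(x_positions) == len(diamond_means) == 3
    assert x_positions[1] == 0  # middle window sits at the center of the array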
def load_pairs(path: str) -> pd.DataFrame:
"""Function to load a .pairs or .pairsam file
into a pandas dataframe.
This only works for relatively small files!"""
    # get handles for header and pairs_body
header, pairs_body = pairtools._headerops.get_header(
pairtools._fileio.auto_open(path, "r")
)
# extract column names from header
cols = pairtools._headerops.extract_column_names(header)
# read data into dataframe
frame = pd.read_csv(pairs_body, sep="\t", names=cols)
return frame
def down_sample_pairs(
sample_dict: PairsSamples, distance: int = 10 ** 4
) -> PairsSamples:
"""Will downsample cis and trans reads in sampleDict to contain
as many combined cis and trans reads as the sample with the lowest readnumber of the
specified distance."""
# initialize output dictionary
out_dict = {sample: {} for sample in sample_dict}
for sample in sample_dict.keys():
# create temporary dataframes
cis_temp = sample_dict[sample]["cis"]
cis_temp["rType"] = "cis"
trans_temp = sample_dict[sample]["trans"]
trans_temp["rType"] = "trans"
# concatenate them and store in outdict
out_dict[sample]["all"] = pd.concat((cis_temp, trans_temp))
# filter on distance
out_dict[sample]["all"] = out_dict[sample]["all"].loc[
(out_dict[sample]["all"]["pos2"] - out_dict[sample]["all"]["pos1"])
> distance,
:,
]
# get the minimum number of reads
min_reads = min([len(i["all"]) for i in out_dict.values()])
# do the downsampling and split into cis and trans
for sample in out_dict.keys():
out_dict[sample]["all"] = out_dict[sample]["all"].sample(n=min_reads)
out_dict[sample]["cis"] = out_dict[sample]["all"].loc[
out_dict[sample]["all"]["rType"] == "cis", :
]
out_dict[sample]["trans"] = out_dict[sample]["all"].loc[
out_dict[sample]["all"]["rType"] == "trans", :
]
# get rid of all reads
out_dict[sample].pop("all")
return out_dict
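# Minimal usage sketch (illustrative only, with tiny hand-made dataframes): after
# downsampling, both samples contain the same total number of cis + trans reads
# beyond the distance cutoff.
def _example_down_sample_pairs():
    cis = pd.DataFrame({"pos1": [0, 0], "pos2": [10**5, 2 * 10**5]})
    trans = pd.DataFrame({"pos1": [0], "pos2": [5 * 10**5]})
    samples = {"s1": {"cis": cis.copy(), "trans": trans.copy()},
               "s2": {"cis": cis.copy(), "trans": trans.copy()}}
    result = down_sample_pairs(samples, distance=10**4)
    assert set(result["s1"]) == {"cis", "trans"}
    total_s1 = len(result["s1"]["cis"]) + len(result["s1"]["trans"])
    total_s2 = len(result["s2"]["cis"]) + len(result["s2"]["trans"])
    assert total_s1 == total_s2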
def pile_to_frame(pile: np.ndarray) -> pd.DataFrame:
"""Takes a pile of pileup windows produced
by doPileupsObsExp/doPileupsICCF (with collapse set to False;
this is numpy ndarray with the following dimensions:
pile.shape = [windowSize, windowSize, windowNumber])
and arranges them as a dataframe with the pixels of the
pile flattened into columns and each individual window
being a row.
Window1: | Pixel 1 | Pixel 2 | Pixel3| ...
Window2: | Pixel 1 | Pixel 2 | Pixel3| ...
Window3: | Pixel 1 | Pixel 2 | Pixel3| ...
"""
return pd.DataFrame(
pile.flatten().reshape(pile.shape[0] ** 2, pile.shape[2])
).transpose()
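# Minimal usage sketch (illustrative only): two 3x3 windows become two rows of 9 pixel columns.
def _example_pile_to_frame():
    pile = np.zeros((3, 3, 2))
    frame = pile_to_frame(pile)
    assert frame.shape == (2, 9)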
def get_diag_indices(arr: np.ndarray) -> list:
"""Helper function that returns the indices of the diagonal
of a given array into a flattened representation of the array.
For example, the 3 by 3 array:
[0, 1, 2]
[3, 4, 5]
[6, 7, 8]
would have diagonal indices [0, 4, 8].
"""
assert arr.shape[0] == arr.shape[1], "Please supply a square array!"
shape = arr.shape[0]
return [
i + index for index, i in enumerate(range(0, shape ** 2 - shape + 1, shape))
]
def get_pairing_score(
clr: cooler.Cooler,
windowsize: int = 4 * 10 ** 4,
func: Callable = np.mean,
regions: pd.DataFrame = pd.DataFrame(),
norm: bool = True,
blank_diag: bool = True,
arms: pd.DataFrame = pd.DataFrame(),
) -> pd.DataFrame:
"""Takes a cooler file (clr),
a windowsize (windowsize), a summary
function (func) and a set of genomic
regions to calculate the pairing score
as follows: A square with side-length windowsize
is created for each of the entries in the supplied genomics
regions and the summary function applied to the Hi-C pixels
at the location in the supplied cooler file. The results are
returned as a dataframe. If no regions are supplied, regions
are constructed for each bin in the cooler file to
construct a genome-wide pairing score. Norm refers to whether the median of the
calculated pairing score should be subtracted from the supplied values and blankDiag
refers to whether the diagonal should be blanked before calculating pairing score."""
# Check whether genomic regions were supplied
if len(regions) == 0:
# If no regions are supplied, pregenerate all bins; drop bins with nan weights
regions = clr.bins()[:].dropna()
# find midpoint of each bin to assign windows to each midpoint
regions.loc[:, "mid"] = (regions["start"] + regions["end"]) // 2
# check that norm is only set if genomewide pairingScore is calculated
elif norm:
raise ValueError("Norm flag can only be set with genomeWide pairingScore!")
# drop nan rows from regions
regions = regions.dropna()
# fix indices
regions.index = range(len(regions))
regions.loc[:, "binID"] = range(len(regions))
# Chromosomal arms are needed so each process only extracts a subset from the file
if len(arms) == 0:
arms = get_arms_hg19()
# extract all windows
windows = assign_regions(
windowsize, clr.binsize, regions["chrom"], regions["mid"], arms
)
# add binID to later merge piles
windows.loc[:, "binID"] = regions["binID"]
windows = windows.dropna()
# generate pileup
pile = do_pileup_iccf(clr, windows, collapse=False)
# convert to dataframe
pile_frame = pile_to_frame(pile)
if blank_diag:
dummy_array = np.arange(pile[:, :, 0].shape[0] ** 2).reshape(
pile[:, :, 0].shape[0], pile[:, :, 0].shape[0]
)
indices = get_diag_indices(dummy_array)
pile_frame.iloc[:, indices] = np.nan
# apply function to each row (row = individual window)
summarized = pile_frame.apply(func, axis=1)
# subset regions with regions that were assigned windows
output = pd.merge(regions, windows, on="binID", suffixes=("", "_w")).dropna()
# add results
output.loc[:, "PairingScore"] = summarized
# normalize by median
if norm:
output.loc[:, "PairingScore"] = output["PairingScore"] - np.median(
output.dropna()["PairingScore"]
)
return output[["chrom", "start", "end", "PairingScore"]]
def get_pairing_score_obs_exp(
clr: cooler.Cooler,
expected: pd.DataFrame,
windowsize: int = 4 * 10 ** 4,
func: Callable = np.mean,
regions: pd.DataFrame = pd.DataFrame(),
norm: bool = True,
arms: pd.DataFrame = pd.DataFrame(),
) -> pd.DataFrame:
"""Takes a cooler file (clr), an expected dataframe (expected; maybe generated by getExpected),
a windowsize (windowsize), a summary
function (func) and a set of genomic
regions to calculate the pairing score
as follows: A square with side-length windowsize
is created for each of the entries in the supplied genomics
regions and the summary function applied to the Hi-C pixels (obs/exp values)
at the location in the supplied cooler file. The results are
returned as a dataframe. If no regions are supplied, regions
are constructed for each bin in the cooler file to
construct a genome-wide pairing score."""
# Check whether genomic regions were supplied
if len(regions) == 0:
# If no regions are supplied, pregenerate all bins; drop bins with nan weights
regions = clr.bins()[:].dropna()
# find midpoint of each bin to assign windows to each midpoint
regions.loc[:, "mid"] = (regions["start"] + regions["end"]) // 2
# check that norm is only set if genomewide pairingScore is calculated
elif norm:
raise ValueError("Norm flag can only be set with genomeWide pairingScore!")
# drop nan rows from regions
regions = regions.dropna()
# fix indices
regions.index = range(len(regions))
regions.loc[:, "binID"] = range(len(regions))
# Chromosomal arms are needed so each process only extracts a subset from the file
if len(arms) == 0:
arms = get_arms_hg19()
# extract all windows
windows = assign_regions(
windowsize, clr.binsize, regions["chrom"], regions["mid"], arms
)
# add binID to later merge piles
windows.loc[:, "binID"] = regions["binID"]
windows = windows.dropna()
# generate pileup
pile = do_pileup_obs_exp(clr, expected, windows, collapse=False)
# convert to dataframe
pile_frame = pile_to_frame(pile)
# replace inf with nan
pile_frame = pile_frame.replace([np.inf, -np.inf], np.nan)
# apply function to each row (row = individual window)
summarized = pile_frame.apply(func, axis=1)
# subset regions with regions that were assigned windows
output = pd.merge(regions, windows, on="binID", suffixes=("", "_w")).dropna()
# add results
output.loc[:, "PairingScore"] = summarized
# normalize by median
if norm:
output.loc[:, "PairingScore"] = output["PairingScore"] - np.median(
output.dropna()["PairingScore"]
)
return output[["chrom", "start", "end", "PairingScore"]]
def extract_windows_different_sizes_iccf(regions, arms, cooler_file, processes=2):
"""For extraction of a collection of regions that span genomic regions .
regions -> data_frame with chrom, start, end (start, end in genomic coordinates)
cooler -> opened cooler file
arms -> chromosomal arms
"""
# assign arms to regions
snipping_windows = _assign_supports(regions, bioframe.parse_regions(arms)).dropna()
iccf_snipper = cooltools.snipping.CoolerSnipper(
cooler_file, regions=bioframe.parse_regions(arms)
)
with multiprocess.Pool(processes) as pool:
result = flexible_pileup(
snipping_windows, iccf_snipper.select, iccf_snipper.snip, mapper=pool.map
)
return result
def extract_windows_different_sizes_obs_exp(
regions, arms, cooler_file, expected_df, processes=2
):
"""For extraction of a collection of regions that span genomic regions .
regions -> data_frame with chrom, start, end (start, end in genomic coordinates)
cooler -> opened cooler file
arms -> chromosomal arms
"""
# assign arms to regions
snipping_windows = _assign_supports(regions, bioframe.parse_regions(arms)).dropna()
oe_snipper = cooltools.snipping.ObsExpSnipper(
cooler_file, expected_df, regions=bioframe.parse_regions(arms)
)
with multiprocess.Pool(processes) as pool:
result = flexible_pileup(
snipping_windows, oe_snipper.select, oe_snipper.snip, mapper=pool.map
)
return result
def get_regions_from_snipping_windows(snipping_windows):
"""Gets regions for use in CoolerSnipper class from snipping_windows"""
return (
snipping_windows.loc[:, ["region"]]
.drop_duplicates()
.apply(
lambda x: bioframe.region.parse_region(x["region"]),
axis=1,
result_type="expand",
)
.rename(columns={0: "chrom", 1: "start", 2: "end"})
)
|
the-stack_106_21971
|
# Once for All: Train One Network and Specialize it for Efficient Deployment
# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han
# International Conference on Learning Representations (ICLR), 2020.
import torch.nn.functional as F
import torch.nn as nn
import torch
from torch.nn.parameter import Parameter
from ofa.utils import get_same_padding, sub_filter_start_end, make_divisible, SEModule, MyNetwork, MyConv2d
__all__ = ['DynamicSeparableConv2d', 'DynamicConv2d', 'DynamicGroupConv2d',
'DynamicBatchNorm2d', 'DynamicGroupNorm', 'DynamicSE', 'DynamicLinear']
class DynamicSeparableConv2d(nn.Module):
KERNEL_TRANSFORM_MODE = 1 # None or 1
def __init__(self, max_in_channels, kernel_size_list, stride=1, dilation=1):
super(DynamicSeparableConv2d, self).__init__()
self.max_in_channels = max_in_channels
self.kernel_size_list = kernel_size_list
self.stride = stride
self.dilation = dilation
self.conv = nn.Conv2d(
self.max_in_channels, self.max_in_channels, max(
self.kernel_size_list), self.stride,
groups=self.max_in_channels, bias=False,
)
self._ks_set = list(set(self.kernel_size_list))
self._ks_set.sort() # e.g., [3, 5, 7]
if self.KERNEL_TRANSFORM_MODE is not None:
# register scaling parameters
# 7to5_matrix, 5to3_matrix
scale_params = {}
for i in range(len(self._ks_set) - 1):
ks_small = self._ks_set[i]
ks_larger = self._ks_set[i + 1]
param_name = '%dto%d' % (ks_larger, ks_small)
# noinspection PyArgumentList
scale_params['%s_matrix' % param_name] = Parameter(
torch.eye(ks_small ** 2))
for name, param in scale_params.items():
self.register_parameter(name, param)
self.active_kernel_size = max(self.kernel_size_list)
def get_active_filter(self, in_channel, kernel_size):
out_channel = in_channel
max_kernel_size = max(self.kernel_size_list)
start, end = sub_filter_start_end(max_kernel_size, kernel_size)
filters = self.conv.weight[:out_channel,
:in_channel, start:end, start:end]
if self.KERNEL_TRANSFORM_MODE is not None and kernel_size < max_kernel_size:
# start with max kernel
start_filter = self.conv.weight[:out_channel, :in_channel, :, :]
for i in range(len(self._ks_set) - 1, 0, -1):
src_ks = self._ks_set[i]
if src_ks <= kernel_size:
break
target_ks = self._ks_set[i - 1]
start, end = sub_filter_start_end(src_ks, target_ks)
_input_filter = start_filter[:, :, start:end, start:end]
_input_filter = _input_filter.contiguous()
_input_filter = _input_filter.view(
_input_filter.size(0), _input_filter.size(1), -1)
_input_filter = _input_filter.view(-1, _input_filter.size(2))
_input_filter = F.linear(
_input_filter, self.__getattr__(
'%dto%d_matrix' % (src_ks, target_ks)),
)
_input_filter = _input_filter.view(
filters.size(0), filters.size(1), target_ks ** 2)
_input_filter = _input_filter.view(filters.size(
0), filters.size(1), target_ks, target_ks)
start_filter = _input_filter
filters = start_filter
return filters
def forward(self, x, kernel_size=None):
if kernel_size is None:
kernel_size = self.active_kernel_size
in_channel = x.size(1)
filters = self.get_active_filter(in_channel, kernel_size).contiguous()
padding = get_same_padding(kernel_size)
filters = self.conv.weight_standardization(
filters) if isinstance(self.conv, MyConv2d) else filters
y = F.conv2d(
x, filters, None, self.stride, padding, self.dilation, in_channel
)
return y
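# Minimal usage sketch (illustrative only): the same depthwise weight tensor serves several
# kernel sizes; smaller kernels are cut from the center of the largest one and passed
# through the learned transformation matrices registered above.
def _example_dynamic_separable_conv2d():
    conv = DynamicSeparableConv2d(max_in_channels=8, kernel_size_list=[3, 5, 7])
    x = torch.randn(1, 8, 16, 16)
    y_full = conv(x)                  # active (maximum) kernel size 7
    y_small = conv(x, kernel_size=3)  # transformed 3x3 sub-filters, same weights
    assert y_full.shape == y_small.shape == x.shape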
class DynamicConv2d(nn.Module):
def __init__(self, max_in_channels, max_out_channels, kernel_size=1, stride=1, dilation=1):
super(DynamicConv2d, self).__init__()
self.max_in_channels = max_in_channels
self.max_out_channels = max_out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.conv = nn.Conv2d(
self.max_in_channels, self.max_out_channels, self.kernel_size, stride=self.stride, bias=False,
)
self.active_out_channel = self.max_out_channels
def get_active_filter(self, out_channel, in_channel):
return self.conv.weight[:out_channel, :in_channel, :, :]
def forward(self, x, out_channel=None):
if out_channel is None:
out_channel = self.active_out_channel
in_channel = x.size(1)
filters = self.get_active_filter(out_channel, in_channel).contiguous()
padding = get_same_padding(self.kernel_size)
filters = self.conv.weight_standardization(
filters) if isinstance(self.conv, MyConv2d) else filters
y = F.conv2d(x, filters, None, self.stride, padding, self.dilation, 1)
return y
class DynamicGroupConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size_list, groups_list, stride=1, dilation=1):
super(DynamicGroupConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size_list = kernel_size_list
self.groups_list = groups_list
self.stride = stride
self.dilation = dilation
self.conv = nn.Conv2d(
self.in_channels, self.out_channels, max(
self.kernel_size_list), self.stride,
groups=min(self.groups_list), bias=False,
)
self.active_kernel_size = max(self.kernel_size_list)
self.active_groups = min(self.groups_list)
def get_active_filter(self, kernel_size, groups):
start, end = sub_filter_start_end(
max(self.kernel_size_list), kernel_size)
filters = self.conv.weight[:, :, start:end, start:end]
sub_filters = torch.chunk(filters, groups, dim=0)
sub_in_channels = self.in_channels // groups
sub_ratio = filters.size(1) // sub_in_channels
filter_crops = []
for i, sub_filter in enumerate(sub_filters):
part_id = i % sub_ratio
start = part_id * sub_in_channels
filter_crops.append(
sub_filter[:, start:start + sub_in_channels, :, :])
filters = torch.cat(filter_crops, dim=0)
return filters
def forward(self, x, kernel_size=None, groups=None):
if kernel_size is None:
kernel_size = self.active_kernel_size
if groups is None:
groups = self.active_groups
filters = self.get_active_filter(kernel_size, groups).contiguous()
padding = get_same_padding(kernel_size)
filters = self.conv.weight_standardization(
filters) if isinstance(self.conv, MyConv2d) else filters
y = F.conv2d(
x, filters, None, self.stride, padding, self.dilation, groups,
)
return y
class DynamicBatchNorm2d(nn.Module):
SET_RUNNING_STATISTICS = False
def __init__(self, max_feature_dim):
super(DynamicBatchNorm2d, self).__init__()
self.max_feature_dim = max_feature_dim
self.bn = nn.BatchNorm2d(self.max_feature_dim)
@staticmethod
def bn_forward(x, bn: nn.BatchNorm2d, feature_dim):
if bn.num_features == feature_dim or DynamicBatchNorm2d.SET_RUNNING_STATISTICS:
return bn(x)
else:
exponential_average_factor = 0.0
if bn.training and bn.track_running_stats:
if bn.num_batches_tracked is not None:
bn.num_batches_tracked += 1
if bn.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / \
float(bn.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = bn.momentum
return F.batch_norm(
x, bn.running_mean[:feature_dim], bn.running_var[:
feature_dim], bn.weight[:feature_dim],
bn.bias[:feature_dim], bn.training or not bn.track_running_stats,
exponential_average_factor, bn.eps,
)
def forward(self, x):
feature_dim = x.size(1)
y = self.bn_forward(x, self.bn, feature_dim)
return y
class DynamicGroupNorm(nn.GroupNorm):
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True, channel_per_group=None):
super(DynamicGroupNorm, self).__init__(
num_groups, num_channels, eps, affine)
self.channel_per_group = channel_per_group
def forward(self, x):
n_channels = x.size(1)
n_groups = n_channels // self.channel_per_group
return F.group_norm(x, n_groups, self.weight[:n_channels], self.bias[:n_channels], self.eps)
@property
def bn(self):
return self
class DynamicSE(SEModule):
def __init__(self, max_channel):
super(DynamicSE, self).__init__(max_channel)
def get_active_reduce_weight(self, num_mid, in_channel, groups=None):
if groups is None or groups == 1:
return self.fc.reduce.weight[:num_mid, :in_channel, :, :]
else:
assert in_channel % groups == 0
sub_in_channels = in_channel // groups
sub_filters = torch.chunk(
self.fc.reduce.weight[:num_mid, :, :, :], groups, dim=1)
return torch.cat([
sub_filter[:, :sub_in_channels, :, :] for sub_filter in sub_filters
], dim=1)
def get_active_reduce_bias(self, num_mid):
return self.fc.reduce.bias[:num_mid] if self.fc.reduce.bias is not None else None
def get_active_expand_weight(self, num_mid, in_channel, groups=None):
if groups is None or groups == 1:
return self.fc.expand.weight[:in_channel, :num_mid, :, :]
else:
assert in_channel % groups == 0
sub_in_channels = in_channel // groups
sub_filters = torch.chunk(
self.fc.expand.weight[:, :num_mid, :, :], groups, dim=0)
return torch.cat([
sub_filter[:sub_in_channels, :, :, :] for sub_filter in sub_filters
], dim=0)
def get_active_expand_bias(self, in_channel, groups=None):
if groups is None or groups == 1:
return self.fc.expand.bias[:in_channel] if self.fc.expand.bias is not None else None
else:
assert in_channel % groups == 0
sub_in_channels = in_channel // groups
sub_bias_list = torch.chunk(self.fc.expand.bias, groups, dim=0)
return torch.cat([
sub_bias[:sub_in_channels] for sub_bias in sub_bias_list
], dim=0)
def forward(self, x, groups=None):
in_channel = x.size(1)
num_mid = make_divisible(
in_channel // self.reduction, divisor=MyNetwork.CHANNEL_DIVISIBLE)
y = x.mean(3, keepdim=True).mean(2, keepdim=True)
# reduce
reduce_filter = self.get_active_reduce_weight(
num_mid, in_channel, groups=groups).contiguous()
reduce_bias = self.get_active_reduce_bias(num_mid)
y = F.conv2d(y, reduce_filter, reduce_bias, 1, 0, 1, 1)
# relu
y = self.fc.relu(y)
# expand
expand_filter = self.get_active_expand_weight(
num_mid, in_channel, groups=groups).contiguous()
expand_bias = self.get_active_expand_bias(in_channel, groups=groups)
y = F.conv2d(y, expand_filter, expand_bias, 1, 0, 1, 1)
# hard sigmoid
y = self.fc.h_sigmoid(y)
return x * y
class DynamicLinear(nn.Module):
def __init__(self, max_in_features, max_out_features, bias=True):
super(DynamicLinear, self).__init__()
self.max_in_features = max_in_features
self.max_out_features = max_out_features
self.bias = bias
self.linear = nn.Linear(self.max_in_features,
self.max_out_features, self.bias)
self.active_out_features = self.max_out_features
def get_active_weight(self, out_features, in_features):
return self.linear.weight[:out_features, :in_features]
def get_active_bias(self, out_features):
return self.linear.bias[:out_features] if self.bias else None
def forward(self, x, out_features=None):
if out_features is None:
out_features = self.active_out_features
in_features = x.size(1)
weight = self.get_active_weight(out_features, in_features).contiguous()
bias = self.get_active_bias(out_features)
y = F.linear(x, weight, bias)
return y
|
the-stack_106_21975
|
import datetime
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.utils.html import strip_tags
from django.utils.text import Truncator
from wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, StreamFieldPanel
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Page, PageManager
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.search import index
from bvspca.core.blocks import ContentStreamBlock
from bvspca.core.models_abstract import Attachable, MenuTitleable, MetaTagable, PageDesignMixin, PageTypeMixin
class EventManager(PageManager):
def future(self, limit=None):
events = self.live()\
.filter(Q(start_date__gte=datetime.date.today()) | Q(end_date__gte=datetime.date.today()),)\
.order_by('start_date', 'end_date')
if limit:
return events[:limit]
return events
def previous(self, current_event):
previous = self.live()\
.filter(Q(start_date__gte=datetime.date.today()) | Q(end_date__gte=datetime.date.today()),)\
.filter(start_date__lte=current_event.start_date)\
.exclude(pk=current_event.pk)\
.order_by('start_date', 'end_date').last()
return previous if previous else None
def next(self, current_event):
next = self.live()\
.filter(start_date__gte=current_event.start_date)\
.filter(start_date__gte=datetime.date.today())\
.exclude(pk=current_event.pk)\
.order_by('start_date', 'end_date').first()
return next if next else None
class Event(Page, MetaTagable, Attachable, PageTypeMixin):
details = RichTextField(verbose_name='details')
start_date = models.DateField(verbose_name='start date')
end_date = models.DateField(verbose_name='end date', blank=True, null=True)
main_photo = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
contact_name = models.CharField(max_length=100, blank=True)
contact_email = models.EmailField(max_length=100, blank=True)
contact_phone = models.CharField(max_length=15, blank=True)
website = models.URLField(max_length=200, blank=True)
extra_details = StreamField(ContentStreamBlock(required=False), verbose_name="Extra Content", blank=True)
objects = EventManager()
search_fields = Page.search_fields + [
index.SearchField('details'),
]
subpage_types = []
content_panels = Page.content_panels + [
ImageChooserPanel('main_photo'),
FieldRowPanel([
FieldPanel('start_date'),
FieldPanel('end_date'),
]),
FieldPanel('details'),
FieldPanel('contact_name'),
FieldPanel('contact_email'),
FieldPanel('contact_phone'),
FieldPanel('website'),
StreamFieldPanel('extra_details'),
StreamFieldPanel('attachments'),
]
def save(self, *args, **kwargs):
if not self.end_date or self.start_date > self.end_date:
self.end_date = self.start_date
super(Event, self).save(*args, **kwargs)
def formatted_date(self):
# todo: move to a template tag??
if self.start_date == self.end_date:
return self.start_date.strftime('%a, %d %b %Y')
return self.start_date.strftime('%a, %d %b ') + '–' + self.end_date.strftime(' %a, %d %b %Y')
def seo_and_social_meta_values(self):
data = super().seo_and_social_meta_values()
if self.main_photo:
data['photo'] = self.main_photo
data['social_title'] = self.title
data['social_description'] = Truncator(strip_tags(self.details)).chars(200).replace("[\r|\n].", "")
data['site_name'] = settings.WAGTAIL_SITE_NAME
data['page_url'] = self.full_url
return data
def get_context(self, request, *args, **kwargs):
context = super(Event, self).get_context(request, args, kwargs)
context['previous'] = Event.objects.previous(self)
context['next'] = Event.objects.next(self)
return context
class Meta:
indexes = [
models.Index(fields=['start_date']),
models.Index(fields=['end_date']),
models.Index(fields=['start_date', 'end_date']),
]
def __str__(self):
return self.title
class EventsPage(Page, MenuTitleable, PageDesignMixin):
parent_page_types = []
content_panels = [
FieldPanel('title'),
] + PageDesignMixin.content_panels
promote_panels = Page.promote_panels + [FieldPanel('menu_title')]
def get_context(self, request, *args, **kwargs):
        context = super(EventsPage, self).get_context(request, *args, **kwargs)
context['events'] = Event.objects.future()
return context
def __str__(self):
return self.title
Event.parent_page_types = [EventsPage]
EventsPage.subpage_types = [Event]
|
the-stack_106_21977
|
import sys
import subprocess
from Tkinter import Tk, Frame, Button, LEFT, FLAT
import logging
def set_config(logger, logdir=""):
if logdir != "":
handler = logging.FileHandler(logdir)
else:
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
logger = logging.getLogger(__name__)
logger = set_config(logger,'brightness.log')
BINS = 10
BIN_WIDTH = 1
close_after = 700
change = ""
monitor = ""
def get_monitor():
global monitor
comm ='''xrandr --prop | grep " connected"'''
result = subprocess.check_output(comm, shell=True)
logger.debug(result)
# print("get_monitor> ")
# print(result)
monitor_name = result.strip().split('connected')[0].strip()
monitor = monitor_name
print("detected monitor: "+monitor_name)
logger.debug('monitor: '+str(monitor))
return monitor_name
def change_brightness():
t = change.strip()
try:
ch = float(t)
except:
return -1
curr_b = get_curr_brightness()
if curr_b == -1:
err = "Error changing the brightness"
print(err)
logger.debug(err)
return -1
new_b = ch + curr_b
comm = '''xrandr --output %s --brightness %f''' % (monitor,new_b)
result = subprocess.check_output(comm, shell=True)
# print("result: ")
# print(result)
print("New brightness: "+str(new_b))
logger.debug("New brightness: "+str(new_b))
return new_b
def get_new_brightness(scale=20):
new_b = change_brightness()
if new_b == -1:
return -1
else:
return int(new_b * scale)
def get_curr_brightness():
comm = '''xrandr --prop --verbose | grep -A10 " connected" | grep "Brightness" '''
result = subprocess.check_output(comm, shell=True)
logger.debug(result)
#print("result: ")
#print(result)
b_num = result.split(':')[1].strip()
try:
b_num_f = float(b_num)
msg = "current brightness: "+str(b_num_f)
print(msg)
logger.debug(msg)
return b_num_f
except:
err = "Error detecting the current brightness"
print(err)
logger.debug(err)
return -1
#
#
#def get_brightness(scale=20):
# comm = '''xrandr --prop --verbose | grep -A10 " connected" | grep "Brightness" '''
# result = subprocess.check_output(comm, shell=True)
# print("result: ")
# print(result)
# b_num = result.split(':')[1].strip()
# a = float(b_num) * scale
# try:
# return int(round(a))
# except:
# return -1
#
def main():
root = Tk()
#root.attributes('-alpha', 0.0) #For icon
#root.iconify()
#window = tk.Toplevel(root)
#window.overrideredirect(1) #Remove border
frame = Frame(root)
frame.pack()
br = get_new_brightness(scale=BINS) # how many boxes/bins
if br==-1:
br=0
active_bins = min(br, BINS)
for i in range(active_bins):
b = Button(frame, bg="blue", relief=FLAT, width=BIN_WIDTH)
b.pack(side=LEFT)
for i in range(BINS-br):
b = Button(frame, bg="grey", width=BIN_WIDTH)
b.pack(side=LEFT)
for i in range(br-BINS):
b = Button(frame, bg="red", width=BIN_WIDTH)
b.pack(side=LEFT)
root.after(close_after, lambda: root.destroy())
root.mainloop()
if __name__ == '__main__':
if len(sys.argv)==3:
monitor = sys.argv[2]
change = sys.argv[1]
print(change)
main()
elif len(sys.argv)==2:
get_monitor()
change = sys.argv[1]
main()
print("""The application tried to detect the monitor. Pass it as a second argument if it was wrongly detected""")
else:
get_monitor()
change="0"
main()
print("""The application tries to detect the monitor but without any change in the brightness regardless of the button you click expects the args <monitor name> and <+/-percentags>. e.g., python brightnesstk.py LVDS-0 0.2""")
|
the-stack_106_21978
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import json
import math
import os
import sys
from enum import Enum
from typing import List
import numpy as np
import torch
import torch.nn as nn
from pytext.common.constants import Stage
from pytext.config import ConfigBase
from pytext.config.component import Component, ComponentType
from pytext.models.crf import CRF
from pytext.models.model import Model
from pytext.utils import timing
from pytext.utils.file_io import PathManager
class State(Enum):
ANALYSIS = "Analysis"
OTHERS = "Others"
class Sparsifier(Component):
__COMPONENT_TYPE__ = ComponentType.SPARSIFIER
__EXPANSIBLE__ = True
class Config(ConfigBase):
pass
def sparsify(self, *args, **kwargs):
pass
def sparsification_condition(self, *args, **kwargs):
pass
def get_sparsifiable_params(self, *args, **kwargs):
pass
def initialize(self, *args, **kwargs):
pass
def get_current_sparsity(self, model: Model) -> float:
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
nonzero_params = sum(
p.nonzero().size(0) for p in model.parameters() if p.requires_grad
)
return (trainable_params - nonzero_params) / trainable_params
class L0_projection_sparsifier(Sparsifier):
"""
L0 projection-based (unstructured) sparsification
Args:
weights (torch.Tensor): input weight matrix
sparsity (float32): the desired sparsity [0-1]
"""
class Config(Sparsifier.Config):
sparsity: float = 0.9
starting_epoch: int = 2
frequency: int = 1
layerwise_pruning: bool = True
accumulate_mask: bool = False
def __init__(
self,
sparsity,
starting_epoch,
frequency,
layerwise_pruning=True,
accumulate_mask=False,
):
assert 0 <= sparsity <= 1
self.sparsity = sparsity
assert starting_epoch >= 1
self.starting_epoch = starting_epoch
assert frequency >= 1
self.frequency = frequency
self.layerwise_pruning = layerwise_pruning
self.accumulate_mask = accumulate_mask
self._masks = None
@classmethod
def from_config(cls, config: Config):
return cls(
config.sparsity,
config.starting_epoch,
config.frequency,
config.layerwise_pruning,
config.accumulate_mask,
)
def sparsification_condition(self, state):
return (
state.stage == Stage.TRAIN
and state.epoch >= self.starting_epoch
and state.step_counter % self.frequency == 0
)
def sparsify(self, state):
"""
obtain a mask and apply the mask to sparsify
"""
model = state.model
# compute new mask when conditions are True
if self.sparsification_condition(state):
masks = self.get_masks(model)
            # apply the computed mask; self.accumulate_mask is handled separately
if not self.accumulate_mask:
self.apply_masks(model, masks)
        # if self.accumulate_mask is True, apply the existing mask regardless of the Stage
if self.accumulate_mask and self._masks is not None:
self.apply_masks(model, self._masks)
def get_sparsifiable_params(self, model: Model):
sparsifiable_params = [p for p in model.parameters() if p.requires_grad]
return sparsifiable_params
def apply_masks(self, model: Model, masks: List[torch.Tensor]):
"""
apply given masks to zero-out learnable weights in model
"""
learnableparams = self.get_sparsifiable_params(model)
assert len(learnableparams) == len(masks)
for m, w in zip(masks, learnableparams):
if len(m.size()):
assert m.size() == w.size()
w.data *= m.clone()
# if accumulate_mask, remove a param permanently by also removing
# its gradient
if self.accumulate_mask:
w.grad.data *= m.clone()
def get_masks(
self, model: Model, pre_masks: List[torch.Tensor] = None
) -> List[torch.Tensor]:
"""
        Note: this function returns the masks only; it does not sparsify or modify the
        weights. It prunes x% of the weights among the weights marked with "1" in pre_masks.
Args:
model: Model
pre_masks: list of FloatTensors where "1" means retained the weight and
"0" means pruned the weight
Return:
masks: List[torch.Tensor], intersection of new masks and pre_masks, so
that "1" only if the weight is selected after new masking and pre_mask
"""
learnableparams = self.get_sparsifiable_params(model)
if pre_masks:
self._masks = pre_masks
if self._masks is None:
# retain everything if no pre_masks given
self._masks = [torch.ones_like(p) for p in learnableparams]
assert len(learnableparams) == len(self._masks)
for m, w in zip(self._masks, learnableparams):
if len(m.size()):
assert m.size() == w.size()
if self.layerwise_pruning:
masks = []
for m, param in zip(self._masks, learnableparams):
weights_abs = torch.abs(param.data).to(param.device)
                # absolute values of the weights selected from the existing masks
weights_abs_masked_flat = torch.flatten(weights_abs[m.bool()])
total_size = weights_abs_masked_flat.numel()
if total_size > 0:
# using ceil instead of floor() or int()
                    # because at least one element in the tensor is required to be selected
max_num_nonzeros = math.ceil(total_size * (1 - self.sparsity))
                    # only prune among the weights selected from the existing masks
topkval = (
torch.topk(weights_abs_masked_flat, max_num_nonzeros)
.values.min()
.item()
)
                    # intersection of the new mask and the pre-existing masks,
# mask == 1 retain, mask == 0 pruned,
mask = (weights_abs >= topkval).float() * m
else:
mask = param.new_empty(())
masks.append(mask)
else:
# concatenated flatten tensor of learnableparams that have _masks as True
learnableparams_masked_flat = torch.cat(
[
torch.flatten(p[m.bool()])
for m, p in zip(self._masks, learnableparams)
],
dim=0,
)
# using ceil instead of floor() or int() because at least one element
            # in the tensor is required to be selected
max_num_nonzeros = math.ceil(
learnableparams_masked_flat.numel() * (1 - self.sparsity)
)
            # globally find the k-th largest weight magnitude among the weights selected from _masks
topkval = (
torch.topk(torch.abs(learnableparams_masked_flat), max_num_nonzeros)
.values.min()
.item()
)
# intersection of the new mask and _masks,
# mask == 1 retain, mask == 0 pruned,
masks = [
(torch.abs(p.data) >= topkval).float() * m
if p.numel() > 0
else p.new_empty(())
for m, p in zip(self._masks, learnableparams)
]
if self.accumulate_mask:
self._masks = masks
return masks
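# --- Hedged illustration (not part of the original pytext module) ----------------------
# A minimal, self-contained sketch of the layer-wise magnitude masking performed by
# L0_projection_sparsifier.get_masks() for a single tensor: keep the largest
# (1 - sparsity) fraction of entries by absolute value and zero out the rest.
# The function name below is introduced here for illustration only.
def _demo_layerwise_magnitude_mask(param: torch.Tensor, sparsity: float) -> torch.Tensor:
    flat_abs = torch.flatten(torch.abs(param))
    # ceil()/max() so that at least one element is always retained, mirroring the logic above
    keep = max(1, math.ceil(flat_abs.numel() * (1 - sparsity)))
    threshold = torch.topk(flat_abs, keep).values.min()
    return (torch.abs(param) >= threshold).float()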
class CRF_SparsifierBase(Sparsifier):
class Config(Sparsifier.Config):
starting_epoch: int = 1
frequency: int = 1
def sparsification_condition(self, state):
if state.stage == Stage.TRAIN:
return False
return (
state.epoch >= self.starting_epoch
and state.step_counter % self.frequency == 0
)
def get_sparsifiable_params(self, model: nn.Module):
for m in model.modules():
if isinstance(m, CRF):
return m.transitions.data
def get_transition_sparsity(self, transition):
nonzero_params = transition.nonzero().size(0)
return (transition.numel() - nonzero_params) / transition.numel()
class CRF_L1_SoftThresholding(CRF_SparsifierBase):
"""
implement l1 regularization:
min Loss(x, y, CRFparams) + lambda_l1 * ||CRFparams||_1
    and solve the optimization problem via a (stochastic) proximal gradient-based
    method, i.e., soft-thresholding:
param_updated = sign(CRFparams) * max ( abs(CRFparams) - lambda_l1, 0)
"""
class Config(CRF_SparsifierBase.Config):
lambda_l1: float = 0.001
def __init__(self, lambda_l1: float, starting_epoch: int, frequency: int):
self.lambda_l1 = lambda_l1
assert starting_epoch >= 1
self.starting_epoch = starting_epoch
assert frequency >= 1
self.frequency = frequency
@classmethod
def from_config(cls, config: Config):
return cls(config.lambda_l1, config.starting_epoch, config.frequency)
def sparsify(self, state):
if not self.sparsification_condition(state):
return
model = state.model
transition_matrix = self.get_sparsifiable_params(model)
transition_matrix_abs = torch.abs(transition_matrix)
assert (
len(state.optimizer.param_groups) == 1
), "different learning rates for multiple param groups not supported"
lrs = state.optimizer.param_groups[0]["lr"]
threshold = self.lambda_l1 * lrs
transition_matrix = torch.sign(transition_matrix) * torch.max(
(transition_matrix_abs - threshold),
transition_matrix.new_zeros(transition_matrix.shape),
)
current_sparsity = self.get_transition_sparsity(transition_matrix)
print(f"sparsity of CRF transition matrix: {current_sparsity}")
class CRF_MagnitudeThresholding(CRF_SparsifierBase):
"""
magnitude-based (equivalent to projection onto l0 constraint set) sparsification
    on CRF transition matrix. Preserving the top-k elements either row-wise or
    column-wise until the sparsity constraint is met.
"""
class Config(CRF_SparsifierBase.Config):
sparsity: float = 0.9
grouping: str = "row"
def __init__(self, sparsity, starting_epoch, frequency, grouping):
assert 0 <= sparsity <= 1
self.sparsity = sparsity
assert starting_epoch >= 1
self.starting_epoch = starting_epoch
assert frequency >= 1
self.frequency = frequency
assert (
grouping == "row" or grouping == "column"
), "grouping needs to be row or column"
self.grouping = grouping
@classmethod
def from_config(cls, config: Config):
return cls(
config.sparsity, config.starting_epoch, config.frequency, config.grouping
)
def sparsify(self, state):
if not self.sparsification_condition(state):
return
model = state.model
transition_matrix = self.get_sparsifiable_params(model)
num_rows, num_cols = transition_matrix.shape
trans_abs = torch.abs(transition_matrix)
if self.grouping == "row":
max_num_nonzeros = math.ceil(num_cols * (1 - self.sparsity))
topkvals = (
torch.topk(trans_abs, k=max_num_nonzeros, dim=1)
.values.min(dim=1, keepdim=True)
.values
)
else:
max_num_nonzeros = math.ceil(num_rows * (1 - self.sparsity))
topkvals = (
torch.topk(trans_abs, k=max_num_nonzeros, dim=0)
.values.min(dim=0, keepdim=True)
.values
)
# trans_abs < topkvals is a broadcasted comparison
transition_matrix[trans_abs < topkvals] = 0.0
current_sparsity = self.get_transition_sparsity(transition_matrix)
print(f"sparsity of CRF transition matrix: {current_sparsity}")
class SensitivityAnalysisSparsifier(Sparsifier):
class Config(Sparsifier.Config):
pre_train_model_path: str = ""
analyzed_sparsity: float = 0.8
# we don't use all eval data for analysis, only use a portion of the data.
max_analysis_batches: int = 0
# allow the user to skip pruning for some weight. Here we set the max
# number of weight tensor can be skipped for pruning.
max_skipped_weight: int = 0
# if we already did sensitivity analysis before
pre_analysis_path: str = ""
sparsity: float = 0.8
def __init__(
self,
pre_train_model_path,
analyzed_sparsity,
max_analysis_batches,
max_skipped_weight,
pre_analysis_path,
sparsity,
):
assert PathManager.exists(
pre_train_model_path
), "The pre-trained model must be exist"
self.pre_train_model_path = pre_train_model_path
self.param_dict = None
assert (
0.0 <= analyzed_sparsity <= 1.0
), "Analyzed sparsity need to be in the range of [0, 1]"
self.analyzed_sparsity = analyzed_sparsity
self.max_analysis_batches = max_analysis_batches
self.max_skipped_weight = max_skipped_weight
self.require_mask_parameters = []
self.pre_analysis_path = pre_analysis_path
assert (
0.0 <= sparsity <= 1.0
), "Pruning sparsity need to be in the range of [0, 1]"
self.sparsity = sparsity
self._masks = None
self.analysis_state = State.OTHERS
@classmethod
def from_config(cls, config: Config):
return cls(
config.pre_train_model_path,
config.analyzed_sparsity,
config.max_analysis_batches,
config.max_skipped_weight,
config.pre_analysis_path,
config.sparsity,
)
def get_sparsifiable_params(self, model):
param_dict = {}
for module_name, m in model.named_modules():
# Search the name of all module_name in named_modules
# only test the parameters in nn.Linear
if isinstance(m, nn.Linear):
# module_name: module.xxx
# param_name: module.xxx.weight
# we only check weight tensor
param_name = module_name + ".weight"
param_dict[param_name] = m.weight
return param_dict
def get_mask_for_param(self, param, sparsity):
"""
generate the prune mask for one weight tensor.
"""
n = int(sparsity * param.nelement())
if n > 0:
            # If n > 0, we need to remove n parameters; the threshold is
            # taken from the n-th smallest absolute parameter value.
threshold = float(param.abs().flatten().kthvalue(n - 1)[0])
else:
# If n == 0, it means all parameters need to be kept.
# Because the absolute parameter value >= 0, setting
# threshold to -1 ensures param.abs().ge(threshold)
# is True for all the parameters.
threshold = -1.0
        # the mask indicates the weights that need to be kept
mask = param.abs().ge(threshold).float()
return mask
def layer_wise_analysis(
self, param_name, param_dict, trainer, state, eval_data, metric_reporter
):
# perform pruning for the target param with param_name
if param_name is None:
prunable_param_shape = None
else:
prunable_param = param_dict[param_name]
# include the shape information for better analysis
prunable_param_shape = list(prunable_param.shape)
mask = self.get_mask_for_param(prunable_param, self.analyzed_sparsity)
with torch.no_grad():
param_dict[param_name].data.mul_(mask)
# get the eval_metric for the pruned model
with torch.no_grad():
# set the number of batches of eval data for analysis
analysis_data = eval_data
if self.max_analysis_batches > 0:
analysis_data = itertools.islice(eval_data, self.max_analysis_batches)
eval_metric = trainer.run_epoch(state, analysis_data, metric_reporter)
current_metric = metric_reporter.get_model_select_metric(eval_metric)
if metric_reporter.lower_is_better:
current_metric = -current_metric
return current_metric, prunable_param_shape
def find_params_to_prune(self, metric_dict, max_skip_weight_num):
require_mask_parameters = sorted(
metric_dict.keys(), reverse=True, key=lambda param: metric_dict[param]
)
metric_sensitivities_by_param = [
metric_dict[p] for p in require_mask_parameters
]
        skipped_weight_num = 0
        while skipped_weight_num < max_skip_weight_num:
            # calculate the mean and standard deviation of the sensitivities
            # that are still candidates for pruning
            remaining = metric_sensitivities_by_param[
                : len(metric_sensitivities_by_param) - skipped_weight_num
            ]
            mean_ = np.mean(remaining)
            std_ = np.std(remaining)
            # skip pruning of the parameter if the metric sensitivity is
            # less than mean_ - 3 * std_, otherwise break.
            if (
                metric_sensitivities_by_param[-skipped_weight_num - 1]
                >= mean_ - 3 * std_
            ):
                break
            skipped_weight_num += 1
        # drop the skipped weights (slicing with -0 would empty the list, so guard it)
        if skipped_weight_num > 0:
            require_mask_parameters = require_mask_parameters[:-skipped_weight_num]
        # return the pruning candidates and how many weights were skipped
        return require_mask_parameters, skipped_weight_num
def sensitivity_analysis(
self, trainer, state, eval_data, metric_reporter, train_config
):
"""
        Analyze the sensitivity of each weight tensor to the metric.
        Prune the weight tensors one by one and evaluate the metric when the
        corresponding weight tensor is pruned.
        Args:
            trainer (Trainer): trainer used to run the evaluation epochs
            state (TrainingState): the state of the current training
            eval_data (BatchIterator): batch iterator of evaluation data
            metric_reporter (MetricReporter): compute metric based on training
                output and report results to console, file.. etc
            train_config (PyTextConfig): training config
        Returns:
            metric_dict: mapping from each prunable parameter name to the change in
            the evaluation metric when that parameter is pruned.
"""
print("Analyzed_sparsity: {}".format(self.analyzed_sparsity))
print("Evaluation metric_reporter: {}".format(type(metric_reporter).__name__))
output_path = (
os.path.dirname(train_config.task.metric_reporter.output_path)
+ "/sensitivity_analysis_sparsifier.ckp"
)
        # param_dict: dict mapping each parameter name to its weight tensor
self.param_dict = self.get_sparsifiable_params(state.model)
# load the pretrained model
print("load the pretrained model from: " + self.pre_train_model_path)
self.loaded_model = torch.load(
self.pre_train_model_path, map_location=torch.device("cpu")
)
# set model to evaluation mode
state.stage = Stage.EVAL
state.model.eval(Stage.EVAL)
metric_dict = {}
all_param_list = [None] + list(self.param_dict.keys())
print("All prunable parameters", all_param_list)
# print the sensitivity results for each weight
print("#" * 40)
print("save the analysis result to: ", output_path)
print("Pruning Sensitivity Test: param / shape / eval metric")
        # iterate through all_param_list to test pruning sensitivity
for param_name in all_param_list:
print("=" * 40)
print("Testing {}".format(param_name))
state.model.load_state_dict(self.loaded_model["model_state"])
current_metric, prunable_param_shape = self.layer_wise_analysis(
param_name, self.param_dict, trainer, state, eval_data, metric_reporter
)
if param_name is None:
baseline_metric = current_metric
metric_dict[param_name] = current_metric - baseline_metric
print("#" * 40)
# remove baseline metric from the analysis results
if None in metric_dict:
del metric_dict[None]
# write the test result into the checkpoint
if state.rank == 0:
with PathManager.open(output_path, "w") as fp:
json.dump(metric_dict, fp)
return metric_dict
def sparsification_condition(self, state):
return state.stage == Stage.TRAIN
def apply_masks(self, model: Model, masks: List[torch.Tensor]):
"""
apply given masks to zero-out learnable weights in model
"""
learnable_params = self.get_required_sparsifiable_params(model)
assert len(learnable_params) == len(masks)
for m, w in zip(masks, learnable_params):
if len(m.size()):
assert m.size() == w.size()
w.data *= m
def get_current_sparsity(self, model: Model) -> float:
trainable_params = sum(
module.weight.data.numel()
for name, module in model.named_modules()
if isinstance(module, nn.Linear)
)
nonzero_params = sum(
module.weight.data.nonzero().size(0)
for name, module in model.named_modules()
if isinstance(module, nn.Linear)
)
return (trainable_params - nonzero_params) / trainable_params
def sparsify(self, state):
"""
apply the mask to sparsify the weight tensor
"""
# do not sparsify the weight tensor during the analysis
if self.analysis_state == State.ANALYSIS:
return
model = state.model
# compute new mask when conditions are True
if self.sparsification_condition(state):
# applied the computed mask to sparsify the weight
self.apply_masks(model, self._masks)
def get_required_sparsifiable_params(self, model: Model):
        # param_dict contains all parameters; select the required weights.
# if we reload analysis result from file, we need to calculate
# all param_dict again.
if self.param_dict is None:
self.param_dict = self.get_sparsifiable_params(model)
return [self.param_dict[p] for p in self.require_mask_parameters]
def get_masks(self, model: Model) -> List[torch.Tensor]:
"""
Note: this function returns the masks for each weight tensor if
that tensor is required to be pruned
prune x% of weights items among the weights with "1" in mask (self._mask)
indicate the remained weights, with "0" indicate pruned weights
Args:
model: Model
Return:
masks: List[torch.Tensor], the prune mask for the weight of all
layers
"""
learnable_params = self.get_required_sparsifiable_params(model)
masks = []
for param in learnable_params:
mask = self.get_mask_for_param(param, self.sparsity)
masks.append(mask)
return masks
def load_analysis_from_path(self):
assert PathManager.isfile(self.pre_analysis_path), "{} is not a file".format(
self.pre_analysis_path
)
with PathManager.open(self.pre_analysis_path, "r") as fp:
metric_dict = json.load(fp)
return metric_dict
@timing.time("sparsifier initialize")
def initialize(self, trainer, state, eval_data, metric_reporter, train_config):
# if user specify the analysis file, load it from path
if self.pre_analysis_path:
metric_dict = self.load_analysis_from_path()
else:
self.analysis_state = State.ANALYSIS
metric_dict = self.sensitivity_analysis(
trainer, state, eval_data, metric_reporter, train_config
)
# finish the analysis, sparsifier can apply prune mask.
self.analysis_state = State.OTHERS
# skip some of the weight tensors from pruning. The user can
# specify the max_skipped_weight, which limit the max number
# of weight to be skipped.
self.require_mask_parameters, skipped_weight_num = self.find_params_to_prune(
metric_dict, self.max_skipped_weight
)
for p in self.require_mask_parameters:
print(p, " ", metric_dict[p])
print("#" * 40)
sys.stdout.flush()
print(str(skipped_weight_num) + " weight tensors are skipped for pruning")
# initialize and generate the pruning mask. We don't want to generate
# the mask for each step. Otherwise, it will be time inefficient.
self._masks = self.get_masks(state.model)
|
the-stack_106_21980
|
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <[email protected]>
# Released under the AGPL-3.0 License.
from git import Repo, RemoteReference, TagReference, InvalidGitRepositoryError, GitCommandError
from tempfile import NamedTemporaryFile
import shutil
import os
class Git:
def __init__(self, git_repo, repo_dir, pkey=None):
self.git_repo = git_repo
self.repo_dir = repo_dir
self.repo = None
self.pkey = pkey
self.fd = None
self.env = {}
def archive(self, filepath, commit):
with open(filepath, 'wb') as f:
self.repo.archive(f, commit)
def fetch_branches_tags(self):
self._fetch()
branches, tags = {}, {}
for ref in self.repo.references:
if isinstance(ref, RemoteReference):
if ref.remote_head != 'HEAD':
branches[ref.remote_head] = self._get_commits(f'origin/{ref.remote_head}')
elif isinstance(ref, TagReference):
tags[ref.name] = {
'id': ref.tag.hexsha,
'author': ref.tag.tagger.name,
'date': ref.tag.tagged_date,
'message': ref.tag.message.strip()
} if ref.tag else {
'id': ref.commit.binsha.hex(),
'author': ref.commit.author.name,
'date': ref.commit.authored_date,
'message': ref.commit.message.strip()
}
tags = sorted(tags.items(), key=lambda x: x[1]['date'], reverse=True)
return branches, dict(tags)
def _fetch(self):
try:
self.repo.remotes.origin.fetch(p=True, P=True)
except GitCommandError as e:
if self.env:
self.repo.remotes.origin.fetch(env=self.env, p=True, P=True)
else:
raise e
def _get_repo(self):
if os.path.exists(self.repo_dir):
try:
return Repo(self.repo_dir)
except InvalidGitRepositoryError:
if os.path.isdir(self.repo_dir):
shutil.rmtree(self.repo_dir)
else:
os.remove(self.repo_dir)
try:
repo = Repo.clone_from(self.git_repo, self.repo_dir)
except GitCommandError as e:
if self.env:
repo = Repo.clone_from(self.git_repo, self.repo_dir, env=self.env)
else:
raise e
return repo
def _get_commits(self, branch, count=10):
commits = []
for commit in self.repo.iter_commits(branch):
if len(commits) == count:
break
commits.append({
'id': commit.hexsha,
'author': commit.author.name,
'date': commit.committed_date,
'message': commit.message.strip()
})
return commits
def __enter__(self):
if self.pkey:
self.fd = NamedTemporaryFile()
self.fd.write(self.pkey.encode())
self.fd.flush()
self.env = {'GIT_SSH_COMMAND': f'ssh -i {self.fd.name}'}
self.repo = self._get_repo()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.fd:
self.fd.close()
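# --- Hedged usage sketch (not part of the original spug module) -------------------------
# How this helper is typically driven as a context manager; the repository URL and the
# local checkout path below are placeholders, not values from the original project.
if __name__ == '__main__':
    with Git('https://example.com/demo.git', '/tmp/demo_repo') as git:
        branches, tags = git.fetch_branches_tags()
        print(sorted(branches), list(tags)[:5])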
|
the-stack_106_21981
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
import os
import traceback
import six
logger = logging.getLogger(__name__)
DATE_FORMATS = {1: "%Y:%m:%d-%H:%M:%S", 2: "%Y/%m/%d %H:%M:%S", 3: "%Y/%m/%d %H:%M:%S"}
def conv_resol(resolution):
return resolution
def conv_datetime(dt, version=2):
"""Converts dt to string like
version 1 = 2014:12:15-00:00:00
version 2 = 2014/12/15 00:00:00
version 3 = 2014/12/15 00:00:00
"""
try:
fmt = DATE_FORMATS[int(version)]
return dt.strftime(fmt)
except (ValueError, TypeError):
logger.warning("conv_datetime returns %s" % dt)
return dt
def conv_to_ms(td):
"""Converts td to integer number of milliseconds"""
try:
if isinstance(td, six.integer_types):
return td
else:
return int(td.total_seconds() * 1000.0)
except ValueError:
logger.error(traceback.format_exc())
logger.warning("conv_to_ms returns '%s'" % td)
return td
def remove(cache):
"""Remove cache"""
try:
filename = "%s.sqlite" % cache
print("remove %s" % filename)
os.remove(filename)
except Exception:
pass
def create_logger(logger_name, file_name=None):
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    # fall back to a stream handler when no log file is given
    handler = logging.FileHandler(file_name) if file_name else logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s(%(levelname)s): %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logging.getLogger(logger_name)
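# --- Hedged usage sketch (not part of the original module) ------------------------------
# Illustrates the version formats documented in conv_datetime() and the millisecond
# conversion in conv_to_ms().
if __name__ == "__main__":
    import datetime
    dt = datetime.datetime(2014, 12, 15)
    print(conv_datetime(dt, version=1))  # 2014:12:15-00:00:00
    print(conv_datetime(dt, version=2))  # 2014/12/15 00:00:00
    print(conv_to_ms(datetime.timedelta(seconds=1.5)))  # 1500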
|
the-stack_106_21982
|
"""
This module defines siamese neural networks that receive pairs (or triplets) of inputs.
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import ModuleList, Sequential, Linear
from model.basic_module import MultilayerDense, meanVector
def computeListOutputSize(encoders):
outputSize = 0
for encoder in encoders:
outputSize += encoder.getOutputSize()
return outputSize
class WordMean(nn.Module):
def __init__(self, wordEmbedding, updateEmbedding, hidden_size=0, standardization=False, dropout=0.0,
batch_normalization=False):
super(WordMean, self).__init__()
if standardization:
wordEmbedding.zscoreNormalization()
self.embeddingSize = wordEmbedding.getEmbeddingSize()
self.embedding = nn.Embedding(wordEmbedding.getNumberOfVectors(), self.embeddingSize,
padding_idx=wordEmbedding.getPaddingIdx())
self.embedding.weight.data.copy_(torch.from_numpy(wordEmbedding.getEmbeddingMatrix()))
self.embedding.weight.requires_grad = updateEmbedding
self.dropout = nn.Dropout(dropout) if dropout > 0.0 else None
self.batch_norm = nn.BatchNorm1d(self.embeddingSize) if batch_normalization else None
self.hidden = Linear(self.embeddingSize, self.embeddingSize) if hidden_size > 0 else None
def forward(self, x, initialHidden, lengths):
x = self.embedding(x)
if self.hidden:
x = x + F.relu(self.hidden(x))
x = meanVector(x, lengths)
if self.batch_norm:
x = self.batch_norm(x)
if self.dropout:
x = self.dropout(x)
return x
def getOutputSize(self):
return self.embeddingSize
class CosinePairNN(nn.Module):
def __init__(self, encoders):
super(CosinePairNN, self).__init__()
self.logger = logging.getLogger(__name__)
self.encoders = ModuleList(encoders)
self.logger.info("Cosine Pair NN")
def encode(self, bugInput):
x = [encoder(*input) for input, encoder in zip(bugInput, self.encoders)]
x = torch.cat(x, 1)
return x
def forward(self, bug1, bug2):
"""
        :param bug1, bug2: encoder inputs for each bug of the pair
        :return: (batch,) tensor containing the cosine similarity of each pair
"""
bugEmb1 = self.encode(bug1)
bugEmb2 = self.encode(bug2)
return self.similarity(bugEmb1, bugEmb2)
def similarity(self, bugEmb1, bugEmb2):
return F.cosine_similarity(bugEmb1, bugEmb2)
class CosineTripletNN(nn.Module):
def __init__(self, encoders, dropout=0.0):
super(CosineTripletNN, self).__init__()
self.logger = logging.getLogger(__name__)
self.encoders = ModuleList(encoders)
self.logger.info("Cosine Triplet NN")
self.dropout = nn.Dropout(dropout) if dropout > 0 else None
def encode(self, bugInput):
x = [encoder(*input) for input, encoder in zip(bugInput, self.encoders)]
x = torch.cat(x, 1)
if self.dropout:
x = self.dropout(x)
return x
def forward(self, anchor, pos, neg):
"""
        :param anchor, pos, neg: encoder inputs for the anchor, positive and negative bugs
        :return: two (batch,) tensors with the anchor-positive and anchor-negative cosine similarities
"""
anchorEmb = self.encode(anchor)
posEmb = self.encode(pos)
negEmb = self.encode(neg)
return self.similarity(anchorEmb, posEmb), self.similarity(anchorEmb, negEmb)
def similarity(self, bugEmb1, bugEmb2):
return F.cosine_similarity(bugEmb1, bugEmb2)
class ProbabilityPairNN(nn.Module):
"""
"""
def __init__(self, encoders, withoutBugEmbedding=False, hiddenLayerSizes=[100], batchNormalization=True,
dropout=0.0):
super(ProbabilityPairNN, self).__init__()
self.logger = logging.getLogger(__name__)
self.encoders = ModuleList(encoders)
encOutSize = computeListOutputSize(encoders)
self.logger.info("%sUsing raw embeddings" % ("Not " if withoutBugEmbedding else ""))
hiddenInput = 2 * encOutSize if withoutBugEmbedding else 4 * encOutSize
self.withoutBugEmbedding = withoutBugEmbedding
self.logger.info(
"Probability Pair NN: without_raw_bug={}, batch_normalization={}".format(self.withoutBugEmbedding,
batchNormalization))
seq = []
last = hiddenInput
for currentSize in hiddenLayerSizes:
seq.append(nn.Linear(last, currentSize))
if batchNormalization:
seq.append(nn.BatchNorm1d(currentSize))
seq.append(nn.ReLU())
if dropout > 0.0:
seq.append(nn.Dropout(dropout))
self.logger.info("==> Create Hidden Layer (%d,%d) in the classifier" % (last, currentSize))
last = currentSize
seq.append(nn.Linear(last, 2))
self.sequential = Sequential(*seq)
def encode(self, bugInput):
x = [encoder(*input) for input, encoder in zip(bugInput, self.encoders)]
x = torch.cat(x, 1)
return x
def forward(self, bug1, bug2):
"""
        :param bug1, bug2: encoder inputs for each bug of the pair
        :return: (batch, 2) tensor containing class log-probabilities for each pair
"""
bugEmb1 = self.encode(bug1)
bugEmb2 = self.encode(bug2)
return self.similarity(bugEmb1, bugEmb2)
def similarity(self, bugEmb1, bugEmb2):
hiddenIn = [torch.pow(bugEmb2 - bugEmb1, 2), bugEmb2 * bugEmb1]
if not self.withoutBugEmbedding:
hiddenIn.append(bugEmb1)
hiddenIn.append(bugEmb2)
x = torch.cat(hiddenIn, 1)
x = self.sequential(x)
return F.log_softmax(x, dim=1)
class CategoricalEncoder(nn.Module):
"""
Encode the categorical information into a vector.
"""
def __init__(self, lexicons, embeddingSize, hiddenSizes, activationFunc=F.tanh, batchNormalization=False,
applyBatchLastLayer=True, dropoutLastLayer=0.0, layerNorm=False):
super(CategoricalEncoder, self).__init__()
logging.getLogger(__name__).info("Categorical Encoder: emb_size={}".format(embeddingSize))
self.embeddingSize = embeddingSize
self.embeddings = ModuleList([nn.Embedding(lex.getLen(), self.embeddingSize) for lex in lexicons])
self.dense = MultilayerDense(len(lexicons) * embeddingSize, hiddenSizes, activationFunc, batchNormalization,
applyBatchLastLayer, dropoutLastLayer, layerNorm)
def forward(self, x):
embList = []
for em, _in in zip(self.embeddings, x):
embList.append(em(_in))
x = torch.cat(embList, 1)
return self.dense(x)
def getOutputSize(self):
return self.dense.getOutputSize()
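# --- Hedged illustration (not part of the original module) ------------------------------
# A minimal sketch of the pair features built in ProbabilityPairNN.similarity(): the
# squared difference and elementwise product of the two bug embeddings, optionally
# concatenated with the raw embeddings themselves. The function name is an assumption.
def _demo_pair_features(emb1: torch.Tensor, emb2: torch.Tensor, keep_raw: bool = True) -> torch.Tensor:
    feats = [torch.pow(emb2 - emb1, 2), emb2 * emb1]
    if keep_raw:
        feats.extend([emb1, emb2])
    return torch.cat(feats, 1)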
|
the-stack_106_21984
|
"""RoomMessagePosterFunction
Allows posting a message to a room. Returns the message ID of the posted
message.
"""
from __future__ import print_function
import os
import json
import time
import hashlib
import boto3
import botocore
from apigateway_helpers.exception import APIGatewayException
from apigateway_helpers.headers import get_response_headers
client_message_id_max_length = 36
sns_client = boto3.client("sns")
cognito_idp_client = boto3.client("cognito-idp")
logs_client = boto3.client("logs")
room_streams_created_map = {}
def lambda_handler(event, context):
print("Event: {}".format(json.dumps(event)))
if "warming" in event and "{}".format(event["warming"]).lower() == "true":
return {
"message": "Warmed!"
}
event["request-body"] = json.loads(event["body"])
cognito_identity_id = event["requestContext"]["identity"]["cognitoIdentityId"]
if event["request-body"].get("version", "1") != "1":
raise APIGatewayException("Unsupported message version: {}".format(event["request-body"]["version"]), 400)
client_message_id = event["request-body"].get("client-message-id")
if client_message_id is not None and len(client_message_id) > client_message_id_max_length:
raise APIGatewayException("Parameter \"client-message-id\" must be {} bytes or fewer.".format(client_message_id_max_length), 400)
room_id = event["pathParameters"]["room-id"]
sns_topic_arn = get_room_topic_arn(event, context, room_id)
user_profile_dataset_name = os.environ["COGNITO_USER_PROFILE_DATASET_NAME"]
identity_pool_id = event["requestContext"]["identity"]["cognitoIdentityPoolId"]
cognito_auth_provider_string = event["requestContext"]["identity"]["cognitoAuthenticationProvider"]
cognito_idp_name = cognito_auth_provider_string.split(",")[0]
user_pool_id = "/".join(cognito_idp_name.split("/")[1:])
cognito_user_pool_sub_value = cognito_auth_provider_string.split(",")[1].split(":")[2]
author_name = cognito_identity_id
response = cognito_idp_client.list_users(
UserPoolId = user_pool_id,
AttributesToGet = ["email"],
Filter = "sub = \"{}\"".format(cognito_user_pool_sub_value),
Limit = 1
)
author_attributes_list = response["Users"][0]["Attributes"]
author_email_address = None
author_name = None
author_avatar_hash = None
for each_attribute_set in author_attributes_list:
if each_attribute_set["Name"] == "email":
author_email_address = each_attribute_set["Value"]
break
if author_email_address is not None:
author_name = author_email_address
# Need a value that will change when the user's Gravatar URL should change
# but also should not be a plain MD5 hash of the e-mail address.
# The Gravatar is based on the e-mail address, so this should work.
author_avatar_hash = hashlib.md5("{}{}".format(cognito_user_pool_sub_value, author_email_address)).hexdigest()
message_object = {
"identity-id": cognito_identity_id,
"author-name": author_name,
"author-avatar-hash": author_avatar_hash,
"message": event["request-body"].get("message", ""),
"timestamp": int(time.time())
}
if client_message_id is not None:
message_object["client-message-id"] = client_message_id
try:
response = sns_client.publish(
TopicArn = sns_topic_arn,
Message = json.dumps(message_object)
)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] in ['InvalidParameter', 'AuthorizationError']:
raise APIGatewayException("Room \"{}\" either doesn't exist or you don't have access to it.".format(room_id), 400)
else:
raise
return {
"message-id": response["MessageId"]
}
def generate_room_sns_topic_name(room_id):
return "{}-{}".format(
os.environ["PROJECT_GLOBAL_PREFIX"],
room_id
)
def get_room_topic_arn(event, context, room_id):
sns_topic_name = generate_room_sns_topic_name(room_id)
return "arn:aws:sns:{aws_region}:{aws_account_id}:{sns_topic_name}".format(
aws_region = context.invoked_function_arn.split(":")[3],
aws_account_id = context.invoked_function_arn.split(":")[4],
sns_topic_name = sns_topic_name
)
def proxy_lambda_handler(event, context):
response_headers = get_response_headers(event, context)
try:
return_dict = lambda_handler(event, context)
except APIGatewayException as e:
return {
"statusCode": e.http_status_code,
"headers": response_headers,
"body": json.dumps({
"message": e.http_status_message
})
}
return {
"statusCode": 200,
"headers": response_headers,
"body": json.dumps(return_dict)
}
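# --- Hedged illustration (not part of the original function) ----------------------------
# A minimal API Gateway proxy event shaped the way lambda_handler() expects it; every
# concrete value below is a placeholder. Running the handler for real additionally
# requires AWS credentials and the environment variables referenced above.
if __name__ == "__main__":
    _example_event = {
        "body": json.dumps({"version": "1", "message": "Hello, room!", "client-message-id": "abc-123"}),
        "pathParameters": {"room-id": "example-room"},
        "requestContext": {
            "identity": {
                "cognitoIdentityId": "us-east-1:00000000-0000-0000-0000-000000000000",
                "cognitoIdentityPoolId": "us-east-1:11111111-1111-1111-1111-111111111111",
                "cognitoAuthenticationProvider": "cognito-idp.us-east-1.amazonaws.com/us-east-1_example,cognito-idp.us-east-1.amazonaws.com/us-east-1_example:CognitoSignIn:22222222-2222-2222-2222-222222222222"
            }
        }
    }
    print(json.dumps(_example_event, indent=2))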
|
the-stack_106_21985
|
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.preprocessing.text import Tokenizer
import pandas as pd
import string
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
def preprocess(s):
return stripPunctuation(removeBr(s.lower()))
def stripPunctuation(s):
for c in string.punctuation + "’":
s = s.replace(c, "")
return s
def removeBr(s):
return s.replace("<br /><br />", "")
class Dataset:
def __init__(self, max_seq_len, vocab_size, dataset="imdb"):
self.MAX_SEQ_LEN = max_seq_len
self.VOCAB_SIZE = vocab_size
if dataset == "imdb":
print('Loading IMDB dataset')
df = pd.read_csv('dataset/imdb.csv', names=["X","Y"], skiprows=1)
# cast X to str and preprocess
df['X'] = df.X.apply(str)
df['X'] = df.X.apply(preprocess)
X = df.X
Y = df.Y
# encode labels
label_encoder = LabelEncoder()
Y = label_encoder.fit_transform(Y)
Y = Y.reshape(-1, 1)
            # 85/15 train/test split (15% held out for testing)
self.X_train, self.X_test, self.Y_train, self.Y_test = train_test_split(X, Y, test_size=0.15)
self.tokenizer = Tokenizer(
num_words=self.VOCAB_SIZE, oov_token="<OOV>")
self.tokenizer.fit_on_texts(self.X_train)
self.tokenize()
self.pad()
print(self.X_train[:30])
print(self.Y_train[:30])
def tokenize(self):
self.X_train = self.tokenizer.texts_to_sequences(self.X_train)
self.X_test = self.tokenizer.texts_to_sequences(self.X_test)
def pad(self):
self.X_train = sequence.pad_sequences(self.X_train, maxlen=self.MAX_SEQ_LEN, padding="post")
self.X_test = sequence.pad_sequences(
self.X_test, maxlen=self.MAX_SEQ_LEN, padding="post")
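# --- Hedged usage sketch (not part of the original module) ------------------------------
# Builds the IMDB dataset with example hyper-parameters; the values below are assumptions,
# and dataset/imdb.csv must exist as referenced in __init__.
if __name__ == "__main__":
    data = Dataset(max_seq_len=200, vocab_size=10000, dataset="imdb")
    print(data.X_train.shape, data.Y_train.shape)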
|
the-stack_106_21986
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1GridSearch(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'kind': 'str',
'params': 'dict(str, object)',
'num_runs': 'int',
'seed': 'int',
'concurrency': 'int',
'early_stopping': 'list[object]'
}
attribute_map = {
'kind': 'kind',
'params': 'params',
'num_runs': 'numRuns',
'seed': 'seed',
'concurrency': 'concurrency',
'early_stopping': 'earlyStopping'
}
def __init__(self, kind='grid', params=None, num_runs=None, seed=None, concurrency=None, early_stopping=None, local_vars_configuration=None): # noqa: E501
"""V1GridSearch - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._kind = None
self._params = None
self._num_runs = None
self._seed = None
self._concurrency = None
self._early_stopping = None
self.discriminator = None
if kind is not None:
self.kind = kind
if params is not None:
self.params = params
if num_runs is not None:
self.num_runs = num_runs
if seed is not None:
self.seed = seed
if concurrency is not None:
self.concurrency = concurrency
if early_stopping is not None:
self.early_stopping = early_stopping
@property
def kind(self):
"""Gets the kind of this V1GridSearch. # noqa: E501
:return: The kind of this V1GridSearch. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1GridSearch.
:param kind: The kind of this V1GridSearch. # noqa: E501
:type: str
"""
self._kind = kind
@property
def params(self):
"""Gets the params of this V1GridSearch. # noqa: E501
:return: The params of this V1GridSearch. # noqa: E501
:rtype: dict(str, object)
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this V1GridSearch.
:param params: The params of this V1GridSearch. # noqa: E501
:type: dict(str, object)
"""
self._params = params
@property
def num_runs(self):
"""Gets the num_runs of this V1GridSearch. # noqa: E501
:return: The num_runs of this V1GridSearch. # noqa: E501
:rtype: int
"""
return self._num_runs
@num_runs.setter
def num_runs(self, num_runs):
"""Sets the num_runs of this V1GridSearch.
:param num_runs: The num_runs of this V1GridSearch. # noqa: E501
:type: int
"""
self._num_runs = num_runs
@property
def seed(self):
"""Gets the seed of this V1GridSearch. # noqa: E501
:return: The seed of this V1GridSearch. # noqa: E501
:rtype: int
"""
return self._seed
@seed.setter
def seed(self, seed):
"""Sets the seed of this V1GridSearch.
:param seed: The seed of this V1GridSearch. # noqa: E501
:type: int
"""
self._seed = seed
@property
def concurrency(self):
"""Gets the concurrency of this V1GridSearch. # noqa: E501
:return: The concurrency of this V1GridSearch. # noqa: E501
:rtype: int
"""
return self._concurrency
@concurrency.setter
def concurrency(self, concurrency):
"""Sets the concurrency of this V1GridSearch.
:param concurrency: The concurrency of this V1GridSearch. # noqa: E501
:type: int
"""
self._concurrency = concurrency
@property
def early_stopping(self):
"""Gets the early_stopping of this V1GridSearch. # noqa: E501
:return: The early_stopping of this V1GridSearch. # noqa: E501
:rtype: list[object]
"""
return self._early_stopping
@early_stopping.setter
def early_stopping(self, early_stopping):
"""Sets the early_stopping of this V1GridSearch.
:param early_stopping: The early_stopping of this V1GridSearch. # noqa: E501
:type: list[object]
"""
self._early_stopping = early_stopping
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1GridSearch):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1GridSearch):
return True
return self.to_dict() != other.to_dict()
|
the-stack_106_21989
|
# coding=utf-8
import os
import shutil
import sys
import time
import logging
import cv2
import numpy as np
import tensorflow as tf
sys.path.append(os.getcwd())
from nets import model_train as model
from utils.rpn_msr.proposal_layer import proposal_layer
from utils.text_connector.detectors import TextDetector
logging.getLogger().setLevel(logging.DEBUG)
tf.app.flags.DEFINE_string('video_path', '', '')
tf.app.flags.DEFINE_string('output_path', 'data/res/', '')
tf.app.flags.DEFINE_string('gpu', '0', '')
tf.app.flags.DEFINE_string('checkpoint_path', 'checkpoints_mlt/', '')
FLAGS = tf.app.flags.FLAGS
def resize_image(img):
img_size = img.shape
im_size_min = np.min(img_size[0:2])
im_size_max = np.max(img_size[0:2])
im_scale = float(600) / float(im_size_min)
if np.round(im_scale * im_size_max) > 1200:
im_scale = float(1200) / float(im_size_max)
new_h = int(img_size[0] * im_scale)
new_w = int(img_size[1] * im_scale)
    new_h = new_h if new_h % 16 == 0 else (new_h // 16 + 1) * 16
    new_w = new_w if new_w % 16 == 0 else (new_w // 16 + 1) * 16
re_im = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
return re_im, (new_h / img_size[0], new_w / img_size[1])
def main(argv=None):
if os.path.exists(FLAGS.output_path):
shutil.rmtree(FLAGS.output_path)
os.makedirs(FLAGS.output_path)
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
with tf.get_default_graph().as_default():
input_image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
input_im_info = tf.placeholder(tf.float32, shape=[None, 3], name='input_im_info')
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
bbox_pred, cls_pred, cls_prob = model.model(input_image)
variable_averages = tf.train.ExponentialMovingAverage(0.997, global_step)
saver = tf.train.Saver(variable_averages.variables_to_restore())
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
ckpt_state = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
model_path = os.path.join(FLAGS.checkpoint_path, os.path.basename(ckpt_state.model_checkpoint_path))
logging.info('Restore from {}'.format(model_path))
saver.restore(sess, model_path)
frame_ct = 0
cap = cv2.VideoCapture(FLAGS.video_path)
while(cap.isOpened()):
logging.info('===============')
logging.info('Frame {}'.format(frame_ct))
start = time.time()
                try:
                    ret, im = cap.read()
                except Exception:
                    logging.error("Error reading video frame {}!".format(frame_ct))
                    continue
                if not ret:
                    # no more frames could be read: stop processing
                    break
img, (rh, rw) = resize_image(im)
h, w, c = img.shape
im_info = np.array([h, w, c]).reshape([1, 3])
bbox_pred_val, cls_prob_val = sess.run([bbox_pred, cls_prob],
feed_dict={input_image: [img],
input_im_info: im_info})
textsegs, _ = proposal_layer(cls_prob_val, bbox_pred_val, im_info)
scores = textsegs[:, 0]
textsegs = textsegs[:, 1:5]
textdetector = TextDetector(DETECT_MODE='H')
boxes = textdetector.detect(textsegs, scores[:, np.newaxis], img.shape[:2])
boxes = np.array(boxes, dtype=np.int)
cost_time = (time.time() - start)
logging.debug("cost time: {:.2f}s".format(cost_time))
for i, box in enumerate(boxes):
cv2.polylines(img, [box[:8].astype(np.int32).reshape((-1, 1, 2))], True, color=(0, 255, 0),
thickness=2)
img = cv2.resize(img, None, None, fx=1.0 / rh, fy=1.0 / rw, interpolation=cv2.INTER_LINEAR)
# save images with markup from text localization
save_image_filename = os.path.join(FLAGS.output_path, os.path.basename(FLAGS.video_path) + '.{:06d}.jpg'.format(frame_ct))
logging.debug('Saving image to {}'.format(save_image_filename))
#cv2.imwrite(save_image_filename, img[:, :, ::-1])
cv2.imwrite(save_image_filename, img)
# save text localization boxes
save_boxes_filename = os.path.join(FLAGS.output_path, os.path.basename(FLAGS.video_path) + '.{:06d}.txt'.format(frame_ct))
logging.debug('Saving boxes to {}'.format(save_boxes_filename))
with open(save_boxes_filename, "w") as f:
for i, box in enumerate(boxes):
line = ",".join(str(box[k]) for k in range(8))
line += "," + str(scores[i]) + "\r\n"
f.writelines(line)
frame_ct = frame_ct + 1
cap.release()
if __name__ == '__main__':
tf.app.run()
|
the-stack_106_21991
|
from .base import DiscordModelsBase
from quart import current_app
import discord
from .. import configs
class Guild(DiscordModelsBase):
"""Class representing discord Guild the user is part of.
Operations
----------
x == y
Checks if two guild's are the same.
x != y
Checks if two guild's are not the same.
str(x)
Returns the guild's name.
Attributes
----------
id : int
Discord ID of the guild.
name : str
Name of the guild.
icon_hash : str
Hash of guild's icon.
is_owner : bool
Boolean determining if current user is owner of the guild or not.
permissions : discord.Permissions
An instance of discord.Permissions representing permissions of current user in the guild.
"""
MANY = True
ROUTE = "/users/@me/guilds"
def __init__(self, payload):
super().__init__(payload)
self.id = int(self._payload["id"])
self.name = self._payload["name"]
self.icon_hash = self._payload.get("icon")
self.is_owner = self._payload.get("owner")
self.permissions = self.__get_permissions(self._payload.get("permissions"))
@staticmethod
def __get_permissions(permissions_value):
if permissions_value is None:
return
return discord.Permissions(int(permissions_value))
def __str__(self):
return self.name
def __eq__(self, guild):
return isinstance(guild, Guild) and guild.id == self.id
def __ne__(self, guild):
return not self.__eq__(guild)
@property
def icon_url(self):
"""A property returning direct URL to the guild's icon. Returns None if guild has no icon set."""
if not self.icon_hash:
return
return configs.DISCORD_GUILD_ICON_BASE_URL.format(guild_id=self.id, icon_hash=self.icon_hash)
@classmethod
def fetch_from_api(cls, cache=True):
"""A class method which returns an instance or list of instances of this model by implicitly making an
        API call to Discord. If an instance of :py:class:`quart_discord.User` that belongs to these guilds
        exists in the internal users cache, then the cached property :py:attr:`quart_discord.User.guilds` is updated.
Parameters
----------
cache : bool
Determines if the :py:attr:`quart_discord.User.guilds` cache should be updated with the new guilds.
Returns
-------
list[quart_discord.Guild, ...]
List of instances of :py:class:`quart_discord.Guild` to which this user belongs.
"""
guilds = super().fetch_from_api()
if cache:
user = current_app.discord.users_cache.get(current_app.discord.user_id)
try:
user.guilds = {guild.id: guild for guild in guilds}
except AttributeError:
pass
return guilds
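# --- Hedged usage sketch (not part of the original module) ------------------------------
# Inside a Quart route of an application already configured with quart_discord, the
# current user's guilds can be listed via the classmethod above; the route and app names
# are assumptions.
#
#     @app.route("/my-guilds/")
#     async def my_guilds():
#         guilds = Guild.fetch_from_api()
#         return "<br />".join(g.name for g in guilds)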
|
the-stack_106_21992
|
"""Install SciencePlots.
This will copy the *.mplstyle files into the appropriate directory.
This code is based on a StackOverflow answer:
https://stackoverflow.com/questions/31559225/how-to-ship-or-distribute-a-matplotlib-stylesheet
"""
import atexit
import glob
import os
import shutil
import matplotlib
from setuptools import setup
from setuptools.command.install import install
# Get description from README
root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
def install_styles():
# Find all style files
stylefiles = glob.glob('styles/**/*.mplstyle', recursive=True)
# Find stylelib directory (where the *.mplstyle files go)
mpl_stylelib_dir = os.path.join(matplotlib.get_configdir() ,"stylelib")
if not os.path.exists(mpl_stylelib_dir):
os.makedirs(mpl_stylelib_dir)
# Copy files over
print("Installing styles into", mpl_stylelib_dir)
for stylefile in stylefiles:
print(os.path.basename(stylefile))
shutil.copy(
stylefile,
os.path.join(mpl_stylelib_dir, os.path.basename(stylefile)))
class PostInstallMoveFile(install):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
atexit.register(install_styles)
setup(
name='SciencePlots',
version='1.0.3',
author="John Garrett",
author_email="[email protected]",
description="Format Matplotlib for scientific plotting",
long_description=long_description,
long_description_content_type='text/markdown',
license="MIT",
keywords=[
"matplotlib-style-sheets",
"matplotlib-figures",
"scientific-papers",
"thesis-template",
"matplotlib-styles",
"python"
],
url="https://github.com/garrettj403/SciencePlots/",
install_requires=['matplotlib',],
cmdclass={'install': PostInstallMoveFile,},
)
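# --- Hedged usage note (not part of setup.py) --------------------------------------------
# After installation the copied *.mplstyle sheets are addressable by name in Matplotlib,
# for example:
#
#     import matplotlib.pyplot as plt
#     plt.style.use('science')
#
# 'science' is assumed here to be one of the sheets shipped under styles/.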
|
the-stack_106_21998
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_bzoinq
----------------------------------
Tests for `bzoinq` module.
"""
import pytest
from bzoinq import bzoinq
# @pytest.fixture
# def response():
# """Sample pytest fixture.
# See more at: http://doc.pytest.org/en/latest/fixture.html
# """
# # import requests
# # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
#
#
# def test_content(response):
# """Sample pytest test function with the pytest fixture as an argument.
# """
# # from bs4 import BeautifulSoup
# # assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_to_datetime():
import datetime
mytime = "2017-10-1 10:20:00"
assert bzoinq.to_datetime(mytime) == datetime.datetime(2017, 10, 1, 10, 20, 0)
def test_sound_and_task():
a = bzoinq.Bzoinq()
a.create_task()
# test that the first id is 1
assert a.task_id == 1
def test_monitor():
import time
a = bzoinq.Bzoinq()
a.create_task("First task")
b = bzoinq.Monitor(a)
b.start()
time.sleep(5)
b.stop()
def test_two_tasks():
import datetime
import time
current_time = datetime.datetime.now()
time_in_10 = current_time + datetime.timedelta(seconds=10)
time_in_5 = current_time + datetime.timedelta(seconds=5)
a = bzoinq.Bzoinq()
a.create_task("10 seconds task", time_in_10)
a.create_task("5 seconds task", time_in_5)
b = bzoinq.Monitor(a)
b.start()
time.sleep(15)
b.stop()
def test_monitor_again():
import time
a = bzoinq.Bzoinq()
b = bzoinq.Monitor(a)
b.start()
a.create_task("Task to test the Monitor")
time.sleep(3)
a.create_task("Second task to test the Monitor")
time.sleep(3)
b.stop()
def testfunction():
def printme():
print("function run ok")
import time
a = bzoinq.Bzoinq()
b = bzoinq.Monitor(a)
b.start()
a.create_task("Testing a function", function=printme)
time.sleep(2)
b.stop()
|
the-stack_106_21999
|
import os
import sys
import glob
import math
from random import shuffle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import nibabel as nib
import torch
import torch.nn as nn
import util
from model import UNet3D
def get_data_and_target(synb0prep_dir_path, device):
# Get paths
T1_path = os.path.join(synb0prep_dir_path, 'T1_norm_lin_atlas_2_5.nii.gz')
b0_d_path = os.path.join(synb0prep_dir_path, 'b0_d_lin_atlas_2_5.nii.gz')
b0_u_path = os.path.join(synb0prep_dir_path, 'b0_u_lin_atlas_2_5.nii.gz')
mask_path = os.path.join(synb0prep_dir_path, 'mask_lin.nii.gz')
# Get image
img_T1 = np.expand_dims(util.get_nii_img(T1_path), axis=3)
img_b0_d = np.expand_dims(util.get_nii_img(b0_d_path), axis=3)
img_b0_u = np.expand_dims(util.get_nii_img(b0_u_path), axis=3)
img_mask = np.expand_dims(util.get_nii_img(mask_path), axis=3)
# Pad array since I stupidly used template with dimensions not factorable by 8
# Assumes input is (77, 91, 77) and pad to (80, 96, 80) with zeros
img_T1 = np.pad(img_T1, ((2, 1), (3, 2), (2, 1), (0, 0)), 'constant')
img_b0_d = np.pad(img_b0_d, ((2, 1), (3, 2), (2, 1), (0, 0)), 'constant')
img_b0_u = np.pad(img_b0_u, ((2, 1), (3, 2), (2, 1), (0, 0)), 'constant')
img_mask = np.pad(img_mask, ((2, 1), (3, 2), (2, 1), (0, 0)), 'constant')
# Convert to torch img format
img_T1 = util.nii2torch(img_T1)
img_b0_d = util.nii2torch(img_b0_d)
img_b0_u = util.nii2torch(img_b0_u)
img_mask = util.nii2torch(img_mask) != 0
# Normalize data
img_T1 = util.normalize_img(img_T1, 150, 0, 1, -1) # Based on freesurfers T1 normalization
max_img_b0_d = np.percentile(img_b0_d, 99) # This usually makes majority of CSF be the upper bound
min_img_b0_d = 0 # Assumes lower bound is zero (direct from scanner)
img_b0_d = util.normalize_img(img_b0_d, max_img_b0_d, min_img_b0_d, 1, -1)
img_b0_u = util.normalize_img(img_b0_u, max_img_b0_d, min_img_b0_d, 1, -1) # Use min() and max() from distorted data
# Set "data" and "target"
img_data = np.concatenate((img_b0_d, img_T1), axis=1)
img_target = img_b0_u
# Send data to device
img_data = torch.from_numpy(img_data).float().to(device)
img_target = torch.from_numpy(img_target).float().to(device)
img_mask = torch.from_numpy(np.array(img_mask, dtype=np.uint8))
return img_data, img_target, img_mask
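# Note (added for clarity): the padding in get_data_and_target takes the atlas grid
# from (77, 91, 77) to (80, 96, 80) so every spatial dimension is divisible by 8,
# which a UNet with three levels of 2x pooling requires; the exact constraint is an
# assumption here and depends on the UNet3D definition in model.py.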
def compute_loss(derivatives_path, model, device):
# Get blip directories
synb0prep_dir_paths = glob.glob(os.path.join(derivatives_path, 'synb0prep_*'))
# Get predicted images and masks
img_models = []
img_targets = []
img_masks = []
for synb0prep_dir_path in synb0prep_dir_paths:
# Get data, target, and mask
img_data, img_target, img_mask = get_data_and_target(synb0prep_dir_path, device)
# Pass through model
img_model = model(img_data)
# Append
img_models.append(img_model)
img_targets.append(img_target)
img_masks.append(img_mask)
# Compute loss
loss = torch.zeros(1, 1, device=device) # Initialize to zero
# First, get "truth loss"
for idx in range(len(synb0prep_dir_paths)):
# Get model, target, and mark
img_model = img_models[idx]
img_target = img_targets[idx]
img_mask = img_masks[idx]
# Compute loss
loss += F.mse_loss(img_model[img_mask], img_target[img_mask])
# Divide loss by number of synb0prep directories
loss /= len(synb0prep_dir_paths)
# Next, get "difference loss"
if len(synb0prep_dir_paths) == 2:
# Get model, target, and mark
img_model1 = img_models[0]
img_model2 = img_models[1]
img_mask = img_masks[0] & img_masks[1]
# Add difference loss
loss += F.mse_loss(img_model1[img_mask], img_model2[img_mask])
elif len(synb0prep_dir_paths) == 1:
pass # Don't add any difference loss
else:
        raise RuntimeError(derivatives_path + ': Only single and double blips are supported')
return loss
def train(derivatives_path, model, device, optimizer):
# Train mode
model.train()
# Zero gradient
optimizer.zero_grad()
# Compute loss
loss = compute_loss(derivatives_path, model, device)
# Compute gradient
loss.backward()
# Step optimizer
optimizer.step()
# Return loss
return loss.item()
def validate(derivatives_path, model, device):
# Eval mode
model.eval()
# Compute loss
loss = compute_loss(derivatives_path, model, device)
# Return loss
return loss.item()
if __name__ == '__main__':
# Get input arguments ----------------------------------#
learning_subjects_path = sys.argv[1]
test_subjects_path = sys.argv[2]
num_fold = int(sys.argv[3])
total_folds = int(sys.argv[4])
results_dir_path = sys.argv[5]
print('Learning subjects path: ' + str(learning_subjects_path))
print('Test subjects path: ' + str(test_subjects_path))
print('Fold number: ' + str(num_fold))
print('Total folds: ' + str(total_folds))
print('Results dir path: ' + results_dir_path)
# Handle training/validation/test lists ----------------#
# Read learning subjects
with open(learning_subjects_path, 'r') as f:
learning_subjects = np.asarray(f.read().splitlines())
# Get fold indices
idx_folds = np.array_split(np.arange(learning_subjects.size), total_folds)
# Get validation indices
idx_validation = idx_folds[num_fold-1]
# Get training indices
del idx_folds[num_fold - 1]
idx_training = np.concatenate(idx_folds)
# Get training and validation subjects
training_subjects = learning_subjects[idx_training]
validation_subjects = learning_subjects[idx_validation]
# Read test subjects
with open(test_subjects_path, 'r') as f:
test_subjects = np.asarray(f.read().splitlines())
print('Training subjects: ' + str(training_subjects))
print('Validation subjects: ' + str(validation_subjects))
print('Test subjects: ' + str(test_subjects))
# Set params -------------------------------------------#
seed = 1
num_epochs = 100
lr = 0.0001
betas = (0.9, 0.999)
weight_decay = 1e-5
print('Seed: ' + str(seed))
print('num_epochs: ' + str(num_epochs))
print('learning rate: ' + str(lr))
print('betas: ' + str(betas))
print('weight decay: ' + str(weight_decay))
# Run code ---------------------------------------------#
# Get output prefix
prefix = '_'.join(['num_fold', str(num_fold),
'total_folds', str(total_folds),
'seed', str(seed),
'num_epochs', str(num_epochs),
'lr', str(lr),
'betas', str(betas),
'weight_decay', str(weight_decay)])
prefix = os.path.join(results_dir_path, prefix)
# Log training and validation curve
train_curve_path = prefix + '_train.txt'
validation_curve_path = prefix + '_validation.txt'
test_path = prefix + '_test.txt'
open(train_curve_path, 'w').close()
open(validation_curve_path, 'w').close()
open(test_path, 'w').close()
# Set seed
torch.manual_seed(seed)
# Get device
device = torch.device("cuda")
# Get model
model = UNet3D(2, 1).to(device)
# Get optimizer
optimizer = optim.Adam(model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
# Train
model_path_best = ''
l_validation_best = float("inf")
for num_epoch in range(num_epochs):
print('Epoch: ' + str(num_epoch))
# Train -------------------------------------------#
# Jumble data set for each epoch
shuffle(training_subjects)
l_train_total = 0
num_train_total = 0
for training_subject in training_subjects:
# Get sessions
training_sessions = glob.glob(os.path.join(training_subject, '*'))
for training_session in training_sessions:
# Get derivatives
derivatives_path = os.path.join(training_session, 'derivatives')
# Train
l_train = train(derivatives_path, model, device, optimizer)
# Sum loss
l_train_total += l_train
# Increment
num_train_total += 1
l_train_mean = l_train_total/num_train_total
print('Training loss: ' + str(l_train_mean))
with open(train_curve_path, "a") as f:
f.write(str(l_train_mean) + '\n')
# Validate ----------------------------------------#
l_validation_total = 0
num_validation_total = 0
for validation_subject in validation_subjects:
# Get sessions
validation_sessions = glob.glob(os.path.join(validation_subject, '*'))
for validation_session in validation_sessions:
# Get derivatives
derivatives_path = os.path.join(validation_session, 'derivatives')
# Validate
l_validation = validate(derivatives_path, model, device)
# Sum loss
l_validation_total += l_validation
# Increment
num_validation_total += 1
l_validation_mean = l_validation_total/num_validation_total
print('Validation loss: ' + str(l_validation_mean))
with open(validation_curve_path, "a") as f:
f.write(str(l_validation_mean) + '\n')
# Check if this is better
if l_validation_mean < l_validation_best:
print('Validation improved... check pointing')
# Save
model_path_best = prefix + '_num_epoch_' + str(num_epoch) + '.pth'
torch.save(model.state_dict(), model_path_best)
# Update
l_validation_best = l_validation_mean
# Test ----------------------------------------#
print('Performing test on best validation model: ' + model_path_best)
# Load best model
model.load_state_dict(torch.load(model_path_best))
# Test
l_test_total = 0
num_test_total = 0
for test_subject in test_subjects:
# Get sessions
test_sessions = glob.glob(os.path.join(test_subject, '*'))
for test_session in test_sessions:
# Get derivatives
derivatives_path = os.path.join(test_session, 'derivatives')
# test
l_test = validate(derivatives_path, model, device)
# Sum loss
l_test_total += l_test
# Increment
num_test_total += 1
l_test_mean = l_test_total/num_test_total
print('test loss: ' + str(l_test_mean))
with open(test_path, "a") as f:
f.write(str(l_test_mean) + '\n')
|
the-stack_106_22000
|
import editor
from common import msg, utils, shared as G
from collections import defaultdict
vim = None
# Foreground: background
COLORS = (
('white', 'red'),
('black', 'yellow'),
('black', 'green'),
('white', 'blue'),
)
HL_RULES = ['ctermfg=%s ctermbg=%s guifg=%s guibg=%s' % (fg, bg, fg, bg) for fg, bg in COLORS]
def user_id_to_region(user_id):
return "floobitsuser%s" % user_id
def vim_buf_to_text(vim_buf):
# Work around EOF new line handling in Vim. Vim always puts a newline at the end of a file,
# but never exposes that newline in the view text.
tail = '\n'
if vim_buf[-1] == '':
tail = ''
text = '\n'.join(vim_buf[:]) + tail
return text.decode('utf-8')
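# Illustration (added for clarity): both ['a', 'b'] and ['a', 'b', ''] convert to
# "a\nb\n", mirroring how Vim hides the implicit newline at the end of a file.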
class View(object):
"""editors representation of the buffer"""
current_highlights = defaultdict(list)
pending_highlights = {}
def __init__(self, vim_buf):
self.vim_buf = vim_buf
def __repr__(self):
return '%s %s' % (self.native_id, self.vim_buf.name)
def __str__(self):
return repr(self)
def _offset_to_vim(self, offset):
current_offset = 0
for line_num, line in enumerate(self.vim_buf):
next_offset = len(line) + 1
if current_offset + next_offset > offset:
break
current_offset += next_offset
col = offset - current_offset
msg.debug('offset %s is line %s column %s' % (offset, line_num + 1, col + 1))
return line_num + 1, col + 1
@property
def native_id(self):
return self.vim_buf.number
def is_loading(self):
return False
def get_text(self):
return vim_buf_to_text(self.vim_buf)
def update(self, data, message=True):
self.set_text(data["buf"])
def set_text(self, text):
msg.debug('About to patch %s %s' % (str(self), self.vim_buf.name))
lines = text.encode('utf-8').split('\n')
new_len = len(lines)
end = start = -1
i = 0
def stomp_buffer():
msg.debug('Stomping buffer.')
G.AGENT.patching += 1
self.vim_buf[:] = lines
try:
if new_len != len(self.vim_buf):
stomp_buffer()
return
while i < new_len:
if lines[i] != self.vim_buf[i]:
msg.debug('Lines are not the same. "%s" "%s"' % (self.vim_buf[i], lines[i]))
if start > -1:
if end > -1:
stomp_buffer() # More than one contiguous change in patch.
return
else:
start = i
else:
msg.debug('Lines are the same. "%s"' % lines[i])
if start > -1 and end == -1:
end = i
i += 1
if start == -1 and end == -1:
msg.debug("Nothing to do here, buffers are the same.")
return
if start > -1 and end == -1:
end = i
msg.debug('Stomping lines %d to %d: "%s" -> "%s"' % (start, end, self.vim_buf[start:end],
lines[start:end]))
G.AGENT.patching += 1
self.vim_buf[start:end] = lines[start:end]
except Exception as e:
msg.error('Couldn\'t apply patches because: %s!\nThe unencoded text was: "%s"' % (
str(e), text))
raise
msg.debug('All done patching.')
def set_read_only(self, read_only=True):
pass
def set_status(self, *args):
pass
def apply_patches(self, buf, patches, username):
cursor_offset = self.get_cursor_offset()
msg.debug('cursor offset is %s bytes' % cursor_offset)
self.set_text(patches[0])
for patch in patches[2]:
offset = patch[0]
length = patch[1]
patch_text = patch[2]
if cursor_offset > offset:
new_offset = len(patch_text) - length
cursor_offset += new_offset
self.set_cursor_position(cursor_offset)
def focus(self):
editor.open_file(self.vim_buf.name)
def set_cursor_position(self, offset):
line_num, col = self._offset_to_vim(offset)
command = ':silent! call setpos(".", [%s, %s, %s, %s])' % (self.native_id, line_num, col, 0)
msg.debug('setting pos: %s' % command)
vim.command(command)
def get_cursor_offset(self):
return int(vim.eval('line2byte(line("."))+col(".")')) - 2
def get_selections(self):
# Vim likes to return strings for numbers even if you use str2nr:
return [[int(pos) for pos in range_] for range_ in vim.eval("g:FloobitsGetSelection()")]
def clear_highlight(self, user_id):
msg.debug('clearing selections for user %s in view %s' % (user_id, self.vim_buf.name))
if user_id not in self.current_highlights:
return
for hl in self.current_highlights[user_id]:
vim.command(":silent! :call matchdelete(%s)" % (hl,))
del self.current_highlights[user_id]
def clear_all_highlights(self):
for user_id in self.current_highlights.keys():
self.clear_highlight(user_id)
def highlight(self, ranges, user_id):
msg.debug("got a highlight %s" % ranges)
def doit():
msg.debug("doing timed highlights")
stored_ranges = self.pending_highlights[user_id]
del self.pending_highlights[user_id]
self._set_highlight(stored_ranges, user_id)
if user_id not in self.pending_highlights:
utils.set_timeout(doit, 150)
self.pending_highlights[user_id] = ranges
def _set_highlight(self, ranges, user_id):
msg.debug('highlighting ranges %s' % (ranges))
if vim.current.buffer.number != self.vim_buf.number:
return
region = user_id_to_region(user_id)
hl_rule = HL_RULES[user_id % len(HL_RULES)]
vim.command(":silent! highlight %s %s" % (region, hl_rule))
self.clear_highlight(user_id)
for _range in ranges:
start_row, start_col = self._offset_to_vim(_range[0])
end_row, end_col = self._offset_to_vim(_range[1])
if start_row == end_row and start_col == end_col:
if end_col >= len(self.vim_buf[end_row - 1]):
end_row += 1
end_col = 1
else:
end_col += 1
vim_region = "matchadd('{region}', '\%{start_row}l\%{start_col}v\_.*\%{end_row}l\%{end_col}v', 100)".\
format(region=region, start_row=start_row, start_col=start_col, end_row=end_row, end_col=end_col)
msg.debug("vim_region: %s" % (vim_region,))
try:
self.current_highlights[user_id].append(vim.eval(vim_region))
except vim.api.NvimError:
pass
def rename(self, name):
msg.debug('renaming %s to %s' % (self.vim_buf.name, name))
current = vim.current.buffer
text = self.get_text()
old_name = self.vim_buf.name
old_number = self.native_id
with open(name, 'wb') as fd:
fd.write(text.encode('utf-8'))
vim.command('edit! %s' % name)
self.vim_buf = vim.current.buffer
vim.command('edit! %s' % current.name)
try:
vim.command('bdelete! %s' % old_number)
except Exception as e:
msg.debug("couldn't bdelete %s... maybe thats OK? err: %s" % (old_number, str(e)))
try:
utils.rm(old_name)
except Exception as e:
msg.debug("couldn't delete %s... maybe thats OK? err: %s" % (old_name, str(e)))
def save(self):
# TODO: switch to the correct buffer, then save, then switch back (or use writefile)
if vim.current.buffer.name != self.vim_buf.name:
return
try:
vim.command('silent w!')
except Exception as e:
msg.log('Error saving %s: %s' % (self.vim_buf.name, str(e)))
def file_name(self):
return self.vim_buf.name
|
the-stack_106_22001
|
data = [[float(y) for y in x.strip().split(', ')] for x in open('block_datadump.csv').readlines()]
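# Note (added for clarity): for each 200k-block window this script fits an ordinary
# least squares line of uncle rate (y) on gas used (x), using the closed-form
# estimates m = cov(x, y) / var(x) and b = mean_y - m * mean_x.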
for i in range(0, 2283416, 200000):
print('Checking 200k blocks from %d' % i)
dataset = []
totuncles, totuncreward = 0, 0
totbs = [0 for j in range(40)]
totus = [0 for j in range(40)]
for num, uncs, uncrew, uncgas, txs, gas, length, zeroes in data[i:i+200000]:
dataset.append([gas, 0])
        for _ in range(int(uncs)):
            dataset.append([uncgas / uncs * 1.0, 1])
totuncles += uncs
totuncreward += uncrew
totus[int(gas / 100000)] += uncs
totbs[int(gas / 100000)] += 1
print([totus[j] * 100.0 / (totbs[j] + 0.000000001) for j in range(40)])
print('Average uncle reward:', totuncreward * 1.0 / totuncles)
print('Average nephew reward:', totuncles * 5 / 32. / len(dataset))
mean_x = sum([x[0] for x in dataset]) * 1.0 / len(dataset)
mean_y = sum([x[1] for x in dataset]) * 1.0 / len(dataset)
print('Average gas used:', mean_x)
print('Average uncle rate:', mean_y)
covar = sum([(x[0] - mean_x) * (x[1] - mean_y) for x in dataset])
var = sum([(x[0] - mean_x) ** 2 for x in dataset])
print('m = ', covar / var)
print('b = ', mean_y - mean_x * (covar / var))
|
the-stack_106_22003
|
# encoding: utf-8
import re
import subprocess
import sys
import tempfile
from textwrap import dedent
from bpython import args
from bpython.test import (FixLanguageTestCase as TestCase, unittest)
try:
from nose.plugins.attrib import attr
except ImportError:
def attr(*args, **kwargs):
def identity(func):
return func
return identity
@attr(speed='slow')
class TestExecArgs(unittest.TestCase):
def test_exec_dunder_file(self):
with tempfile.NamedTemporaryFile(mode="w") as f:
f.write(dedent("""\
import sys
sys.stderr.write(__file__)
sys.stderr.flush()"""))
f.flush()
p = subprocess.Popen(
[sys.executable] +
(['-W', 'ignore'] if sys.version_info[:2] == (2, 6) else []) +
["-m", "bpython.curtsies", f.name],
stderr=subprocess.PIPE,
universal_newlines=True)
(_, stderr) = p.communicate()
self.assertEqual(stderr.strip(), f.name)
def test_exec_nonascii_file(self):
with tempfile.NamedTemporaryFile(mode="w") as f:
f.write(dedent('''\
#!/usr/bin/env python
# coding: utf-8
"你好 # nonascii"
'''))
f.flush()
try:
subprocess.check_call([
sys.executable, '-m', 'bpython.curtsies',
f.name])
except subprocess.CalledProcessError:
self.fail('Error running module with nonascii characters')
def test_exec_nonascii_file_linenums(self):
with tempfile.NamedTemporaryFile(mode="w") as f:
f.write(dedent("""\
#!/usr/bin/env python
# coding: utf-8
1/0
"""))
f.flush()
p = subprocess.Popen(
[sys.executable, "-m", "bpython.curtsies",
f.name],
stderr=subprocess.PIPE,
universal_newlines=True)
(_, stderr) = p.communicate()
self.assertIn('line 3', clean_colors(stderr))
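# clean_colors (below) strips ANSI escape sequences such as "\x1b[31m" so the
# assertions above can match plain text in bpython's colored traceback output.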
def clean_colors(s):
return re.sub(r'\x1b[^m]*m', '', s)
class TestParse(TestCase):
def test_version(self):
with self.assertRaises(SystemExit):
args.parse(['--version'])
|
the-stack_106_22005
|
# =================================================================
#
# Authors: Ian Edwards
#
# Copyright (c) 2020, OpenCDMS Project
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# NOTE: Currently this module only contains a provider for "MIDAS Open"
import logging
import os
from .base import CDMSProvider
from ..fileformats.text import read_badc
LOGGER = logging.getLogger(__name__)
# MIDAS Open paths and filenames are of the form:
# badc/ukmo-midas-open/data/
# uk-daily-weather-obs/dataset-version-201908/
# berkshire/00838_bracknell-beaufort-park/
# qc-version-1/
#
# midas-open_uk-daily-weather-obs_dv-201908_
# berkshire_00838_bracknell-beaufort-park_qcv-1_1991.csv
DEFAULT_PATH = os.path.join("badc", "ukmo-midas-open", "data")
DEFAULT_DATASET_VERSION = "201908"
DEFAULT_QC_VERSION = 1
element_lookup = {
"wind_speed": {"hourly": "uk-hourly-weather-obs"},
"wind_direction": {"hourly": "uk-hourly-weather-obs"},
'mean_wind_speed': {'hourly': 'uk-mean-wind-obs'},
'mean_wind_dir': {'hourly': 'uk-mean-wind-obs'},
'prcp_amt': {'hourly': 'uk-hourly-rain-obs', 'daily': 'uk-daily-rain-obs'},
'max_air_temp': {'daily': 'uk-daily-temperature-obs'},
'min_air_temp': {'daily': 'uk-daily-temperature-obs'},
'glbl_irad_amt': {'daily': 'uk-radiation-obs'},
'difu_irad_amt': {'daily': 'uk-radiation-obs'},
'q5cm_soil_temp': {'daily': 'uk-soil-temperature-obs'},
'q10cm_soil_temp': {'daily': 'uk-soil-temperature-obs'},
}
station_county_lookup = {
838: "berkshire",
}
station_filename_lookup = {
838: "00838_bracknell-beaufort-park",
}
date_time_column_lookup = {
'uk-daily-rain-obs': 'ob_date',
'uk-daily-temperature-obs': 'ob_end_time',
'uk-daily-weather-obs': 'ob_end_time',
'uk-hourly-rain-obs': 'ob_end_time',
'uk-hourly-weather-obs': 'ob_time',
'uk-mean-wind-obs': 'ob_end_time',
'uk-radiation-obs': 'ob_end_time',
'uk-soil-temperature-obs': 'ob_time'
}
valid_dataset_versions = ["201901", "201908"]
valid_qc_versions = [0, 1]
class MidasOpen(CDMSProvider):
"""Provider for MIDAS Open data"""
def __init__(self, connection_string, *args, **kwargs):
self.connection_string = connection_string
def obs(self, src_id, elements, period, qc_version=None, **kwargs):
"""Return observatons as Pandas DataFrame
Args:
src_id (int): The ID of the required station
elements (list): List of elements to return
period (str): Either 'hourly' or 'daily'
qc_version (int): 0 or 1
Returns:
            DataFrame: Pandas DataFrame containing observations data
"""
if "year" not in kwargs.keys():
raise ValueError(
"NOTE: Currently you must supply a year, e.g. year=1991")
year = kwargs["year"]
for element in elements:
if element not in element_lookup:
raise ValueError('"{}" element not recognised'.format(element))
qc_version = DEFAULT_QC_VERSION if qc_version is None else qc_version
dataset_version = kwargs.get(
"dataset_version", DEFAULT_DATASET_VERSION)
if src_id not in station_county_lookup:
raise ValueError("Station ID not recognised")
if period not in element_lookup[element]:
raise ValueError('"{} period not available for {} element'.format(
period, element))
if qc_version not in valid_qc_versions:
raise ValueError(
"qc_version must be one of: {}".format(
", ".join(map(str, valid_qc_versions))
)
)
if dataset_version not in valid_dataset_versions:
raise ValueError(
"dataset_version must be one of: {}".format(
", ".join(valid_dataset_versions)
)
)
station_county = station_county_lookup[src_id]
station_filename = station_filename_lookup[src_id]
# The following code should be moved to a get_path() method
directory = os.path.join(
DEFAULT_PATH,
element_lookup[element][period],
"dataset-version-{}".format(dataset_version),
station_county,
station_filename,
"qc-version-{}".format(qc_version),
)
filename = "_".join(
[
"midas-open",
element_lookup[element][period],
"dv-{}".format(dataset_version),
station_county,
station_filename,
"qcv-{}".format(qc_version),
"{}.csv".format(year),
]
)
filepath = os.path.join(self.connection_string, directory, filename)
return read_badc(filepath, usecols=[
"src_id",
date_time_column_lookup[element_lookup[element][period]],
*elements
])
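# A minimal usage sketch (added for illustration; the root path below is hypothetical
# and assumes the MIDAS Open archive has been extracted locally with station 838 and
# year 1991 available, matching the path example at the top of this module).
if __name__ == "__main__":
    provider = MidasOpen("/path/to/local/midas-open-root")
    observations = provider.obs(
        src_id=838,
        elements=["prcp_amt"],
        period="daily",
        qc_version=1,
        year=1991,
    )
    print(observations.head())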
|
the-stack_106_22006
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def get_laststep(model, lens):
shape = model.size()
idx = (lens - 1).view(-1, 1).expand(shape[0], model.size(2)).unsqueeze(1)
model = model.gather(1, Variable(idx)).squeeze(1)
return model
def pooled_output(model):
# global average pooling
avg_pool = torch.mean(model, 1)
# global max pooling
max_pool, _ = torch.max(model, 1)
model = torch.cat((max_pool, avg_pool), 1)
return model
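# A minimal usage sketch (added for illustration): for a batch of sequence outputs of
# shape (batch, seq_len, hidden), pooled_output returns (batch, 2 * hidden) and
# get_laststep returns the last valid step per sequence as (batch, hidden).
if __name__ == "__main__":
    outputs = torch.randn(4, 10, 32)
    lengths = torch.tensor([10, 7, 3, 10])
    assert pooled_output(outputs).shape == (4, 64)
    assert get_laststep(outputs, lengths).shape == (4, 32)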
|
the-stack_106_22007
|
# coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for gene expression from DNA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import contrib
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow.compat.v1 as tf
@registry.register_model
class GeneExpressionConv(t2t_model.T2TModel):
"""Gene expression conv net.
Based on "Basenji" model from
http://www.biorxiv.org/content/early/2017/07/10/161851
Uses layer_norm instead of batch_norm.
Model expects that if targets are of length m, inputs are of length 32*m. The
original data expected that inputs would be of length 128*m, but the data has
been preprocessed to chunk every 4 bases into 1 ID (see
data_generators/gene_expression.py).
The magnitude of the length reduction is controlled by the pooling sizes
(hparams.pooling_windows) at each conv layer (hparams.num_conv_layers).
"""
def body(self, features):
inputs = features["inputs"]
inputs.get_shape().assert_has_rank(4)
hp = self._hparams
out = inputs
out = common_layers.flatten4d3d(out)
# Conv layers
assert hp.num_conv_layers == len(hp.pooling_windows)
for i in range(hp.num_conv_layers):
out = conv_layer(
out,
hp.hidden_size,
hp.kernel_width,
hp.stride,
hp.pooling_windows[i],
hp.dropout,
dilation_rate=1,
name="conv_%d" % (i + 1))
# Dense dilated conv layers
for i in range(hp.num_dconv_layers):
dilation_rate = 2**(i + 1)
dconv_out = conv_layer(
out,
hp.hidden_size,
hp.kernel_width,
stride=1,
pooling_window=0,
dropout_rate=hp.dropout,
dilation_rate=dilation_rate,
name="dconv_%d" % (i + 1))
out = tf.concat([out, dconv_out], axis=2)
# Fully connected layer
out = fc_layer(out, hp.hidden_size, hp.dropout, name="fc")
out.get_shape().assert_has_rank(3)
out = tf.expand_dims(out, 2)
return out
def conv_layer(x,
hidden_size,
kernel_size,
stride,
pooling_window,
dropout_rate,
dilation_rate,
name="conv"):
"""Single conv layer with relu, optional pooling, and dropout."""
with tf.variable_scope(name):
out = x
out = common_layers.conv1d_block(
out,
hidden_size, [(dilation_rate, kernel_size)],
strides=stride,
first_relu=False,
padding="same")
out = tf.nn.relu(out)
if pooling_window:
out = tf.layers.max_pooling1d(
out, pooling_window, pooling_window, padding="same")
out = tf.layers.dropout(out, dropout_rate)
return out
def fc_layer(x, num_out, dropout_rate, name="fc"):
with tf.variable_scope(name):
out = x
out = tf.layers.dense(out, num_out)
out = contrib.layers().layer_norm(out)
out = tf.nn.relu(out)
out = tf.layers.dropout(out, dropout_rate)
return out
@registry.register_hparams
def gene_expression_conv_base():
"""Hparams for GeneExpressionConv model."""
hparams = common_hparams.basic_params1()
batch_size = 10
output_length = 2048
inputs_per_output = 128
chunk_size = 4
input_length = output_length * inputs_per_output // chunk_size
hparams.batch_size = input_length * batch_size
hparams.dropout = 0.1
hparams.add_hparam("num_conv_layers", 4)
hparams.add_hparam("num_dconv_layers", 7)
# The product of these pooling windows should match
# input_length/target_length.
hparams.add_hparam("pooling_windows", [2, 2, 2, 4])
hparams.hidden_size = 256
hparams.kernel_width = 20
hparams.add_hparam("stride", 1)
return hparams
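# A small consistency-check sketch (added for illustration, not part of the original
# model): the product of hparams.pooling_windows must equal the input/output length
# ratio, i.e. inputs_per_output / chunk_size = 128 / 4 = 32 = 2 * 2 * 2 * 4 above.
def pooling_windows_consistent(pooling_windows, inputs_per_output=128, chunk_size=4):
  """Returns True if the pooling windows reduce inputs to the target length."""
  product = 1
  for window in pooling_windows:
    product *= window
  return product == inputs_per_output // chunk_size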
|
the-stack_106_22009
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
compress_method = zipfile.ZipFile
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
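# Illustration (added for clarity, not part of pandas): with the default rtol=1e-5,
# values differing only beyond roughly the fifth significant digit compare equal, e.g.
#
#   assert_almost_equal(1.000001, 1.0000015)   # passes
#   assert_almost_equal(3.0, 3.001)            # raises AssertionError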
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
        :meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
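# Example (added for clarity): equalContents([1, 2, 2], (2, 1)) is True, since only
# the sets of unique elements are compared, not order or multiplicity.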
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ("string"):
assert r.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
    assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
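# Hedged usage sketch (added for illustration, not part of the original helpers; the
# arrays below are assumptions): assert_extension_array_equal first compares the NA
# masks, then the remaining valid values, as described in the Notes above.
def _example_assert_extension_array_equal():  # pragma: no cover
    left = pd.array([1, 2, None], dtype="Int64")
    right = pd.array([1, 2, None], dtype="Int64")
    assert_extension_array_equal(left, right)  # same values and same NA mask -> passes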
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
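# Hedged usage sketch (illustrative only; the Series below are assumptions): with
# check_dtype=False an int64 and a float64 Series holding the same values compare
# equal, whereas the default check_dtype=True would fail on the dtype attribute.
def _example_assert_series_equal():  # pragma: no cover
    assert_series_equal(Series([1, 2, 3]), Series([1.0, 2.0, 3.0]), check_dtype=False)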
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_like:
left, right = left.reindex_like(right), right
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
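# Hedged sketch (illustrative; the objects below are assumptions): assert_equal simply
# dispatches on the type of ``left``, so the same call covers indexes, series and frames.
def _example_assert_equal_dispatch():  # pragma: no cover
    assert_equal(pd.Index([1, 2]), pd.Index([1, 2]))  # -> assert_index_equal
    assert_equal(Series([1, 2]), Series([1, 2]))  # -> assert_series_equal
    assert_equal(DataFrame({"a": [1]}), DataFrame({"a": [1]}))  # -> assert_frame_equal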
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
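# Hedged sketch (illustrative): box_expected wraps the same expected data in different
# containers, which lets a single test be parametrized over the box classes.
def _example_box_expected():  # pragma: no cover
    data = [1, 2, 3]
    assert isinstance(box_expected(data, pd.Index), pd.Index)
    assert isinstance(box_expected(data, pd.Series), pd.Series)
    assert box_expected(data, pd.DataFrame).shape == (1, 3)  # transposed single row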
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
# indexes are equal; nothing further to check
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
* name : object dtype with string names
* id : int dtype with
* x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
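# Hedged sketch (illustrative): passing an explicit seed makes _make_timeseries
# deterministic, which is what makes it suitable for reproducible fixtures.
def _example_make_timeseries():  # pragma: no cover
    df = _make_timeseries(start="2000-01-01", end="2000-01-10", freq="1D", seed=42)
    assert list(df.columns) == ["id", "name", "x", "y"]
    assert df.index.name == "timestamp"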
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
Create an index/multiindex with given dimensions, levels, names, etc.
nentries - number of entries in index
nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
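# Hedged sketch (illustrative): with nlevels > 1 a MultiIndex is produced, and ndupe_l
# controls how many consecutive rows share a label at each level.
def _example_makeCustomIndex():  # pragma: no cover
    mi = makeCustomIndex(nentries=4, nlevels=2, ndupe_l=[2])
    assert isinstance(mi, MultiIndex)
    assert len(mi) == 4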
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
c_idx_names, idx_names - False/True/list of strings, yields No names ,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
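# Hedged sketch (illustrative): with the default data_gen_f every cell is labelled by
# its position, which makes failing comparisons easy to read.
def _example_makeCustomDataframe():  # pragma: no cover
    df = makeCustomDataframe(nrows=2, ncols=2)
    assert df.iloc[0, 0] == "R0C0"
    assert df.iloc[1, 1] == "R1C1"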
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
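# Hedged sketch (illustrative; the decorator below is an assumption, not part of this
# module): optional_args lets the same decorator be used bare or with keyword arguments.
@optional_args
def _example_tag(f, label="default"):  # pragma: no cover
    f._label = label
    return f
# @_example_tag                  -> decorated function gets _label == "default"
# @_example_tag(label="custom")  -> decorated function gets _label == "custom"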
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. Return True if the connection succeeds,
False if an IOError is raised.
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
if not errno and hasattr(err, "reason"):
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
.. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = (
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
class RNGContext:
"""
Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
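# Hedged sketch (illustrative): the global numpy RNG state is restored on exit, so the
# seeded draws are reproducible without leaking state into surrounding code.
def _example_rng_context():  # pragma: no cover
    with RNGContext(42):
        first = np.random.randn(3)
    with RNGContext(42):
        second = np.random.randn(3)
    assert (first == second).all()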
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
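# Hedged sketch (illustrative; the dialect name is an assumption): the dialect exists
# only inside the with-block and is unregistered again afterwards.
def _example_with_csv_dialect():  # pragma: no cover
    from io import StringIO
    with with_csv_dialect("pipes", delimiter="|"):
        df = pd.read_csv(StringIO("a|b\n1|2"), dialect="pipes")
    assert list(df.columns) == ["a", "b"]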
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
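# Hedged sketch (illustrative; the toy function below is an assumption): the decorated
# function runs num_threads times concurrently and its return value is discarded.
def _example_test_parallel():  # pragma: no cover
    calls = []
    @test_parallel(num_threads=4)
    def record():
        calls.append(1)
    record()
    assert len(calls) == 4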
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
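# Hedged sketch (illustrative): without a skipna_alternative the wrapper drops NaNs
# before calling the plain alternative, and returns NaN for an all-NaN input.
def _example_make_skipna_wrapper():  # pragma: no cover
    wrapper = _make_skipna_wrapper(np.sum)
    assert wrapper(Series([1.0, np.nan, 2.0])) == 3.0
    assert np.isnan(wrapper(Series([np.nan, np.nan])))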
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
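# Hedged sketch (illustrative): the result always ends with the platform line separator,
# matching what DataFrame.to_csv produces on the current OS.
def _example_convert_rows_list_to_csv_str():  # pragma: no cover
    expected = "a,b" + os.linesep + "1,2" + os.linesep
    assert convert_rows_list_to_csv_str(["a,b", "1,2"]) == expected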
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
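# Hedged sketch (illustrative): reverse dunder names are mapped to a lambda that swaps
# the operands before delegating to the forward operator.
def _example_get_op_from_name():  # pragma: no cover
    assert get_op_from_name("add")(2, 3) == 5
    assert get_op_from_name("__rsub__")(2, 3) == 1  # computes 3 - 2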
|
the-stack_106_22012
|
"""
This example demonstrates the use of the progress reporting feature in RPC calls.
It (ab)uses the feature to download a file from the server (server.py) in chunks to a temporary
directory.
The file path parameter should be a path relative to the directory where the server is serving
files from.
"""
import logging
import sys
from pathlib import Path
from tempfile import mkdtemp
from asphalt.core import CLIApplicationComponent, Context, run_application
from asphalt.serialization.serializers.cbor import CBORSerializer
logger = logging.getLogger(__name__)
class FileGetterComponent(CLIApplicationComponent):
async def start(self, ctx: Context):
self.add_component('wamp', serializer=CBORSerializer())
await super().start(ctx)
async def run(self, ctx: Context):
def on_progress(data: bytes):
# This gets called for every chunk the server sends (ctx.progress() on the other side)
outfile.write(data)
print('\r{} bytes written'.format(outfile.tell()), end='')
remote_path = sys.argv[1]
local_path = Path(mkdtemp()) / Path(remote_path).name
with local_path.open('wb') as outfile:
await ctx.wamp.call('send_file', remote_path, on_progress=on_progress)
print('\nFile saved as %s' % local_path)
if len(sys.argv) < 2:
print('Usage: {} <file path>'.format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
run_application(FileGetterComponent(), logging=logging.INFO)
|
the-stack_106_22013
|
# stdlib
from urllib.parse import urljoin
# 3rd Party
import requests
import json
# project
from checks import AgentCheck
SERVICE_CHECK_NAME = 'burrow.can_connect'
DEFAULT_BURROW_URI = 'http://localhost:8000'
CLUSTER_ENDPOINT = '/v3/kafka'
CONFIG_ENDPOINT = '/v3/config'
CHECK_TIMEOUT = 10
class BurrowCheck(AgentCheck):
'''
Extract consumer offsets, topic offsets and offset lag from Burrow REST API
'''
def check(self, instance):
burrow_address = instance.get("burrow_uri", DEFAULT_BURROW_URI)
target_clusters = instance.get("clusters")
extra_tags = instance.get("tags", [])
self._check_burrow(burrow_address, extra_tags)
clusters = self._find_clusters(burrow_address, target_clusters)
self.log.debug("Collecting Topic Offsets")
self._topic_offsets(clusters, burrow_address, extra_tags)
self.log.debug("Collecting Consumer Group Offsets")
self._consumer_groups_offsets(clusters, burrow_address, extra_tags)
self.log.debug("Collecting Consumer Group lags")
self._consumer_groups_lags(clusters, burrow_address, extra_tags)
def _consumer_groups_lags(self, clusters, burrow_address, extra_tags):
"""
Retrieve the lag for all consumer groups in the clusters
Getting Consumer list could be factored out
"""
for cluster in clusters:
consumers_path = "%s/%s/consumer" % (CLUSTER_ENDPOINT, cluster)
consumers_list = self._rest_request_to_json(burrow_address, consumers_path).get("consumers", [])
for consumer in consumers_list:
if consumer != "":
lags_path = "%s/%s/lag" % (consumers_path, consumer)
lag_json = self._rest_request_to_json(burrow_address, lags_path)
if not lag_json:
continue
status = lag_json["status"]
if status["status"] != "NOTFOUND":
consumer_tags = ["cluster:%s" % cluster, "consumer:%s" % consumer] + extra_tags
self.gauge("kafka.consumer.maxlag", status["maxlag"]["current_lag"], tags=consumer_tags)
self.gauge("kafka.consumer.totallag", status["totallag"], tags=consumer_tags)
self._submit_lag_status("kafka.consumer.lag_status", status["status"], tags=consumer_tags)
for partition in status.get("partitions", []):
if partition is not None:
partition_tags = consumer_tags + ["topic:%s" % partition["topic"], "partition:%s" % partition["partition"]]
self._submit_partition_lags(partition, partition_tags)
self._submit_lag_status("kafka.consumer.partition_lag_status", partition["status"], tags=partition_tags)
def _submit_lag_status(self, metric_namespace, status, tags):
burrow_status = {
"UNKNOWN" : 0,
"OK": 0,
"WARN": 0,
"ERR": 0,
"STOP": 0,
"STALL": 0,
"REWIND": 0
}
if status not in list(burrow_status.keys()):
self.log.error("Invalid lag status: '%s' for '%s'" % (status, tags))
return
burrow_status[status] = 1
for metric_name, value in list(burrow_status.items()):
self.gauge("%s.%s" % (metric_namespace, metric_name.lower()), value, tags=tags)
def _submit_partition_lags(self, partition, tags):
end = partition.get("end")
if end is not None:
lag = end.get("lag")
timestamp = end.get("timestamp") / 1000
self.gauge("kafka.consumer.partition_lag", lag, tags=tags)
def _check_burrow(self, burrow_address, extra_tags):
"""
Check the Burrow health endpoint
"""
url = urljoin(burrow_address, "/burrow/admin")
try:
tags = ['instance:%s' % self.hostname] + extra_tags
response = requests.get(url, timeout=CHECK_TIMEOUT)
response.raise_for_status()
except Exception as e:
self.service_check(SERVICE_CHECK_NAME,
AgentCheck.CRITICAL, tags=tags,
message=str(e))
raise
else:
self.service_check(SERVICE_CHECK_NAME, AgentCheck.OK,
tags=tags,
message='Connection to %s was successful' % url)
def _topic_offsets(self, clusters, burrow_address, extra_tags):
"""
Retrieve the offsets for all topics in the clusters
"""
for cluster in clusters:
cluster_path = "%s/%s" % (CLUSTER_ENDPOINT, cluster)
config_path = "%s/%s/%s" % (CONFIG_ENDPOINT, "/consumer/", cluster)
offsets_topic = self._rest_request_to_json(burrow_address, config_path)["module"]["offsets-topic"]
topics_path = "%s/topic" % cluster_path
topics_list = self._rest_request_to_json(burrow_address, topics_path).get("topics", [])
for topic in topics_list:
if topic == offsets_topic:
continue
topic_path = "%s/%s" % (topics_path, topic)
response = self._rest_request_to_json(burrow_address, topic_path)
tags = ["topic:%s" % topic, "cluster:%s" % cluster] + extra_tags
self._submit_offsets_from_json(offsets_type="topic", json=response, tags=tags)
def _consumer_groups_offsets(self, clusters, burrow_address, extra_tags):
"""
Retrieve the offsets for all consumer groups in the clusters
"""
for cluster in clusters:
consumers_path = "%s/%s/consumer" % (CLUSTER_ENDPOINT, cluster)
consumers_list = self._rest_request_to_json(burrow_address, consumers_path).get("consumers", [])
for consumer in consumers_list:
topics_path = "%s/%s" % (consumers_path, consumer)
# topics_list = self._rest_request_to_json(burrow_address, topics_path).get("topics", [])
topics_response = self._rest_request_to_json(burrow_address, topics_path)
if 'topics' in topics_response:
for topic_name, offsets in list(topics_response["topics"].items()):
# topic_path = "%s/%s" % (topics_path, topic)
# response = self._rest_request_to_json(burrow_address, topic_path)
# if not response:
# continue
def u(offset):
    # return the most recent offset for this partition, or 0 if no
    # offsets have been recorded yet
    if len(offset["offsets"]) > 0:
        last_offset = offset["offsets"].pop()
        if last_offset:
            return last_offset["offset"]
    return 0
offsets_num = list(map(u, offsets))
tags = ["topic:%s" % topic_name, "cluster:%s" % cluster,
"consumer:%s" % consumer] + extra_tags
self._submit_offsets_from_json(offsets_type="consumer", json=dict(offsets= offsets_num), tags=tags)
else:
self.log.info("Skipping consumer: {0}".format(consumer))
def _submit_offsets_from_json(self, offsets_type, json, tags):
"""
Find the offsets and push them into the metrics
"""
offsets = json.get("offsets")
if offsets:
# for unconsumed or empty partitions, change an offset of -1 to 0 so the
# sum isn't affected by the number of empty partitions.
offsets = [max(offset, 0) for offset in offsets]
self.gauge("kafka.%s.offsets.total" % offsets_type, sum(offsets), tags=tags)
for partition_number, offset in enumerate(offsets):
new_tags = tags + ["partition:%s" % partition_number]
self.gauge("kafka.%s.offsets" % offsets_type, offset, tags=new_tags)
def _find_clusters(self, address, target):
"""
Find the available clusters in Burrow, return all clusters if
target is not set.
"""
available_clusters = self._rest_request_to_json(address, CLUSTER_ENDPOINT).get("clusters")
if not available_clusters:
raise Exception("There are no clusters in Burrow")
if not target:
return available_clusters
else:
clusters = []
for name in target:
if name in available_clusters:
clusters.append(name)
else:
self.log.error("Cluster '%s' does not exist" % name )
return clusters
def _rest_request_to_json(self, address, object_path):
'''
Query the given URL and return the JSON response
'''
response_json = None
service_check_tags = ['instance:%s' % self.hostname]
url = urljoin(address, object_path)
try:
response = requests.get(url)
# response.raise_for_status()
response_json = response.json()
if response_json["error"]:
self.log.error("Burrow Request failed: %s: %s" % (object_path, response_json["message"]))
return {}
except requests.exceptions.Timeout as e:
self.log.error("Request timeout: {0}, {1}".format(url, e))
raise
except (requests.exceptions.HTTPError,
requests.exceptions.InvalidURL,
requests.exceptions.ConnectionError) as e:
self.log.error("Request failed: {0}, {1}".format(url, e))
raise
except ValueError as e:
self.log.error(str(e))
raise
else:
self.log.debug('Connection to %s was successful' % url)
return response_json
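# Hedged sketch (illustrative; the values below are assumptions about a local setup,
# not part of this check): a minimal instance config limits collection to one cluster
# and attaches extra tags to every metric, matching the keys read in check() above.
#
#     example_instance = {
#         "burrow_uri": "http://localhost:8000",
#         "clusters": ["local"],
#         "tags": ["env:dev"],
#     }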
|
the-stack_106_22014
|
# -*- coding: utf-8 -*-
"""Deletion functions to supplement :mod:`pybel.struct.mutation.expansion`."""
import logging
import typing
from collections import Counter, defaultdict
from typing import Collection, Iterable, Optional, Tuple
import pybel.struct.mutation.expansion.neighborhood
from pybel import BELGraph
from pybel.constants import ANNOTATIONS
from pybel.dsl import BaseEntity, CentralDogma, ComplexAbundance, CompositeAbundance, Reaction
from pybel.struct.filters import and_edge_predicates, concatenate_node_predicates
from pybel.struct.filters.edge_predicates import edge_has_annotation, is_causal_relation
from pybel.struct.filters.node_predicates import true_node_predicate
from pybel.struct.filters.typing import EdgeIterator, EdgePredicates, NodePredicates
from pybel.struct.pipeline import uni_in_place_transformation
from pybel.typing import EdgeData
__all__ = [
'get_peripheral_successor_edges',
'get_peripheral_predecessor_edges',
'count_sources',
'count_targets',
'count_peripheral_successors',
'count_peripheral_predecessors',
'get_subgraph_edges',
'get_subgraph_peripheral_nodes',
'expand_periphery',
'enrich_complexes',
'enrich_composites',
'enrich_reactions',
'enrich_variants',
'enrich_unqualified',
'expand_internal_causal',
]
logger = logging.getLogger(__name__)
def get_peripheral_successor_edges(graph: BELGraph, subgraph: Collection[BaseEntity]) -> EdgeIterator:
"""Get the set of possible successor edges peripheral to the sub-graph.
The source nodes in this iterable are all inside the sub-graph, while the targets are outside.
"""
for u in subgraph:
for _, v, k in graph.out_edges(u, keys=True):
if v not in subgraph:
yield u, v, k
def get_peripheral_predecessor_edges(graph: BELGraph, subgraph: Collection[BaseEntity]) -> EdgeIterator:
"""Get the set of possible predecessor edges peripheral to the sub-graph.
The target nodes in this iterable are all inside the sub-graph, while the sources are outside.
"""
for v in subgraph:
for u, _, k in graph.in_edges(v, keys=True):
if u not in subgraph:
yield u, v, k
def count_sources(edge_iter: EdgeIterator) -> Counter:
"""Count the source nodes in an edge iterator with keys and data.
:return: A counter of source nodes in the iterable
"""
return Counter(u for u, _, _ in edge_iter)
def count_targets(edge_iter: EdgeIterator) -> Counter:
"""Count the target nodes in an edge iterator with keys and data.
:return: A counter of target nodes in the iterable
"""
return Counter(v for _, v, _ in edge_iter)
def count_peripheral_successors(graph: BELGraph, subgraph: BELGraph) -> typing.Counter[BaseEntity]:
"""Count all peripheral successors of the subgraph.
:param graph: A BEL graph
:param subgraph: An iterator of BEL nodes
:return: A counter of possible successor nodes
"""
return count_targets(get_peripheral_successor_edges(graph, subgraph))
def count_peripheral_predecessors(graph: BELGraph, subgraph: BELGraph) -> typing.Counter[BaseEntity]:
"""Count all peripheral predecessors of the subgraph.
:param graph: A BEL graph
:param subgraph: An iterator of BEL nodes
:return: A counter of possible predecessor nodes
"""
return count_sources(get_peripheral_predecessor_edges(graph, subgraph))
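# Illustrative usage sketch (hypothetical ``graph`` and ``subgraph``): rank external nodes by how many
# edges reach them from the sub-graph, e.g. to pick candidates for expansion.
#   >>> candidate_successors = count_peripheral_successors(graph, subgraph)
#   >>> candidate_successors.most_common(5)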
def get_subgraph_edges(
graph: BELGraph,
annotation: str,
value: str,
source_filter: Optional[NodePredicates] = None,
target_filter: Optional[NodePredicates] = None,
) -> Iterable[Tuple[BaseEntity, BaseEntity, str, EdgeData]]:
"""Get all edges from a given subgraph whose source and target nodes pass all of the given filters.
:param graph: A BEL graph
:param annotation: The annotation to search
:param value: The annotation value to search by
:param source_filter: Optional filter for source nodes (graph, node) -> bool
:param target_filter: Optional filter for target nodes (graph, node) -> bool
:return: An iterable of (source node, target node, key, data) for all edges that match the annotation/value and
node filters
"""
if source_filter is None:
source_filter = true_node_predicate
if target_filter is None:
target_filter = true_node_predicate
for u, v, k, data in graph.edges(keys=True, data=True):
if not edge_has_annotation(data, annotation):
continue
if data[ANNOTATIONS][annotation] == value and source_filter(graph, u) and target_filter(graph, v):
yield u, v, k, data
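# Illustrative usage sketch (hypothetical annotation name and value):
#   >>> from pybel.struct.filters.node_predicates import not_pathology
#   >>> edge_iter = get_subgraph_edges(graph, annotation='Subgraph', value='Apoptosis subgraph',
#   ...                                source_filter=not_pathology)
#   >>> for u, v, key, data in edge_iter:
#   ...     print(u, v, key)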
def get_subgraph_peripheral_nodes(
graph: BELGraph,
subgraph: Collection[BaseEntity],
node_predicates: Optional[NodePredicates] = None,
edge_predicates: Optional[EdgePredicates] = None,
):
"""Get a summary dictionary of all peripheral nodes to a given sub-graph.
:return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)},
'predecessor': {internal node: list of (key, dict)}}}
:rtype: dict
For example, it might be useful to quantify the number of predecessors and successors:
>>> from pybel.struct.filters.node_predicates import not_pathology
>>> value = 'Blood vessel dilation subgraph'
>>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value)
>>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=not_pathology)
>>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True):
    >>> if 1 == len(p[node]['successor']) or 1 == len(p[node]['predecessor']):
>>> continue
>>> print(node,
>>> len(p[node]['successor']),
>>> len(p[node]['predecessor']),
>>> len(set(p[node]['successor']) | set(p[node]['predecessor'])))
"""
node_filter = concatenate_node_predicates(node_predicates=node_predicates)
edge_filter = and_edge_predicates(edge_predicates=edge_predicates)
result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for u, v, k, d in get_peripheral_successor_edges(graph, subgraph):
if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):
continue
result[v]['predecessor'][u].append((k, d))
for u, v, k, d in get_peripheral_predecessor_edges(graph, subgraph):
if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):
continue
result[u]['successor'][v].append((k, d))
return result
@uni_in_place_transformation
def expand_periphery(
universe: BELGraph,
graph: BELGraph,
node_predicates: Optional[NodePredicates] = None,
edge_predicates: Optional[EdgePredicates] = None,
threshold: int = 2,
) -> None:
"""Iterate over all possible edges, peripheral to a given subgraph, that could be added from the given graph.
Edges could be added if they go to nodes that are involved in relationships that occur with more than the
threshold (default 2) number of nodes in the subgraph.
:param universe: The universe of BEL knowledge
:param graph: The (sub)graph to expand
    :param node_predicates: Optional predicate(s) applied to candidate peripheral nodes
    :param edge_predicates: Optional predicate(s) applied to candidate peripheral edges
    :param threshold: Minimum number of distinct sub-graph nodes to which a peripheral node must connect before it is added
A reasonable edge filter to use is :func:`pybel_tools.filters.keep_causal_edges` because this function can allow
for huge expansions if there happen to be hub nodes.
"""
nd = get_subgraph_peripheral_nodes(
universe, graph,
node_predicates=node_predicates,
edge_predicates=edge_predicates,
)
for node, dd in nd.items():
pred_d = dd['predecessor']
succ_d = dd['successor']
in_subgraph_connections = set(pred_d) | set(succ_d)
if threshold > len(in_subgraph_connections):
continue
        # copy the node into the sub-graph; networkx 2.x removed the ``attr_dict`` keyword, and
        # ``universe[node]`` is the adjacency view rather than the node's data dictionary
        graph.add_node(node, **universe.nodes[node])
for u, edges in pred_d.items():
for key, data in edges:
graph.add_edge(u, node, key=key, **data)
for v, edges in succ_d.items():
for key, data in edges:
graph.add_edge(node, v, key=key, **data)
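# Illustrative usage sketch (hypothetical ``universe`` and induced sub-graph ``sg``): only consider causal
# peripheral edges and require at least three connections into the sub-graph before a node is added.
#   >>> expand_periphery(universe, sg, edge_predicates=is_causal_relation, threshold=3)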
@uni_in_place_transformation
def enrich_complexes(graph: BELGraph) -> None:
"""Add all of the members of the complex abundances to the graph."""
for u in list(graph):
if not isinstance(u, ComplexAbundance):
continue
for v in u.members:
graph.add_part_of(v, u)
@uni_in_place_transformation
def enrich_composites(graph: BELGraph) -> None:
"""Add all of the members of the composite abundances to the graph."""
for u in list(graph):
if not isinstance(u, CompositeAbundance):
continue
for v in u.members:
graph.add_part_of(v, u)
@uni_in_place_transformation
def enrich_reactions(graph: BELGraph) -> None:
"""Add all of the reactants and products of reactions to the graph."""
for u in list(graph):
if not isinstance(u, Reaction):
continue
for v in u.reactants:
graph.add_has_reactant(u, v)
for v in u.products:
graph.add_has_product(u, v)
@uni_in_place_transformation
def enrich_variants(graph: BELGraph) -> None:
"""Add the reference nodes for all variants of the given function."""
for u in list(graph):
if not isinstance(u, CentralDogma):
continue
parent = u.get_parent()
if parent is None:
continue
if parent not in graph:
graph.add_has_variant(parent, u)
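# Illustrative usage sketch (hypothetical graph content); after enrichment the unmodified parent protein
# is present in the graph:
#   >>> from pybel import BELGraph
#   >>> from pybel.dsl import Protein, ProteinModification
#   >>> g = BELGraph()
#   >>> akt1_ph = Protein(namespace='HGNC', name='AKT1', variants=[ProteinModification('Ph')])
#   >>> g.add_node_from_data(akt1_ph)
#   >>> enrich_variants(g)
#   >>> akt1_ph.get_parent() in g
#   True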
@uni_in_place_transformation
def enrich_unqualified(graph: BELGraph) -> None:
"""Enrich the sub-graph with the unqualified edges from the graph.
    This is useful when a sub-graph has been induced from the original graph with an annotation filter: unqualified
    edges carry no annotations, so the edges that most likely connect elements within your sub-graph are excluded by
    such a filter.
.. seealso::
This function thinly wraps the successive application of the following functions:
- :func:`enrich_complexes`
- :func:`enrich_composites`
- :func:`enrich_reactions`
- :func:`enrich_variants`
Equivalent to:
>>> enrich_complexes(graph)
>>> enrich_composites(graph)
>>> enrich_reactions(graph)
>>> enrich_variants(graph)
"""
enrich_complexes(graph)
enrich_composites(graph)
enrich_reactions(graph)
enrich_variants(graph)
@uni_in_place_transformation
def expand_internal_causal(universe: BELGraph, graph: BELGraph) -> None:
"""Add causal edges between entities in the sub-graph.
    This is a thin wrapper around :func:`expand_internal`, restricted to causal relations.
:param universe: A BEL graph representing the universe of all knowledge
:param graph: The target BEL graph to enrich with causal relations between contained nodes
Equivalent to:
>>> from pybel.struct import expand_internal, is_causal_relation
>>> expand_internal(universe, graph, edge_predicates=is_causal_relation)
"""
for u, v, key in pybel.struct.mutation.expansion.neighborhood.iterate_internal(universe, graph):
data = universe.edges[u][v][key]
if is_causal_relation(data):
graph.add_edge(u, v, key=key, **data)